//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <optional>

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

#define VPINTERNAL_CASE(X) "|" #X

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}
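
// Given the .Case redefinition of VPINTERNAL_CASE above, the StringSwitch in
// parseOverrideOption expands to (sketch):
//
//   return StringSwitch<VPTransform>(TextOpt)
//       .Case("Legal", VPLegalization::Legal)
//       .Case("Discard", VPLegalization::Discard)
//       .Case("Convert", VPLegalization::Convert);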

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of lowered vector predication operations");

///// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
static bool isAllTrueMask(Value *MaskVal) {
  if (Value *SplattedVal = getSplatValue(MaskVal))
    if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
      return ConstValue->isAllOnesValue();

  return false;
}

/// \returns A non-excepting divisor constant for this type.
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

static bool maySpeculateLanes(VPIntrinsic &VPI) {
  // The result of VP reductions depends on the mask and evl.
  if (isa<VPReductionIntrinsic>(VPI))
    return false;
  // Fallback to whether the intrinsic is speculatable.
  if (auto IntrID = VPI.getFunctionalIntrinsicID())
    return Intrinsic::getAttributes(VPI.getContext(), *IntrID)
        .hasFnAttr(Attribute::AttrKind::Speculatable);
  if (auto Opc = VPI.getFunctionalOpcode())
    return isSafeToSpeculativelyExecuteWithOpcode(*Opc, &VPI);
  return false;
}
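
// For example, a vp.fadd may be speculated across masked-off lanes (fadd is
// side-effect free on every lane), whereas a vp.sdiv may not (a disabled lane
// could still divide by zero), and vp.reduce.* intrinsics never qualify
// because their scalar result depends on which lanes are active.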

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  const TargetTransformInfo &TTI;

  /// \returns A bitmask that is true where the lane position is less-than \p
  /// EVLParam.
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  /// If needed, folds the EVL in the mask operand and discards the EVL
  /// parameter. Returns a pair of the value of the intrinsic after the change
  /// (if any) and whether the mask was actually folded.
  std::pair<Value *, bool> foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation. Returns true if the %evl (if any) was
  /// effectively changed.
  bool discardEVLParameter(VPIntrinsic &PI);

  /// Lower this VP binary operator to an unpredicated binary operator.
  Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                           VPIntrinsic &PI);

  /// Lower this VP int call to an unpredicated int call.
  Value *expandPredicationToIntCall(IRBuilder<> &Builder, VPIntrinsic &PI);

  /// Lower this VP fp call to an unpredicated fp call.
  Value *expandPredicationToFPCall(IRBuilder<> &Builder, VPIntrinsic &PI,
                                   unsigned UnpredicatedIntrinsicID);

  /// Lower this VP reduction to a call to an unpredicated reduction intrinsic.
  Value *expandPredicationInReduction(IRBuilder<> &Builder,
                                      VPReductionIntrinsic &PI);

  /// Lower this VP cast operation to a non-VP intrinsic.
  Value *expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                          VPIntrinsic &VPI);

  /// Lower this VP memory operation to a non-VP intrinsic.
  Value *expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                            VPIntrinsic &VPI);

  /// Lower this VP comparison to a call to an unpredicated comparison.
  Value *expandPredicationInComparison(IRBuilder<> &Builder,
                                       VPCmpIntrinsic &PI);

  /// Query TTI and expand the vector predication in \p PI accordingly.
  Value *expandPredication(VPIntrinsic &PI);

  /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
  /// overrides TTI with the cl::opts listed at the top of this file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(const TargetTransformInfo &TTI)
      : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  /// Expand llvm.vp.* intrinsics as requested by \p TTI.
  /// Returns the details of the expansion.
  VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
};

//// CachingVPExpander {

Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO add caching
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    // `get_active_lane_mask` performs an implicit less-than comparison.
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
                                   {BoolVecTy, EVLParam->getType()},
                                   {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = Builder.CreateStepVector(VectorType::get(LaneTy, ElemCount));
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}
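
// Illustrative result of convertEVLToMask for a 4-lane vector and an i32
// %evl (value names are hypothetical):
//
//   scalable: %mask = call <vscale x 4 x i1>
//                     @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 %evl)
//   fixed:    %mask = icmp ult <4 x i32> <i32 0, i32 1, i32 2, i32 3>,
//                     %evl.splat   ; %evl broadcast into every lane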

Value *
CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  assert(maySpeculateLanes(VPI) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor on masked-off lanes (1).
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // 2nd operand must not be zero.
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());

  replaceOperation(*NewBinOp, VPI);
  return NewBinOp;
}
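
// Sketch of the result for a masked vp.sdiv on <4 x i32> (the %evl has been
// folded or discarded by this point; value names are illustrative):
//
//   %safediv = select <4 x i1> %mask, <4 x i32> %b,
//              <4 x i32> <i32 1, i32 1, i32 1, i32 1>
//   %res     = sdiv <4 x i32> %a, %safediv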

Value *CachingVPExpander::expandPredicationToIntCall(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  std::optional<unsigned> FID = VPI.getFunctionalIntrinsicID();
  if (!FID)
    return nullptr;
  SmallVector<Value *> Argument;
  for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
    Argument.push_back(VPI.getOperand(i));
  }
  Value *NewOp = Builder.CreateIntrinsic(FID.value(), {VPI.getType()}, Argument,
                                         /*FMFSource=*/nullptr, VPI.getName());
  replaceOperation(*NewOp, VPI);
  return NewOp;
}

Value *CachingVPExpander::expandPredicationToFPCall(
    IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
  assert(maySpeculateLanes(VPI) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  switch (UnpredicatedIntrinsicID) {
  case Intrinsic::fabs:
  case Intrinsic::sqrt:
  case Intrinsic::maxnum:
  case Intrinsic::minnum: {
    SmallVector<Value *> Argument;
    for (unsigned i = 0; i < VPI.getNumOperands() - 3; i++) {
      Argument.push_back(VPI.getOperand(i));
    }
    Value *NewOp = Builder.CreateIntrinsic(
        UnpredicatedIntrinsicID, {VPI.getType()}, Argument,
        /*FMFSource=*/nullptr, VPI.getName());
    replaceOperation(*NewOp, VPI);
    return NewOp;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd: {
    Value *Op0 = VPI.getOperand(0);
    Value *Op1 = VPI.getOperand(1);
    Value *Op2 = VPI.getOperand(2);
    Function *Fn = Intrinsic::getOrInsertDeclaration(
        VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
    Value *NewOp;
    if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
      NewOp =
          Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
    else
      NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2}, VPI.getName());
    replaceOperation(*NewOp, VPI);
    return NewOp;
  }
  }

  return nullptr;
}
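
// Sketch for vp.fma on <4 x float>: the three value operands are forwarded to
// the unpredicated intrinsic unchanged (value names are illustrative):
//
//   %res = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b,
//                                           <4 x float> %c)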

static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  Intrinsic::ID RdxID = *VPI.getFunctionalIntrinsicID();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(VPI))
    FMF = VPI.getFastMathFlags();
  return getReductionIdentity(RdxID, EltTy, FMF);
}

Value *
CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                VPReductionIntrinsic &VPI) {
  assert(maySpeculateLanes(VPI) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert neutral element in masked-out positions
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_mul:
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    unsigned Opc = getArithmeticReductionInstruction(RedID);
    assert(Instruction::isBinaryOp(Opc));
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    Reduction =
        Builder.CreateBinOp((Instruction::BinaryOps)Opc, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_smax:
  case Intrinsic::vp_reduce_smin:
  case Intrinsic::vp_reduce_umax:
  case Intrinsic::vp_reduce_umin:
  case Intrinsic::vp_reduce_fmax:
  case Intrinsic::vp_reduce_fmin:
  case Intrinsic::vp_reduce_fmaximum:
  case Intrinsic::vp_reduce_fminimum: {
    Intrinsic::ID RedID = *VPI.getFunctionalIntrinsicID();
    Intrinsic::ID ScalarID = getMinMaxReductionIntrinsicOp(RedID);
    Reduction = Builder.CreateUnaryIntrinsic(RedID, RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction = Builder.CreateBinaryIntrinsic(ScalarID, Reduction, Start);
    break;
  }
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return Reduction;
}
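
// Sketch for a masked vp.reduce.add on <4 x i32>: masked-off lanes receive
// the neutral element (0 for add) before the predicate-free reduction, and
// the start value is applied afterwards (value names are illustrative):
//
//   %sel = select <4 x i1> %mask, <4 x i32> %v, <4 x i32> zeroinitializer
//   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %sel)
//   %res = add i32 %rdx, %start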

Value *CachingVPExpander::expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
                                                           VPIntrinsic &VPI) {
  Intrinsic::ID VPID = VPI.getIntrinsicID();
  unsigned CastOpcode = VPIntrinsic::getFunctionalOpcodeForVP(VPID).value();
  assert(Instruction::isCast(CastOpcode));
  Value *CastOp =
      Builder.CreateCast(Instruction::CastOps(CastOpcode), VPI.getOperand(0),
                         VPI.getType(), VPI.getName());

  replaceOperation(*CastOp, VPI);
  return CastOp;
}
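
// Sketch: a vp.sext whose predicate may be ignored becomes the plain cast
// instruction, e.g. %res = sext <4 x i16> %x to <4 x i32>.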

Value *
CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
                                                      VPIntrinsic &VPI) {
  assert(VPI.canIgnoreVectorLengthParam());

  const auto &DL = VPI.getDataLayout();

  Value *MaskParam = VPI.getMaskParam();
  Value *PtrParam = VPI.getMemoryPointerParam();
  Value *DataParam = VPI.getMemoryDataParam();
  bool IsUnmasked = isAllTrueMask(MaskParam);

  MaybeAlign AlignOpt = VPI.getPointerAlignment();

  Value *NewMemoryInst = nullptr;
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Not a VP memory intrinsic");
  case Intrinsic::vp_store:
    if (IsUnmasked) {
      StoreInst *NewStore =
          Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewStore->setAlignment(*AlignOpt);
      NewMemoryInst = NewStore;
    } else
      NewMemoryInst = Builder.CreateMaskedStore(
          DataParam, PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_load:
    if (IsUnmasked) {
      LoadInst *NewLoad =
          Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
      if (AlignOpt.has_value())
        NewLoad->setAlignment(*AlignOpt);
      NewMemoryInst = NewLoad;
    } else
      NewMemoryInst = Builder.CreateMaskedLoad(
          VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);

    break;
  case Intrinsic::vp_scatter: {
    auto *ElementType =
        cast<VectorType>(DataParam->getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedScatter(
        DataParam, PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam);
    break;
  }
  case Intrinsic::vp_gather: {
    auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
    NewMemoryInst = Builder.CreateMaskedGather(
        VPI.getType(), PtrParam,
        AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam, nullptr,
        VPI.getName());
    break;
  }
  }

  assert(NewMemoryInst);
  replaceOperation(*NewMemoryInst, VPI);
  return NewMemoryInst;
}
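
// Sketch for a masked vp.load of <4 x i32> with alignment 4 (an all-true mask
// produces a plain `load` instead; value names are illustrative):
//
//   %res = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
//                                                    <4 x i1> %mask,
//                                                    <4 x i32> poison)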

Value *CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder,
                                                        VPCmpIntrinsic &VPI) {
  assert(maySpeculateLanes(VPI) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
         *VPI.getFunctionalOpcode() == Instruction::FCmp);

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  auto Pred = VPI.getPredicate();

  auto *NewCmp = Builder.CreateCmp(Pred, Op0, Op1);

  replaceOperation(*NewCmp, VPI);
  return NewCmp;
}

bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return false;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return false;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
  if (StaticElemCount.isScalable()) {
    // TODO add caching
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateIntrinsic(Intrinsic::vscale, Int32Ty, {},
                                            /*FMFSource=*/nullptr, "vscale");
    MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
                               /*NUW*/ true, /*NSW*/ false);
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
  return true;
}
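
// Sketch for <vscale x 4 x i32>: the %evl operand is overwritten with the
// full static vector length, computed as
//
//   %vscale        = call i32 @llvm.vscale.i32()
//   %scalable_size = mul nuw i32 %vscale, 4
//
// For fixed-width vectors the constant element count is used directly.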

std::pair<Value *, bool> CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Ineffective %evl parameter and so nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return {&VPI, false};

  // Only VP intrinsics can have an %evl parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return {&VPI, true};
}
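
// Sketch for a vp.add on <4 x i32>: after folding, the predicate becomes
//
//   %evl_mask = icmp ult <4 x i32> <i32 0, i32 1, i32 2, i32 3>, %evl.splat
//   %new_mask = and <4 x i1> %evl_mask, %old_mask
//
// and %evl itself is reset to the static vector length (4 here).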

Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
    return expandPredicationInComparison(Builder, *VPCmp);

  if (VPCastIntrinsic::isVPCast(VPI.getIntrinsicID())) {
    return expandPredicationToCastIntrinsic(Builder, VPI);
  }

  switch (VPI.getIntrinsicID()) {
  default:
    break;
  case Intrinsic::vp_fneg: {
    Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0), VPI.getName());
    replaceOperation(*NewNegOp, VPI);
    return NewNegOp;
  }
  case Intrinsic::vp_abs:
  case Intrinsic::vp_smax:
  case Intrinsic::vp_smin:
  case Intrinsic::vp_umax:
  case Intrinsic::vp_umin:
  case Intrinsic::vp_bswap:
  case Intrinsic::vp_bitreverse:
  case Intrinsic::vp_ctpop:
  case Intrinsic::vp_ctlz:
  case Intrinsic::vp_cttz:
  case Intrinsic::vp_sadd_sat:
  case Intrinsic::vp_uadd_sat:
  case Intrinsic::vp_ssub_sat:
  case Intrinsic::vp_usub_sat:
  case Intrinsic::vp_fshl:
  case Intrinsic::vp_fshr:
    return expandPredicationToIntCall(Builder, VPI);
  case Intrinsic::vp_fabs:
  case Intrinsic::vp_sqrt:
  case Intrinsic::vp_maxnum:
  case Intrinsic::vp_minnum:
  case Intrinsic::vp_maximum:
  case Intrinsic::vp_minimum:
  case Intrinsic::vp_fma:
  case Intrinsic::vp_fmuladd:
    return expandPredicationToFPCall(Builder, VPI,
                                     VPI.getFunctionalIntrinsicID().value());
  case Intrinsic::vp_load:
  case Intrinsic::vp_store:
  case Intrinsic::vp_gather:
  case Intrinsic::vp_scatter:
    return expandPredicationInMemoryIntrinsic(Builder, VPI);
  }

  if (auto CID = VPI.getConstrainedIntrinsicID())
    if (Value *Call = expandPredicationToFPCall(Builder, VPI, *CID))
      return Call;

  return &VPI;
}

//// } CachingVPExpander

void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
  // Operations with speculatable lanes do not strictly need predication.
  if (maySpeculateLanes(VPI)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // No need to expand %evl into the %mask only to ignore that code.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}
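
// For example, a speculatable vp.fadd with OpStrategy == Convert has its
// EVLParamStrategy relaxed to Discard (no point materializing a lane mask the
// unpredicated fadd will ignore), while a non-speculatable vp.sdiv with
// OpStrategy == Convert is forced to EVLParamStrategy == Convert so that %evl
// is folded into %mask before the predicate is dropped.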

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing, the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

VPExpansionDetails
CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
  auto Strategy = getVPLegalizationStrategy(VPI);
  sanitizeStrategy(VPI, Strategy);

  VPExpansionDetails Changed = VPExpansionDetails::IntrinsicUnchanged;

  // Transform the EVL parameter.
  switch (Strategy.EVLParamStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    if (discardEVLParameter(VPI))
      Changed = VPExpansionDetails::IntrinsicUpdated;
    break;
  case VPLegalization::Convert:
    if (auto [NewVPI, Folded] = foldEVLIntoMask(VPI); Folded) {
      (void)NewVPI;
      Changed = VPExpansionDetails::IntrinsicUpdated;
      ++NumFoldedVL;
    }
    break;
  }

  // Replace with a non-predicated operation.
  switch (Strategy.OpStrategy) {
  case VPLegalization::Legal:
    break;
  case VPLegalization::Discard:
    llvm_unreachable("Invalid strategy for operators.");
  case VPLegalization::Convert:
    if (Value *V = expandPredication(VPI); V != &VPI) {
      ++NumLoweredVPOps;
      Changed = VPExpansionDetails::IntrinsicReplaced;
    }
    break;
  }

  return Changed;
}
} // namespace

VPExpansionDetails
llvm::expandVectorPredicationIntrinsic(VPIntrinsic &VPI,
                                       const TargetTransformInfo &TTI) {
  return CachingVPExpander(TTI).expandVectorPredication(VPI);
}