LLVM 20.0.0git
ExpandVectorPredication.cpp
Go to the documentation of this file.
1//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements IR expansion for vector predication intrinsics, allowing
10// targets to enable vector predication until just before codegen.
11//
12//===----------------------------------------------------------------------===//
13
#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include <optional>
30
31using namespace llvm;
32
35
36// Keep this in sync with TargetTransformInfo::VPLegalization.
37#define VPINTERNAL_VPLEGAL_CASES \
38 VPINTERNAL_CASE(Legal) \
39 VPINTERNAL_CASE(Discard) \
40 VPINTERNAL_CASE(Convert)
41
42#define VPINTERNAL_CASE(X) "|" #X
43
44// Override options.
46 "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
47 cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
48 ". If non-empty, ignore "
49 "TargetTransformInfo and "
50 "always use this transformation for the %evl parameter (Used in "
51 "testing)."));
52
54 "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
55 cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
56 ". If non-empty, Ignore "
57 "TargetTransformInfo and "
58 "always use this transformation for the %mask parameter (Used in "
59 "testing)."));
60
61#undef VPINTERNAL_CASE
62#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)
63
64static VPTransform parseOverrideOption(const std::string &TextOpt) {
66}
67
68#undef VPINTERNAL_VPLEGAL_CASES
69
70// Whether any override options are set.
72 return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
73}
74
#define DEBUG_TYPE "expandvp"

// Pass statistics, printed with -stats.
STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of folded vector predication operations");
79
80///// Helpers {
81
82/// \returns Whether the vector mask \p MaskVal has all lane bits set.
83static bool isAllTrueMask(Value *MaskVal) {
84 if (Value *SplattedVal = getSplatValue(MaskVal))
85 if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
86 return ConstValue->isAllOnesValue();
87
88 return false;
89}
90
91/// \returns A non-excepting divisor constant for this type.
92static Constant *getSafeDivisor(Type *DivTy) {
93 assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
94 return ConstantInt::get(DivTy, 1u, false);
95}
96
97/// Transfer operation properties from \p OldVPI to \p NewVal.
98static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
99 auto *NewInst = dyn_cast<Instruction>(&NewVal);
100 if (!NewInst || !isa<FPMathOperator>(NewVal))
101 return;
102
103 auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
104 if (!OldFMOp)
105 return;
106
107 NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
108}
109
/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// OldVP gets erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  // Copy fast-math flags (if any) before rewiring users, then drop the old
  // intrinsic from the function.
  transferDecorations(NewOp, OldOp);
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}
117
119 // The result of VP reductions depends on the mask and evl.
120 if (isa<VPReductionIntrinsic>(VPI))
121 return false;
122 // Fallback to whether the intrinsic is speculatable.
123 if (auto IntrID = VPI.getFunctionalIntrinsicID())
124 return Intrinsic::getAttributes(VPI.getContext(), *IntrID)
125 .hasFnAttr(Attribute::AttrKind::Speculatable);
126 if (auto Opc = VPI.getFunctionalOpcode())
128 return false;
129}
130
131//// } Helpers
132
133namespace {
134
135// Expansion pass state at function scope.
136struct CachingVPExpander {
138
139 /// \returns A (fixed length) vector with ascending integer indices
140 /// (<0, 1, ..., NumElems-1>).
141 /// \p Builder
142 /// Used for instruction creation.
143 /// \p LaneTy
144 /// Integer element type of the result vector.
145 /// \p NumElems
146 /// Number of vector elements.
147 Value *createStepVector(IRBuilder<> &Builder, Type *LaneTy,
148 unsigned NumElems);
149
150 /// \returns A bitmask that is true where the lane position is less-than \p
151 /// EVLParam
152 ///
153 /// \p Builder
154 /// Used for instruction creation.
155 /// \p VLParam
156 /// The explicit vector length parameter to test against the lane
157 /// positions.
158 /// \p ElemCount
159 /// Static (potentially scalable) number of vector elements.
160 Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
161 ElementCount ElemCount);
162
163 /// If needed, folds the EVL in the mask operand and discards the EVL
164 /// parameter. Returns a pair of the value of the intrinsic after the change
165 /// (if any) and whether the mask was actually folded.
166 std::pair<Value *, bool> foldEVLIntoMask(VPIntrinsic &VPI);
167
168 /// "Remove" the %evl parameter of \p PI by setting it to the static vector
169 /// length of the operation. Returns true if the %evl (if any) was effectively
170 /// changed.
171 bool discardEVLParameter(VPIntrinsic &PI);
172
173 /// Lower this VP binary operator to a unpredicated binary operator.
174 Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
175 VPIntrinsic &PI);
176
177 /// Lower this VP int call to a unpredicated int call.
178 Value *expandPredicationToIntCall(IRBuilder<> &Builder, VPIntrinsic &PI,
179 unsigned UnpredicatedIntrinsicID);
180
181 /// Lower this VP fp call to a unpredicated fp call.
182 Value *expandPredicationToFPCall(IRBuilder<> &Builder, VPIntrinsic &PI,
183 unsigned UnpredicatedIntrinsicID);
184
185 /// Lower this VP reduction to a call to an unpredicated reduction intrinsic.
186 Value *expandPredicationInReduction(IRBuilder<> &Builder,
188
189 /// Lower this VP cast operation to a non-VP intrinsic.
190 Value *expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
191 VPIntrinsic &VPI);
192
193 /// Lower this VP memory operation to a non-VP intrinsic.
194 Value *expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
195 VPIntrinsic &VPI);
196
197 /// Lower this VP comparison to a call to an unpredicated comparison.
198 Value *expandPredicationInComparison(IRBuilder<> &Builder,
199 VPCmpIntrinsic &PI);
200
201 /// Query TTI and expand the vector predication in \p P accordingly.
202 Value *expandPredication(VPIntrinsic &PI);
203
204 /// Determine how and whether the VPIntrinsic \p VPI shall be expanded. This
205 /// overrides TTI with the cl::opts listed at the top of this file.
206 VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
207 bool UsingTTIOverrides;
208
209public:
210 CachingVPExpander(const TargetTransformInfo &TTI)
211 : TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}
212
213 /// Expand llvm.vp.* intrinsics as requested by \p TTI.
214 /// Returns the details of the expansion.
215 VPExpansionDetails expandVectorPredication(VPIntrinsic &VPI);
216};
217
218//// CachingVPExpander {
219
220Value *CachingVPExpander::createStepVector(IRBuilder<> &Builder, Type *LaneTy,
221 unsigned NumElems) {
222 // TODO add caching
224
225 for (unsigned Idx = 0; Idx < NumElems; ++Idx)
226 ConstElems.push_back(ConstantInt::get(LaneTy, Idx, false));
227
228 return ConstantVector::get(ConstElems);
229}
230
231Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
232 Value *EVLParam,
233 ElementCount ElemCount) {
234 // TODO add caching
235 // Scalable vector %evl conversion.
236 if (ElemCount.isScalable()) {
237 auto *M = Builder.GetInsertBlock()->getModule();
238 Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
239 Function *ActiveMaskFunc = Intrinsic::getDeclaration(
240 M, Intrinsic::get_active_lane_mask, {BoolVecTy, EVLParam->getType()});
241 // `get_active_lane_mask` performs an implicit less-than comparison.
242 Value *ConstZero = Builder.getInt32(0);
243 return Builder.CreateCall(ActiveMaskFunc, {ConstZero, EVLParam});
244 }
245
246 // Fixed vector %evl conversion.
247 Type *LaneTy = EVLParam->getType();
248 unsigned NumElems = ElemCount.getFixedValue();
249 Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
250 Value *IdxVec = createStepVector(Builder, LaneTy, NumElems);
251 return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
252}
253
254Value *
255CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
256 VPIntrinsic &VPI) {
258 "Implicitly dropping %evl in non-speculatable operator!");
259
260 auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
262
263 Value *Op0 = VPI.getOperand(0);
264 Value *Op1 = VPI.getOperand(1);
265 Value *Mask = VPI.getMaskParam();
266
267 // Blend in safe operands.
268 if (Mask && !isAllTrueMask(Mask)) {
269 switch (OC) {
270 default:
271 // Can safely ignore the predicate.
272 break;
273
274 // Division operators need a safe divisor on masked-off lanes (1).
275 case Instruction::UDiv:
276 case Instruction::SDiv:
277 case Instruction::URem:
278 case Instruction::SRem:
279 // 2nd operand must not be zero.
280 Value *SafeDivisor = getSafeDivisor(VPI.getType());
281 Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
282 }
283 }
284
285 Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());
286
287 replaceOperation(*NewBinOp, VPI);
288 return NewBinOp;
289}
290
291Value *CachingVPExpander::expandPredicationToIntCall(
292 IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
293 switch (UnpredicatedIntrinsicID) {
294 case Intrinsic::abs:
295 case Intrinsic::smax:
296 case Intrinsic::smin:
297 case Intrinsic::umax:
298 case Intrinsic::umin: {
299 Value *Op0 = VPI.getOperand(0);
300 Value *Op1 = VPI.getOperand(1);
302 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
303 Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
304 replaceOperation(*NewOp, VPI);
305 return NewOp;
306 }
307 case Intrinsic::bswap:
308 case Intrinsic::bitreverse: {
309 Value *Op = VPI.getOperand(0);
311 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
312 Value *NewOp = Builder.CreateCall(Fn, {Op}, VPI.getName());
313 replaceOperation(*NewOp, VPI);
314 return NewOp;
315 }
316 }
317 return nullptr;
318}
319
320Value *CachingVPExpander::expandPredicationToFPCall(
321 IRBuilder<> &Builder, VPIntrinsic &VPI, unsigned UnpredicatedIntrinsicID) {
323 "Implicitly dropping %evl in non-speculatable operator!");
324
325 switch (UnpredicatedIntrinsicID) {
326 case Intrinsic::fabs:
327 case Intrinsic::sqrt: {
328 Value *Op0 = VPI.getOperand(0);
330 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
331 Value *NewOp = Builder.CreateCall(Fn, {Op0}, VPI.getName());
332 replaceOperation(*NewOp, VPI);
333 return NewOp;
334 }
335 case Intrinsic::maxnum:
336 case Intrinsic::minnum: {
337 Value *Op0 = VPI.getOperand(0);
338 Value *Op1 = VPI.getOperand(1);
340 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
341 Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
342 replaceOperation(*NewOp, VPI);
343 return NewOp;
344 }
345 case Intrinsic::fma:
346 case Intrinsic::fmuladd:
347 case Intrinsic::experimental_constrained_fma:
348 case Intrinsic::experimental_constrained_fmuladd: {
349 Value *Op0 = VPI.getOperand(0);
350 Value *Op1 = VPI.getOperand(1);
351 Value *Op2 = VPI.getOperand(2);
353 VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
354 Value *NewOp;
355 if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
356 NewOp =
357 Builder.CreateConstrainedFPCall(Fn, {Op0, Op1, Op2}, VPI.getName());
358 else
359 NewOp = Builder.CreateCall(Fn, {Op0, Op1, Op2}, VPI.getName());
360 replaceOperation(*NewOp, VPI);
361 return NewOp;
362 }
363 }
364
365 return nullptr;
366}
367
368static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
369 Type *EltTy) {
370 bool Negative = false;
371 unsigned EltBits = EltTy->getScalarSizeInBits();
372 Intrinsic::ID VID = VPI.getIntrinsicID();
373 switch (VID) {
374 default:
375 llvm_unreachable("Expecting a VP reduction intrinsic");
376 case Intrinsic::vp_reduce_add:
377 case Intrinsic::vp_reduce_or:
378 case Intrinsic::vp_reduce_xor:
379 case Intrinsic::vp_reduce_umax:
380 return Constant::getNullValue(EltTy);
381 case Intrinsic::vp_reduce_mul:
382 return ConstantInt::get(EltTy, 1, /*IsSigned*/ false);
383 case Intrinsic::vp_reduce_and:
384 case Intrinsic::vp_reduce_umin:
385 return ConstantInt::getAllOnesValue(EltTy);
386 case Intrinsic::vp_reduce_smin:
387 return ConstantInt::get(EltTy->getContext(),
388 APInt::getSignedMaxValue(EltBits));
389 case Intrinsic::vp_reduce_smax:
390 return ConstantInt::get(EltTy->getContext(),
391 APInt::getSignedMinValue(EltBits));
392 case Intrinsic::vp_reduce_fmax:
393 case Intrinsic::vp_reduce_fmaximum:
394 Negative = true;
395 [[fallthrough]];
396 case Intrinsic::vp_reduce_fmin:
397 case Intrinsic::vp_reduce_fminimum: {
398 bool PropagatesNaN = VID == Intrinsic::vp_reduce_fminimum ||
399 VID == Intrinsic::vp_reduce_fmaximum;
401 const fltSemantics &Semantics = EltTy->getFltSemantics();
402 return (!Flags.noNaNs() && !PropagatesNaN)
403 ? ConstantFP::getQNaN(EltTy, Negative)
404 : !Flags.noInfs()
405 ? ConstantFP::getInfinity(EltTy, Negative)
406 : ConstantFP::get(EltTy,
407 APFloat::getLargest(Semantics, Negative));
408 }
409 case Intrinsic::vp_reduce_fadd:
410 return ConstantFP::getNegativeZero(EltTy);
411 case Intrinsic::vp_reduce_fmul:
412 return ConstantFP::get(EltTy, 1.0);
413 }
414}
415
416Value *
417CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
420 "Implicitly dropping %evl in non-speculatable operator!");
421
422 Value *Mask = VPI.getMaskParam();
423 Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());
424
425 // Insert neutral element in masked-out positions
426 if (Mask && !isAllTrueMask(Mask)) {
427 auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
428 auto *NeutralVector = Builder.CreateVectorSplat(
429 cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
430 RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
431 }
432
434 Value *Start = VPI.getOperand(VPI.getStartParamPos());
435
436 switch (VPI.getIntrinsicID()) {
437 default:
438 llvm_unreachable("Impossible reduction kind");
439 case Intrinsic::vp_reduce_add:
440 Reduction = Builder.CreateAddReduce(RedOp);
441 Reduction = Builder.CreateAdd(Reduction, Start);
442 break;
443 case Intrinsic::vp_reduce_mul:
444 Reduction = Builder.CreateMulReduce(RedOp);
445 Reduction = Builder.CreateMul(Reduction, Start);
446 break;
447 case Intrinsic::vp_reduce_and:
448 Reduction = Builder.CreateAndReduce(RedOp);
449 Reduction = Builder.CreateAnd(Reduction, Start);
450 break;
451 case Intrinsic::vp_reduce_or:
452 Reduction = Builder.CreateOrReduce(RedOp);
453 Reduction = Builder.CreateOr(Reduction, Start);
454 break;
455 case Intrinsic::vp_reduce_xor:
456 Reduction = Builder.CreateXorReduce(RedOp);
457 Reduction = Builder.CreateXor(Reduction, Start);
458 break;
459 case Intrinsic::vp_reduce_smax:
460 Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ true);
461 Reduction =
462 Builder.CreateBinaryIntrinsic(Intrinsic::smax, Reduction, Start);
463 break;
464 case Intrinsic::vp_reduce_smin:
465 Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ true);
466 Reduction =
467 Builder.CreateBinaryIntrinsic(Intrinsic::smin, Reduction, Start);
468 break;
469 case Intrinsic::vp_reduce_umax:
470 Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ false);
471 Reduction =
472 Builder.CreateBinaryIntrinsic(Intrinsic::umax, Reduction, Start);
473 break;
474 case Intrinsic::vp_reduce_umin:
475 Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ false);
476 Reduction =
477 Builder.CreateBinaryIntrinsic(Intrinsic::umin, Reduction, Start);
478 break;
479 case Intrinsic::vp_reduce_fmax:
480 Reduction = Builder.CreateFPMaxReduce(RedOp);
481 transferDecorations(*Reduction, VPI);
482 Reduction =
483 Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, Reduction, Start);
484 break;
485 case Intrinsic::vp_reduce_fmin:
486 Reduction = Builder.CreateFPMinReduce(RedOp);
487 transferDecorations(*Reduction, VPI);
488 Reduction =
489 Builder.CreateBinaryIntrinsic(Intrinsic::minnum, Reduction, Start);
490 break;
491 case Intrinsic::vp_reduce_fmaximum:
492 Reduction = Builder.CreateFPMaximumReduce(RedOp);
493 transferDecorations(*Reduction, VPI);
494 Reduction =
495 Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Reduction, Start);
496 break;
497 case Intrinsic::vp_reduce_fminimum:
498 Reduction = Builder.CreateFPMinimumReduce(RedOp);
499 transferDecorations(*Reduction, VPI);
500 Reduction =
501 Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Reduction, Start);
502 break;
503 case Intrinsic::vp_reduce_fadd:
504 Reduction = Builder.CreateFAddReduce(Start, RedOp);
505 break;
506 case Intrinsic::vp_reduce_fmul:
507 Reduction = Builder.CreateFMulReduce(Start, RedOp);
508 break;
509 }
510
511 replaceOperation(*Reduction, VPI);
512 return Reduction;
513}
514
515Value *CachingVPExpander::expandPredicationToCastIntrinsic(IRBuilder<> &Builder,
516 VPIntrinsic &VPI) {
517 Value *CastOp = nullptr;
518 switch (VPI.getIntrinsicID()) {
519 default:
520 llvm_unreachable("Not a VP cast intrinsic");
521 case Intrinsic::vp_sext:
522 CastOp =
523 Builder.CreateSExt(VPI.getOperand(0), VPI.getType(), VPI.getName());
524 break;
525 case Intrinsic::vp_zext:
526 CastOp =
527 Builder.CreateZExt(VPI.getOperand(0), VPI.getType(), VPI.getName());
528 break;
529 case Intrinsic::vp_trunc:
530 CastOp =
531 Builder.CreateTrunc(VPI.getOperand(0), VPI.getType(), VPI.getName());
532 break;
533 case Intrinsic::vp_inttoptr:
534 CastOp =
535 Builder.CreateIntToPtr(VPI.getOperand(0), VPI.getType(), VPI.getName());
536 break;
537 case Intrinsic::vp_ptrtoint:
538 CastOp =
539 Builder.CreatePtrToInt(VPI.getOperand(0), VPI.getType(), VPI.getName());
540 break;
541 case Intrinsic::vp_fptosi:
542 CastOp =
543 Builder.CreateFPToSI(VPI.getOperand(0), VPI.getType(), VPI.getName());
544 break;
545
546 case Intrinsic::vp_fptoui:
547 CastOp =
548 Builder.CreateFPToUI(VPI.getOperand(0), VPI.getType(), VPI.getName());
549 break;
550 case Intrinsic::vp_sitofp:
551 CastOp =
552 Builder.CreateSIToFP(VPI.getOperand(0), VPI.getType(), VPI.getName());
553 break;
554 case Intrinsic::vp_uitofp:
555 CastOp =
556 Builder.CreateUIToFP(VPI.getOperand(0), VPI.getType(), VPI.getName());
557 break;
558 case Intrinsic::vp_fptrunc:
559 CastOp =
560 Builder.CreateFPTrunc(VPI.getOperand(0), VPI.getType(), VPI.getName());
561 break;
562 case Intrinsic::vp_fpext:
563 CastOp =
564 Builder.CreateFPExt(VPI.getOperand(0), VPI.getType(), VPI.getName());
565 break;
566 }
567 replaceOperation(*CastOp, VPI);
568 return CastOp;
569}
570
571Value *
572CachingVPExpander::expandPredicationInMemoryIntrinsic(IRBuilder<> &Builder,
573 VPIntrinsic &VPI) {
575
576 const auto &DL = VPI.getDataLayout();
577
578 Value *MaskParam = VPI.getMaskParam();
579 Value *PtrParam = VPI.getMemoryPointerParam();
580 Value *DataParam = VPI.getMemoryDataParam();
581 bool IsUnmasked = isAllTrueMask(MaskParam);
582
583 MaybeAlign AlignOpt = VPI.getPointerAlignment();
584
585 Value *NewMemoryInst = nullptr;
586 switch (VPI.getIntrinsicID()) {
587 default:
588 llvm_unreachable("Not a VP memory intrinsic");
589 case Intrinsic::vp_store:
590 if (IsUnmasked) {
591 StoreInst *NewStore =
592 Builder.CreateStore(DataParam, PtrParam, /*IsVolatile*/ false);
593 if (AlignOpt.has_value())
594 NewStore->setAlignment(*AlignOpt);
595 NewMemoryInst = NewStore;
596 } else
597 NewMemoryInst = Builder.CreateMaskedStore(
598 DataParam, PtrParam, AlignOpt.valueOrOne(), MaskParam);
599
600 break;
601 case Intrinsic::vp_load:
602 if (IsUnmasked) {
603 LoadInst *NewLoad =
604 Builder.CreateLoad(VPI.getType(), PtrParam, /*IsVolatile*/ false);
605 if (AlignOpt.has_value())
606 NewLoad->setAlignment(*AlignOpt);
607 NewMemoryInst = NewLoad;
608 } else
609 NewMemoryInst = Builder.CreateMaskedLoad(
610 VPI.getType(), PtrParam, AlignOpt.valueOrOne(), MaskParam);
611
612 break;
613 case Intrinsic::vp_scatter: {
614 auto *ElementType =
615 cast<VectorType>(DataParam->getType())->getElementType();
616 NewMemoryInst = Builder.CreateMaskedScatter(
617 DataParam, PtrParam,
618 AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam);
619 break;
620 }
621 case Intrinsic::vp_gather: {
622 auto *ElementType = cast<VectorType>(VPI.getType())->getElementType();
623 NewMemoryInst = Builder.CreateMaskedGather(
624 VPI.getType(), PtrParam,
625 AlignOpt.value_or(DL.getPrefTypeAlign(ElementType)), MaskParam, nullptr,
626 VPI.getName());
627 break;
628 }
629 }
630
631 assert(NewMemoryInst);
632 replaceOperation(*NewMemoryInst, VPI);
633 return NewMemoryInst;
634}
635
636Value *CachingVPExpander::expandPredicationInComparison(IRBuilder<> &Builder,
637 VPCmpIntrinsic &VPI) {
639 "Implicitly dropping %evl in non-speculatable operator!");
640
641 assert(*VPI.getFunctionalOpcode() == Instruction::ICmp ||
642 *VPI.getFunctionalOpcode() == Instruction::FCmp);
643
644 Value *Op0 = VPI.getOperand(0);
645 Value *Op1 = VPI.getOperand(1);
646 auto Pred = VPI.getPredicate();
647
648 auto *NewCmp = Builder.CreateCmp(Pred, Op0, Op1);
649
650 replaceOperation(*NewCmp, VPI);
651 return NewCmp;
652}
653
654bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
655 LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");
656
658 return false;
659
660 Value *EVLParam = VPI.getVectorLengthParam();
661 if (!EVLParam)
662 return false;
663
664 ElementCount StaticElemCount = VPI.getStaticVectorLength();
665 Value *MaxEVL = nullptr;
666 Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
667 if (StaticElemCount.isScalable()) {
668 // TODO add caching
669 auto *M = VPI.getModule();
670 Function *VScaleFunc =
671 Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
672 IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
673 Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
674 Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
675 MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
676 /*NUW*/ true, /*NSW*/ false);
677 } else {
678 MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
679 }
680 VPI.setVectorLengthParam(MaxEVL);
681 return true;
682}
683
684std::pair<Value *, bool> CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
685 LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');
686
687 IRBuilder<> Builder(&VPI);
688
689 // Ineffective %evl parameter and so nothing to do here.
691 return {&VPI, false};
692
693 // Only VP intrinsics can have an %evl parameter.
694 Value *OldMaskParam = VPI.getMaskParam();
695 Value *OldEVLParam = VPI.getVectorLengthParam();
696 assert(OldMaskParam && "no mask param to fold the vl param into");
697 assert(OldEVLParam && "no EVL param to fold away");
698
699 LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
700 LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');
701
702 // Convert the %evl predication into vector mask predication.
703 ElementCount ElemCount = VPI.getStaticVectorLength();
704 Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
705 Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
706 VPI.setMaskParam(NewMaskParam);
707
708 // Drop the %evl parameter.
709 discardEVLParameter(VPI);
711 "transformation did not render the evl param ineffective!");
712
713 // Reassess the modified instruction.
714 return {&VPI, true};
715}
716
717Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
718 LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');
719
720 IRBuilder<> Builder(&VPI);
721
722 // Try lowering to a LLVM instruction first.
723 auto OC = VPI.getFunctionalOpcode();
724
725 if (OC && Instruction::isBinaryOp(*OC))
726 return expandPredicationInBinaryOperator(Builder, VPI);
727
728 if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
729 return expandPredicationInReduction(Builder, *VPRI);
730
731 if (auto *VPCmp = dyn_cast<VPCmpIntrinsic>(&VPI))
732 return expandPredicationInComparison(Builder, *VPCmp);
733
735 return expandPredicationToCastIntrinsic(Builder, VPI);
736 }
737
738 switch (VPI.getIntrinsicID()) {
739 default:
740 break;
741 case Intrinsic::vp_fneg: {
742 Value *NewNegOp = Builder.CreateFNeg(VPI.getOperand(0), VPI.getName());
743 replaceOperation(*NewNegOp, VPI);
744 return NewNegOp;
745 }
746 case Intrinsic::vp_abs:
747 case Intrinsic::vp_smax:
748 case Intrinsic::vp_smin:
749 case Intrinsic::vp_umax:
750 case Intrinsic::vp_umin:
751 case Intrinsic::vp_bswap:
752 case Intrinsic::vp_bitreverse:
753 return expandPredicationToIntCall(Builder, VPI,
754 VPI.getFunctionalIntrinsicID().value());
755 case Intrinsic::vp_fabs:
756 case Intrinsic::vp_sqrt:
757 case Intrinsic::vp_maxnum:
758 case Intrinsic::vp_minnum:
759 case Intrinsic::vp_maximum:
760 case Intrinsic::vp_minimum:
761 case Intrinsic::vp_fma:
762 case Intrinsic::vp_fmuladd:
763 return expandPredicationToFPCall(Builder, VPI,
764 VPI.getFunctionalIntrinsicID().value());
765 case Intrinsic::vp_load:
766 case Intrinsic::vp_store:
767 case Intrinsic::vp_gather:
768 case Intrinsic::vp_scatter:
769 return expandPredicationInMemoryIntrinsic(Builder, VPI);
770 }
771
772 if (auto CID = VPI.getConstrainedIntrinsicID())
773 if (Value *Call = expandPredicationToFPCall(Builder, VPI, *CID))
774 return Call;
775
776 return &VPI;
777}
778
779//// } CachingVPExpander
780
781void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
782 // Operations with speculatable lanes do not strictly need predication.
783 if (maySpeculateLanes(VPI)) {
784 // Converting a speculatable VP intrinsic means dropping %mask and %evl.
785 // No need to expand %evl into the %mask only to ignore that code.
786 if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
788 return;
789 }
790
791 // We have to preserve the predicating effect of %evl for this
792 // non-speculatable VP intrinsic.
793 // 1) Never discard %evl.
794 // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
795 // %evl gets folded into %mask.
796 if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
797 (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
799 }
800}
801
803CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
804 auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
805 if (LLVM_LIKELY(!UsingTTIOverrides)) {
806 // No overrides - we are in production.
807 return VPStrat;
808 }
809
810 // Overrides set - we are in testing, the following does not need to be
811 // efficient.
813 VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
814 return VPStrat;
815}
816
818CachingVPExpander::expandVectorPredication(VPIntrinsic &VPI) {
819 auto Strategy = getVPLegalizationStrategy(VPI);
820 sanitizeStrategy(VPI, Strategy);
821
822 VPExpansionDetails Changed = VPExpansionDetails::IntrinsicUnchanged;
823
824 // Transform the EVL parameter.
825 switch (Strategy.EVLParamStrategy) {
827 break;
829 if (discardEVLParameter(VPI))
830 Changed = VPExpansionDetails::IntrinsicUpdated;
831 break;
833 if (auto [NewVPI, Folded] = foldEVLIntoMask(VPI); Folded) {
834 (void)NewVPI;
835 Changed = VPExpansionDetails::IntrinsicUpdated;
836 ++NumFoldedVL;
837 }
838 break;
839 }
840
841 // Replace with a non-predicated operation.
842 switch (Strategy.OpStrategy) {
844 break;
846 llvm_unreachable("Invalid strategy for operators.");
848 if (Value *V = expandPredication(VPI); V != &VPI) {
849 ++NumLoweredVPOps;
850 Changed = VPExpansionDetails::IntrinsicReplaced;
851 }
852 break;
853 }
854
855 return Changed;
856}
857} // namespace
858
861 const TargetTransformInfo &TTI) {
862 return CachingVPExpander(TTI).expandVectorPredication(VPI);
863}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define LLVM_LIKELY(EXPR)
Definition: Compiler.h:236
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
static VPTransform parseOverrideOption(const std::string &TextOpt)
static cl::opt< std::string > MaskTransformOverride("expandvp-override-mask-transform", cl::init(""), cl::Hidden, cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES ". If non-empty, Ignore " "TargetTransformInfo and " "always use this transformation for the %mask parameter (Used in " "testing)."))
static cl::opt< std::string > EVLTransformOverride("expandvp-override-evl-transform", cl::init(""), cl::Hidden, cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES ". If non-empty, ignore " "TargetTransformInfo and " "always use this transformation for the %evl parameter (Used in " "testing)."))
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp)
Transfer all properties from OldOp to NewOp and replace all uses.
static bool isAllTrueMask(Value *MaskVal)
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI)
Transfer operation properties from OldVPI to NewVal.
static bool anyExpandVPOverridesSet()
static bool maySpeculateLanes(VPIntrinsic &VPI)
static Constant * getSafeDivisor(Type *DivTy)
#define VPINTERNAL_VPLEGAL_CASES
loop Loop Strength Reduction
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This pass exposes codegen information to IR-level passes.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition: APInt.h:187
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition: APInt.h:197
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:292
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
static Constant * getNegativeZero(Type *Ty)
Definition: Constants.h:307
static Constant * getQNaN(Type *Ty, bool Negative=false, APInt *Payload=nullptr)
Definition: Constants.cpp:1016
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1399
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
CallInst * CreateMulReduce(Value *Src)
Create a vector int mul reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:438
CallInst * CreateFAddReduce(Value *Acc, Value *Src)
Create a sequential vector fadd reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:418
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:922
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition: IRBuilder.h:508
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2114
Value * CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2121
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:442
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Definition: IRBuilder.cpp:1193
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Definition: IRBuilder.cpp:579
CallInst * CreateConstrainedFPCall(Function *Callee, ArrayRef< Value * > Args, const Twine &Name="", std::optional< RoundingMode > Rounding=std::nullopt, std::optional< fp::ExceptionBehavior > Except=std::nullopt)
Definition: IRBuilder.cpp:1074
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1091
Value * CreateFPToUI(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2087
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2053
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2142
CallInst * CreateAddReduce(Value *Src)
Create a vector int add reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:434
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2101
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:171
CallInst * CreateXorReduce(Value *Src)
Create a vector int XOR reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:450
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:446
CallInst * CreateFPMinReduce(Value *Src)
Create a vector float min reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:470
CallInst * CreateFPMaximumReduce(Value *Src)
Create a vector float maximum reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:474
CallInst * CreateFPMaxReduce(Value *Src)
Create a vector float max reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:466
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:483
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2386
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1807
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2041
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1492
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition: IRBuilder.h:1820
CallInst * CreateIntMaxReduce(Value *Src, bool IsSigned=false)
Create a vector integer max reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:454
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Definition: IRBuilder.cpp:599
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1344
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2137
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2027
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1514
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1683
CallInst * CreateIntMinReduce(Value *Src, bool IsSigned=false)
Create a vector integer min reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:460
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2432
Value * CreateFPExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2130
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1536
CallInst * CreateFMulReduce(Value *Acc, Value *Src)
Create a sequential vector fmul reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:426
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2371
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1747
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1378
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
Definition: IRBuilder.cpp:662
CallInst * CreateFPMinimumReduce(Value *Src)
Create a vector float minimum reduction intrinsic of the source vector.
Definition: IRBuilder.cpp:478
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
Definition: IRBuilder.cpp:631
Value * CreateFPToSI(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2094
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2686
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
bool isBinaryOp() const
Definition: Instruction.h:279
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:55
An instruction for reading from memory.
Definition: Instructions.h:174
void setAlignment(Align Align)
Definition: Instructions.h:213
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
void setAlignment(Align Align)
Definition: Instructions.h:333
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
const fltSemantics & getFltSemantics() const
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:230
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
static IntegerType * getInt32Ty(LLVMContext &C)
Value * getOperand(unsigned i) const
Definition: User.h:169
static bool isVPCast(Intrinsic::ID ID)
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
std::optional< unsigned > getFunctionalIntrinsicID() const
bool canIgnoreVectorLengthParam() const
void setMaskParam(Value *)
Value * getVectorLengthParam() const
void setVectorLengthParam(Value *)
Value * getMemoryDataParam() const
Value * getMemoryPointerParam() const
std::optional< unsigned > getConstrainedIntrinsicID() const
MaybeAlign getPointerAlignment() const
Value * getMaskParam() const
ElementCount getStaticVectorLength() const
std::optional< unsigned > getFunctionalOpcode() const
This represents vector predication reduction intrinsics.
unsigned getStartParamPos() const
unsigned getVectorParamPos() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
bool isConstrainedFPIntrinsic(ID QID)
Returns true if the intrinsic ID is for one of the "Constrained Floating-Point Intrinsics".
Definition: Function.cpp:1559
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
ElementType
The element type of an SRV or UAV resource.
Definition: DXILABI.h:58
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
VPExpansionDetails expandVectorPredicationIntrinsic(VPIntrinsic &VPI, const TargetTransformInfo &TTI)
Expand a vector predication intrinsic.
VPExpansionDetails
Represents the details the expansion of a VP intrinsic.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141