//===- RISCVGatherScatterLowering.cpp - Gather/Scatter lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass custom lowers llvm.gather and llvm.scatter instructions to
// RISC-V intrinsics.
//
//===----------------------------------------------------------------------===//
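//
// For example (illustrative IR, not taken from this file), a gather such as
//   %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 0, i64 2, i64 4, i64 6>
//   %v    = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, ...)
// whose per-lane addresses advance by a constant number of bytes can be
// rewritten to a single @llvm.experimental.vp.strided.load from %base with an
// 8-byte stride (index step 2 times the 4-byte element size).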

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "riscv-gather-scatter-lowering"

namespace {

class RISCVGatherScatterLowering : public FunctionPass {
  const RISCVSubtarget *ST = nullptr;
  const RISCVTargetLowering *TLI = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL = nullptr;

  SmallVector<WeakTrackingVH> MaybeDeadPHIs;

  // Cache of the BasePtr and Stride determined from this GEP. When a GEP is
  // used by multiple gathers/scatters, this allows us to reuse the scalar
  // instructions we created for the first gather/scatter for the others.
  DenseMap<GetElementPtrInst *, std::pair<Value *, Value *>> StridedAddrs;

public:
  static char ID; // Pass identification, replacement for typeid

  RISCVGatherScatterLowering() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

  StringRef getPassName() const override {
    return "RISC-V gather/scatter lowering";
  }

private:
  bool tryCreateStridedLoadStore(IntrinsicInst *II);

  std::pair<Value *, Value *> determineBaseAndStride(Instruction *Ptr,
                                                     IRBuilderBase &Builder);

  bool matchStridedRecurrence(Value *Index, Loop *L, Value *&Stride,
                              PHINode *&BasePtr, BinaryOperator *&Inc,
                              IRBuilderBase &Builder);
};

} // end anonymous namespace

char RISCVGatherScatterLowering::ID = 0;

INITIALIZE_PASS(RISCVGatherScatterLowering, DEBUG_TYPE,
                "RISC-V gather/scatter lowering pass", false, false)

FunctionPass *llvm::createRISCVGatherScatterLoweringPass() {
  return new RISCVGatherScatterLowering();
}

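// Illustrative note (not from the original source): a "strided constant" is a
// vector constant whose lanes form an arithmetic sequence. For example,
//   <4 x i64> <i64 6, i64 8, i64 10, i64 12>
// decomposes into start 6 and stride 2, whereas
//   <4 x i64> <i64 0, i64 1, i64 3, i64 7>
// has no common difference and is rejected by matchStridedConstant below.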
// TODO: Should we consider the mask when looking for a stride?
static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
  if (!isa<FixedVectorType>(StartC->getType()))
    return std::make_pair(nullptr, nullptr);

  unsigned NumElts = cast<FixedVectorType>(StartC->getType())->getNumElements();

  // Check that the start value is a strided constant.
  auto *StartVal =
      dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement((unsigned)0));
  if (!StartVal)
    return std::make_pair(nullptr, nullptr);
  APInt StrideVal(StartVal->getValue().getBitWidth(), 0);
  ConstantInt *Prev = StartVal;
  for (unsigned i = 1; i != NumElts; ++i) {
    auto *C = dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement(i));
    if (!C)
      return std::make_pair(nullptr, nullptr);

    APInt LocalStride = C->getValue() - Prev->getValue();
    if (i == 1)
      StrideVal = LocalStride;
    else if (StrideVal != LocalStride)
      return std::make_pair(nullptr, nullptr);

    Prev = C;
  }

  Value *Stride = ConstantInt::get(StartVal->getType(), StrideVal);

  return std::make_pair(StartVal, Stride);
}

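// Illustrative note (not from the original source): matchStridedStart also
// peels splat operands off simple arithmetic over a strided base. For example,
//   %a = call <4 x i64> @llvm.stepvector.v4i64()   ; start 0, stride 1
//   %b = shl <4 x i64> %a, splat (i64 2)           ; start 0, stride 4
//   %c = add <4 x i64> %b, splat (i64 16)          ; start 16, stride 4
// yields the pair (16, 4) for %c, with the matching scalar shl/add emitted by
// the supplied IRBuilder.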
static std::pair<Value *, Value *> matchStridedStart(Value *Start,
                                                     IRBuilderBase &Builder) {
  // Base case, start is a strided constant.
  auto *StartC = dyn_cast<Constant>(Start);
  if (StartC)
    return matchStridedConstant(StartC);

  // Base case, start is a stepvector.
  if (match(Start, m_Intrinsic<Intrinsic::stepvector>())) {
    auto *Ty = Start->getType()->getScalarType();
    return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
  }

  // Not a constant, maybe it's a strided constant with a splat added or
  // multiplied.
  auto *BO = dyn_cast<BinaryOperator>(Start);
  if (!BO || (BO->getOpcode() != Instruction::Add &&
              BO->getOpcode() != Instruction::Or &&
              BO->getOpcode() != Instruction::Shl &&
              BO->getOpcode() != Instruction::Mul))
    return std::make_pair(nullptr, nullptr);

  if (BO->getOpcode() == Instruction::Or &&
      !cast<PossiblyDisjointInst>(BO)->isDisjoint())
    return std::make_pair(nullptr, nullptr);

  // Look for an operand that is splatted.
  unsigned OtherIndex = 0;
  Value *Splat = getSplatValue(BO->getOperand(1));
  if (!Splat && Instruction::isCommutative(BO->getOpcode())) {
    Splat = getSplatValue(BO->getOperand(0));
    OtherIndex = 1;
  }
  if (!Splat)
    return std::make_pair(nullptr, nullptr);

  Value *Stride;
  std::tie(Start, Stride) =
      matchStridedStart(BO->getOperand(OtherIndex), Builder);
  if (!Start)
    return std::make_pair(nullptr, nullptr);

  Builder.SetInsertPoint(BO);
  Builder.SetCurrentDebugLocation(DebugLoc());
  // Add the splat value to the start or multiply the start and stride by the
  // splat.
  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case Instruction::Or:
    // TODO: We'd be better off creating disjoint or here, but we don't yet
    // have an IRBuilder API for that.
    [[fallthrough]];
  case Instruction::Add:
    Start = Builder.CreateAdd(Start, Splat);
    break;
  case Instruction::Mul:
    Start = Builder.CreateMul(Start, Splat);
    Stride = Builder.CreateMul(Stride, Splat);
    break;
  case Instruction::Shl:
    Start = Builder.CreateShl(Start, Splat);
    Stride = Builder.CreateShl(Stride, Splat);
    break;
  }

  return std::make_pair(Start, Stride);
}

// Recursively walk back the use-def chain until we find a Phi with a strided
// start value. Build and update a scalar recurrence as we unwind the
// recursion. We also update the Stride as we unwind. Our goal is to move all
// of the arithmetic out of the loop.
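//
// Illustrative example (not from the original source): for a vector induction
// variable such as
//   %vec.iv   = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %ph ], [ %vec.next, %loop ]
//   %vec.next = add <4 x i64> %vec.iv, splat (i64 4)
// the function builds the scalar recurrence
//   %vec.iv.scalar   = phi i64 [ 0, %ph ], [ %vec.next.scalar, %loop ]
//   %vec.next.scalar = add i64 %vec.iv.scalar, 4
// and reports a Stride of 1, the per-lane spacing within each iteration.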
bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
                                                        Value *&Stride,
                                                        PHINode *&BasePtr,
                                                        BinaryOperator *&Inc,
                                                        IRBuilderBase &Builder) {
  // Our base case is a Phi.
  if (auto *Phi = dyn_cast<PHINode>(Index)) {
    // A phi node we want to perform this function on should be from the
    // loop header.
    if (Phi->getParent() != L->getHeader())
      return false;

    Value *Step, *Start;
    if (!matchSimpleRecurrence(Phi, Inc, Start, Step) ||
        Inc->getOpcode() != Instruction::Add)
      return false;
    assert(Phi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
    unsigned IncrementingBlock = Phi->getIncomingValue(0) == Inc ? 0 : 1;
    assert(Phi->getIncomingValue(IncrementingBlock) == Inc &&
           "Expected one operand of phi to be Inc");

    // Only proceed if the step is loop invariant.
    if (!L->isLoopInvariant(Step))
      return false;

    // Step should be a splat.
    Step = getSplatValue(Step);
    if (!Step)
      return false;

    std::tie(Start, Stride) = matchStridedStart(Start, Builder);
    if (!Start)
      return false;
    assert(Stride != nullptr);

    // Build scalar phi and increment.
    BasePtr = PHINode::Create(Start->getType(), 2, Phi->getName() + ".scalar",
                              Phi->getIterator());
    Inc = BinaryOperator::CreateAdd(BasePtr, Step, Inc->getName() + ".scalar",
                                    Inc->getIterator());
    BasePtr->addIncoming(Start, Phi->getIncomingBlock(1 - IncrementingBlock));
    BasePtr->addIncoming(Inc, Phi->getIncomingBlock(IncrementingBlock));

    // Note that this Phi might be eligible for removal.
    MaybeDeadPHIs.push_back(Phi);
    return true;
  }

  // Otherwise look for binary operator.
  auto *BO = dyn_cast<BinaryOperator>(Index);
  if (!BO)
    return false;

  switch (BO->getOpcode()) {
  default:
    return false;
  case Instruction::Or:
    // We need to be able to treat Or as Add.
    if (!cast<PossiblyDisjointInst>(BO)->isDisjoint())
      return false;
    break;
  case Instruction::Add:
    break;
  case Instruction::Shl:
    break;
  case Instruction::Mul:
    break;
  }

  // We should have one operand in the loop and one splat.
  Value *OtherOp;
  if (isa<Instruction>(BO->getOperand(0)) &&
      L->contains(cast<Instruction>(BO->getOperand(0)))) {
    Index = cast<Instruction>(BO->getOperand(0));
    OtherOp = BO->getOperand(1);
  } else if (isa<Instruction>(BO->getOperand(1)) &&
             L->contains(cast<Instruction>(BO->getOperand(1))) &&
             Instruction::isCommutative(BO->getOpcode())) {
    Index = cast<Instruction>(BO->getOperand(1));
    OtherOp = BO->getOperand(0);
  } else {
    return false;
  }

  // Make sure other op is loop invariant.
  if (!L->isLoopInvariant(OtherOp))
    return false;

  // Make sure we have a splat.
  Value *SplatOp = getSplatValue(OtherOp);
  if (!SplatOp)
    return false;

  // Recurse up the use-def chain.
  if (!matchStridedRecurrence(Index, L, Stride, BasePtr, Inc, Builder))
    return false;

  // Locate the Step and Start values from the recurrence.
  unsigned StepIndex = Inc->getOperand(0) == BasePtr ? 1 : 0;
  unsigned StartBlock = BasePtr->getOperand(0) == Inc ? 1 : 0;
  Value *Step = Inc->getOperand(StepIndex);
  Value *Start = BasePtr->getOperand(StartBlock);

  // We need to adjust the start value in the preheader.
  Builder.SetInsertPoint(
      BasePtr->getIncomingBlock(StartBlock)->getTerminator());
  Builder.SetCurrentDebugLocation(DebugLoc());

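  // Illustrative example (not from the original source): if the recurrence
  // found above has start 0, step 4 and stride 1, and this operator is
  // "mul <4 x i64> %index, splat (i64 3)", then start, step and stride are
  // all scaled by 3 (to 0, 12 and 3) by the code below, and the scaling is
  // emitted once in the preheader rather than on every loop iteration.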
  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case Instruction::Add:
  case Instruction::Or: {
    // An add only affects the start value. It's ok to do this for Or because
    // we already checked that there are no common set bits.
    Start = Builder.CreateAdd(Start, SplatOp, "start");
    break;
  }
  case Instruction::Mul: {
    Start = Builder.CreateMul(Start, SplatOp, "start");
    Step = Builder.CreateMul(Step, SplatOp, "step");
    Stride = Builder.CreateMul(Stride, SplatOp, "stride");
    break;
  }
  case Instruction::Shl: {
    Start = Builder.CreateShl(Start, SplatOp, "start");
    Step = Builder.CreateShl(Step, SplatOp, "step");
    Stride = Builder.CreateShl(Stride, SplatOp, "stride");
    break;
  }
  }

  Inc->setOperand(StepIndex, Step);
  BasePtr->setIncomingValue(StartBlock, Start);
  return true;
}

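// Illustrative example (not from the original source): determineBaseAndStride
// tries to describe a vector of addresses as one scalar base pointer plus a
// constant byte stride between consecutive lanes. For
//   %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 0, i64 2, i64 4, i64 6>
// it produces a scalar GEP of %base at index 0 and a stride of 8 bytes
// (element stride 2 scaled by the 4-byte size of i32).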
std::pair<Value *, Value *>
RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
                                                   IRBuilderBase &Builder) {

  // A gather/scatter of a splat is a zero strided load/store.
  if (auto *BasePtr = getSplatValue(Ptr)) {
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    return std::make_pair(BasePtr, ConstantInt::get(IntPtrTy, 0));
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return std::make_pair(nullptr, nullptr);

  auto I = StridedAddrs.find(GEP);
  if (I != StridedAddrs.end())
    return I->second;

  SmallVector<Value *, 2> Ops(GEP->operands());

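  // Illustrative example (not from the original source): a GEP whose pointer
  // operand is itself a strided vector of pointers can be handled by
  // recursing on that operand, e.g.
  //   %ptrs  = getelementptr [4 x i32], ptr %base, <4 x i64> <i64 0, i64 1, i64 2, i64 3>
  //   %ptrs2 = getelementptr [4 x i32], <4 x ptr> %ptrs, i64 0, i64 1
  // %ptrs has a stride of 16 bytes (the size of [4 x i32]); the trailing
  // scalar indices of %ptrs2 only shift the base by 4 bytes, so the stride is
  // unchanged.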
  // If the base pointer is a vector, check if it's strided.
  Value *Base = GEP->getPointerOperand();
  if (auto *BaseInst = dyn_cast<Instruction>(Base);
      BaseInst && BaseInst->getType()->isVectorTy()) {
    // If GEP's offset is scalar then we can add it to the base pointer's base.
    auto IsScalar = [](Value *Idx) { return !Idx->getType()->isVectorTy(); };
    if (all_of(GEP->indices(), IsScalar)) {
      auto [BaseBase, Stride] = determineBaseAndStride(BaseInst, Builder);
      if (BaseBase) {
        Builder.SetInsertPoint(GEP);
        SmallVector<Value *> Indices(GEP->indices());
        Value *OffsetBase =
            Builder.CreateGEP(GEP->getSourceElementType(), BaseBase, Indices,
                              GEP->getName() + "offset", GEP->isInBounds());
        return {OffsetBase, Stride};
      }
    }
  }

  // Base pointer needs to be a scalar.
  Value *ScalarBase = Base;
  if (ScalarBase->getType()->isVectorTy()) {
    ScalarBase = getSplatValue(ScalarBase);
    if (!ScalarBase)
      return std::make_pair(nullptr, nullptr);
  }

  std::optional<unsigned> VecOperand;
  unsigned TypeScale = 0;

  // Look for a vector operand and scale.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    if (!Ops[i]->getType()->isVectorTy())
      continue;

    if (VecOperand)
      return std::make_pair(nullptr, nullptr);

    VecOperand = i;

    TypeSize TS = GTI.getSequentialElementStride(*DL);
    if (TS.isScalable())
      return std::make_pair(nullptr, nullptr);

    TypeScale = TS.getFixedValue();
  }

  // We need to find a vector index to simplify.
  if (!VecOperand)
    return std::make_pair(nullptr, nullptr);

  // We can't extract the stride if the arithmetic is done at a different size
  // than the pointer type. Adding the stride later may not wrap correctly.
  // Technically we could handle wider indices, but I don't expect that in
  // practice. Handle one special case here - constants. This simplifies
  // writing test cases.
  Value *VecIndex = Ops[*VecOperand];
  Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
  if (VecIndex->getType() != VecIntPtrTy) {
    auto *VecIndexC = dyn_cast<Constant>(VecIndex);
    if (!VecIndexC)
      return std::make_pair(nullptr, nullptr);
    if (VecIndex->getType()->getScalarSizeInBits() >
        VecIntPtrTy->getScalarSizeInBits())
      VecIndex = ConstantFoldCastInstruction(Instruction::Trunc, VecIndexC,
                                             VecIntPtrTy);
    else
      VecIndex = ConstantFoldCastInstruction(Instruction::SExt, VecIndexC,
                                             VecIntPtrTy);
  }

  // Handle the non-recursive case. This is what we see if the vectorizer
  // decides to use a scalar IV + vid on demand instead of a vector IV.
  auto [Start, Stride] = matchStridedStart(VecIndex, Builder);
  if (Start) {
    assert(Stride);
    Builder.SetInsertPoint(GEP);

    // Replace the vector index with the scalar start and build a scalar GEP.
    Ops[*VecOperand] = Start;
    Type *SourceTy = GEP->getSourceElementType();
    Value *BasePtr =
        Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());

    // Convert stride to pointer size if needed.
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    assert(Stride->getType() == IntPtrTy && "Unexpected type");

    // Scale the stride by the size of the indexed type.
    if (TypeScale != 1)
      Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

    auto P = std::make_pair(BasePtr, Stride);
    StridedAddrs[GEP] = P;
    return P;
  }

  // Make sure we're in a loop and that it has a preheader and a single latch.
  Loop *L = LI->getLoopFor(GEP->getParent());
  if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
    return std::make_pair(nullptr, nullptr);

  BinaryOperator *Inc;
  PHINode *BasePhi;
  if (!matchStridedRecurrence(VecIndex, L, Stride, BasePhi, Inc, Builder))
    return std::make_pair(nullptr, nullptr);

  assert(BasePhi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
  unsigned IncrementingBlock = BasePhi->getOperand(0) == Inc ? 0 : 1;
  assert(BasePhi->getIncomingValue(IncrementingBlock) == Inc &&
         "Expected one operand of phi to be Inc");

  Builder.SetInsertPoint(GEP);

  // Replace the vector index with the scalar phi and build a scalar GEP.
  Ops[*VecOperand] = BasePhi;
  Type *SourceTy = GEP->getSourceElementType();
  Value *BasePtr =
      Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());

  // Final adjustments to stride should go in the start block.
  Builder.SetInsertPoint(
      BasePhi->getIncomingBlock(1 - IncrementingBlock)->getTerminator());

  // Convert stride to pointer size if needed.
  Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
  assert(Stride->getType() == IntPtrTy && "Unexpected type");

  // Scale the stride by the size of the indexed type.
  if (TypeScale != 1)
    Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

  auto P = std::make_pair(BasePtr, Stride);
  StridedAddrs[GEP] = P;
  return P;
}

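// Illustrative before/after (not from the original source) for a masked
// gather whose pointer operand was recognised as base %p with stride 16:
//   %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4,
//                                                      <8 x i1> %m, <8 x i32> %pt)
// becomes, roughly,
//   %l = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i64(
//            ptr %p, i64 16, <8 x i1> %m, i32 8)
//   %v = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %l,
//                                             <8 x i32> %pt, i32 8)
// where the EVL of 8 comes from the fixed element count of the data type.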
bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II) {
  VectorType *DataType;
  Value *StoreVal = nullptr, *Ptr, *Mask, *EVL = nullptr;
  MaybeAlign MA;
  switch (II->getIntrinsicID()) {
  case Intrinsic::masked_gather:
    DataType = cast<VectorType>(II->getType());
    Ptr = II->getArgOperand(0);
    MA = cast<ConstantInt>(II->getArgOperand(1))->getMaybeAlignValue();
    Mask = II->getArgOperand(2);
    break;
  case Intrinsic::vp_gather:
    DataType = cast<VectorType>(II->getType());
    Ptr = II->getArgOperand(0);
    MA = II->getParamAlign(0).value_or(
        DL->getABITypeAlign(DataType->getElementType()));
    Mask = II->getArgOperand(1);
    EVL = II->getArgOperand(2);
    break;
  case Intrinsic::masked_scatter:
    DataType = cast<VectorType>(II->getArgOperand(0)->getType());
    StoreVal = II->getArgOperand(0);
    Ptr = II->getArgOperand(1);
    MA = cast<ConstantInt>(II->getArgOperand(2))->getMaybeAlignValue();
    Mask = II->getArgOperand(3);
    break;
  case Intrinsic::vp_scatter:
    DataType = cast<VectorType>(II->getArgOperand(0)->getType());
    StoreVal = II->getArgOperand(0);
    Ptr = II->getArgOperand(1);
    MA = II->getParamAlign(1).value_or(
        DL->getABITypeAlign(DataType->getElementType()));
    Mask = II->getArgOperand(2);
    EVL = II->getArgOperand(3);
    break;
  default:
    llvm_unreachable("Unexpected intrinsic");
  }

  // Make sure the operation will be supported by the backend.
  EVT DataTypeVT = TLI->getValueType(*DL, DataType);
  if (!MA || !TLI->isLegalStridedLoadStore(DataTypeVT, *MA))
    return false;

  // FIXME: Let the backend type legalize by splitting/widening?
  if (!TLI->isTypeLegal(DataTypeVT))
    return false;

  // Pointer should be an instruction.
  auto *PtrI = dyn_cast<Instruction>(Ptr);
  if (!PtrI)
    return false;

  LLVMContext &Ctx = PtrI->getContext();
  IRBuilder Builder(Ctx, InstSimplifyFolder(*DL));
  Builder.SetInsertPoint(PtrI);

  Value *BasePtr, *Stride;
  std::tie(BasePtr, Stride) = determineBaseAndStride(PtrI, Builder);
  if (!BasePtr)
    return false;
  assert(Stride != nullptr);

  Builder.SetInsertPoint(II);

  if (!EVL)
    EVL = Builder.CreateElementCount(
        Builder.getInt32Ty(), cast<VectorType>(DataType)->getElementCount());

  CallInst *Call;

  if (!StoreVal) {
    Call = Builder.CreateIntrinsic(
        Intrinsic::experimental_vp_strided_load,
        {DataType, BasePtr->getType(), Stride->getType()},
        {BasePtr, Stride, Mask, EVL});

    // Merge llvm.masked.gather's passthru
    if (II->getIntrinsicID() == Intrinsic::masked_gather)
      Call = Builder.CreateIntrinsic(Intrinsic::vp_select, {DataType},
                                     {Mask, Call, II->getArgOperand(3), EVL});
  } else
    Call = Builder.CreateIntrinsic(
        Intrinsic::experimental_vp_strided_store,
        {DataType, BasePtr->getType(), Stride->getType()},
        {StoreVal, BasePtr, Stride, Mask, EVL});

  Call->takeName(II);
  II->replaceAllUsesWith(Call);
  II->eraseFromParent();

  if (PtrI->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(PtrI);

  return true;
}

bool RISCVGatherScatterLowering::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<RISCVTargetMachine>();
  ST = &TM.getSubtarget<RISCVSubtarget>(F);
  if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors())
    return false;

  TLI = ST->getTargetLowering();
  DL = &F.getDataLayout();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  StridedAddrs.clear();

  SmallVector<IntrinsicInst *, 4> Worklist;

  bool Changed = false;

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      if (!II)
        continue;
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_gather:
      case Intrinsic::masked_scatter:
      case Intrinsic::vp_gather:
      case Intrinsic::vp_scatter:
        Worklist.push_back(II);
        break;
      default:
        break;
      }
    }
  }

  // Rewrite gather/scatter to form strided load/store if possible.
  for (auto *II : Worklist)
    Changed |= tryCreateStridedLoadStore(II);

  // Remove any dead phis.
  while (!MaybeDeadPHIs.empty()) {
    if (auto *Phi = dyn_cast_or_null<PHINode>(MaybeDeadPHIs.pop_back_val()))
      RecursivelyDeleteDeadPHINode(Phi);
  }

  return Changed;
}