LLVM 22.0.0git
RISCVGatherScatterLowering.cpp
Go to the documentation of this file.
1//===- RISCVGatherScatterLowering.cpp - Gather/Scatter lowering -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass custom lowers llvm.gather and llvm.scatter instructions to
10// RISC-V intrinsics.
11//
12//===----------------------------------------------------------------------===//
13
#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>
27
28using namespace llvm;
29using namespace PatternMatch;
30
31#define DEBUG_TYPE "riscv-gather-scatter-lowering"
32
33namespace {
34
35class RISCVGatherScatterLowering : public FunctionPass {
36 const RISCVSubtarget *ST = nullptr;
37 const RISCVTargetLowering *TLI = nullptr;
38 LoopInfo *LI = nullptr;
39 const DataLayout *DL = nullptr;
40
41 SmallVector<WeakTrackingVH> MaybeDeadPHIs;
42
43 // Cache of the BasePtr and Stride determined from this GEP. When a GEP is
44 // used by multiple gathers/scatters, this allow us to reuse the scalar
45 // instructions we created for the first gather/scatter for the others.
47
48public:
49 static char ID; // Pass identification, replacement for typeid
50
51 RISCVGatherScatterLowering() : FunctionPass(ID) {}
52
53 bool runOnFunction(Function &F) override;
54
55 void getAnalysisUsage(AnalysisUsage &AU) const override {
56 AU.setPreservesCFG();
59 }
60
61 StringRef getPassName() const override {
62 return "RISC-V gather/scatter lowering";
63 }
64
65private:
66 bool tryCreateStridedLoadStore(IntrinsicInst *II);
67
68 std::pair<Value *, Value *> determineBaseAndStride(Instruction *Ptr,
69 IRBuilderBase &Builder);
70
71 bool matchStridedRecurrence(Value *Index, Loop *L, Value *&Stride,
72 PHINode *&BasePtr, BinaryOperator *&Inc,
73 IRBuilderBase &Builder);
74};
75
76} // end anonymous namespace
77
// Unique address used as the legacy pass identifier.
char RISCVGatherScatterLowering::ID = 0;

// Register the pass with the legacy pass manager (not a CFG-only pass, not an
// analysis).
INITIALIZE_PASS(RISCVGatherScatterLowering, DEBUG_TYPE,
                "RISC-V gather/scatter lowering pass", false, false)
82
84 return new RISCVGatherScatterLowering();
85}
86
87// TODO: Should we consider the mask when looking for a stride?
88static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
89 if (!isa<FixedVectorType>(StartC->getType()))
90 return std::make_pair(nullptr, nullptr);
91
92 unsigned NumElts = cast<FixedVectorType>(StartC->getType())->getNumElements();
93
94 // Check that the start value is a strided constant.
95 auto *StartVal =
97 if (!StartVal)
98 return std::make_pair(nullptr, nullptr);
99 APInt StrideVal(StartVal->getValue().getBitWidth(), 0);
100 ConstantInt *Prev = StartVal;
101 for (unsigned i = 1; i != NumElts; ++i) {
103 if (!C)
104 return std::make_pair(nullptr, nullptr);
105
106 APInt LocalStride = C->getValue() - Prev->getValue();
107 if (i == 1)
108 StrideVal = LocalStride;
109 else if (StrideVal != LocalStride)
110 return std::make_pair(nullptr, nullptr);
111
112 Prev = C;
113 }
114
115 Value *Stride = ConstantInt::get(StartVal->getType(), StrideVal);
116
117 return std::make_pair(StartVal, Stride);
118}
119
120static std::pair<Value *, Value *> matchStridedStart(Value *Start,
121 IRBuilderBase &Builder) {
122 // Base case, start is a strided constant.
123 auto *StartC = dyn_cast<Constant>(Start);
124 if (StartC)
125 return matchStridedConstant(StartC);
126
127 // Base case, start is a stepvector
129 auto *Ty = Start->getType()->getScalarType();
130 return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
131 }
132
133 // Not a constant, maybe it's a strided constant with a splat added or
134 // multiplied.
135 auto *BO = dyn_cast<BinaryOperator>(Start);
136 if (!BO || (BO->getOpcode() != Instruction::Add &&
137 BO->getOpcode() != Instruction::Or &&
138 BO->getOpcode() != Instruction::Shl &&
139 BO->getOpcode() != Instruction::Mul))
140 return std::make_pair(nullptr, nullptr);
141
142 if (BO->getOpcode() == Instruction::Or &&
143 !cast<PossiblyDisjointInst>(BO)->isDisjoint())
144 return std::make_pair(nullptr, nullptr);
145
146 // Look for an operand that is splatted.
147 unsigned OtherIndex = 0;
148 Value *Splat = getSplatValue(BO->getOperand(1));
149 if (!Splat && Instruction::isCommutative(BO->getOpcode())) {
150 Splat = getSplatValue(BO->getOperand(0));
151 OtherIndex = 1;
152 }
153 if (!Splat)
154 return std::make_pair(nullptr, nullptr);
155
156 Value *Stride;
157 std::tie(Start, Stride) = matchStridedStart(BO->getOperand(OtherIndex),
158 Builder);
159 if (!Start)
160 return std::make_pair(nullptr, nullptr);
161
162 Builder.SetInsertPoint(BO);
163 Builder.SetCurrentDebugLocation(DebugLoc());
164 // Add the splat value to the start or multiply the start and stride by the
165 // splat.
166 switch (BO->getOpcode()) {
167 default:
168 llvm_unreachable("Unexpected opcode");
169 case Instruction::Or:
170 Start = Builder.CreateOr(Start, Splat, "", /*IsDisjoint=*/true);
171 break;
172 case Instruction::Add:
173 Start = Builder.CreateAdd(Start, Splat);
174 break;
175 case Instruction::Mul:
176 Start = Builder.CreateMul(Start, Splat);
177 Stride = Builder.CreateMul(Stride, Splat);
178 break;
179 case Instruction::Shl:
180 Start = Builder.CreateShl(Start, Splat);
181 Stride = Builder.CreateShl(Stride, Splat);
182 break;
183 }
184
185 return std::make_pair(Start, Stride);
186}
187
188// Recursively, walk about the use-def chain until we find a Phi with a strided
189// start value. Build and update a scalar recurrence as we unwind the recursion.
190// We also update the Stride as we unwind. Our goal is to move all of the
191// arithmetic out of the loop.
192bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
193 Value *&Stride,
194 PHINode *&BasePtr,
195 BinaryOperator *&Inc,
196 IRBuilderBase &Builder) {
197 // Our base case is a Phi.
198 if (auto *Phi = dyn_cast<PHINode>(Index)) {
199 // A phi node we want to perform this function on should be from the
200 // loop header.
201 if (Phi->getParent() != L->getHeader())
202 return false;
203
204 Value *Step, *Start;
205 if (!matchSimpleRecurrence(Phi, Inc, Start, Step) ||
206 Inc->getOpcode() != Instruction::Add)
207 return false;
208 assert(Phi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
209 unsigned IncrementingBlock = Phi->getIncomingValue(0) == Inc ? 0 : 1;
210 assert(Phi->getIncomingValue(IncrementingBlock) == Inc &&
211 "Expected one operand of phi to be Inc");
212
213 // Step should be a splat.
214 Step = getSplatValue(Step);
215 if (!Step)
216 return false;
217
218 std::tie(Start, Stride) = matchStridedStart(Start, Builder);
219 if (!Start)
220 return false;
221 assert(Stride != nullptr);
222
223 // Build scalar phi and increment.
224 BasePtr =
225 PHINode::Create(Start->getType(), 2, Phi->getName() + ".scalar", Phi->getIterator());
226 Inc = BinaryOperator::CreateAdd(BasePtr, Step, Inc->getName() + ".scalar",
227 Inc->getIterator());
228 BasePtr->addIncoming(Start, Phi->getIncomingBlock(1 - IncrementingBlock));
229 BasePtr->addIncoming(Inc, Phi->getIncomingBlock(IncrementingBlock));
230
231 // Note that this Phi might be eligible for removal.
232 MaybeDeadPHIs.push_back(Phi);
233 return true;
234 }
235
236 // Otherwise look for binary operator.
237 auto *BO = dyn_cast<BinaryOperator>(Index);
238 if (!BO)
239 return false;
240
241 switch (BO->getOpcode()) {
242 default:
243 return false;
244 case Instruction::Or:
245 // We need to be able to treat Or as Add.
246 if (!cast<PossiblyDisjointInst>(BO)->isDisjoint())
247 return false;
248 break;
249 case Instruction::Add:
250 break;
251 case Instruction::Shl:
252 break;
253 case Instruction::Mul:
254 break;
255 }
256
257 // We should have one operand in the loop and one splat.
258 Value *OtherOp;
259 if (isa<Instruction>(BO->getOperand(0)) &&
260 L->contains(cast<Instruction>(BO->getOperand(0)))) {
261 Index = cast<Instruction>(BO->getOperand(0));
262 OtherOp = BO->getOperand(1);
263 } else if (isa<Instruction>(BO->getOperand(1)) &&
264 L->contains(cast<Instruction>(BO->getOperand(1))) &&
265 Instruction::isCommutative(BO->getOpcode())) {
266 Index = cast<Instruction>(BO->getOperand(1));
267 OtherOp = BO->getOperand(0);
268 } else {
269 return false;
270 }
271
272 // Make sure other op is loop invariant.
273 if (!L->isLoopInvariant(OtherOp))
274 return false;
275
276 // Make sure we have a splat.
277 Value *SplatOp = getSplatValue(OtherOp);
278 if (!SplatOp)
279 return false;
280
281 // Recurse up the use-def chain.
282 if (!matchStridedRecurrence(Index, L, Stride, BasePtr, Inc, Builder))
283 return false;
284
285 // Locate the Step and Start values from the recurrence.
286 unsigned StepIndex = Inc->getOperand(0) == BasePtr ? 1 : 0;
287 unsigned StartBlock = BasePtr->getOperand(0) == Inc ? 1 : 0;
288 Value *Step = Inc->getOperand(StepIndex);
289 Value *Start = BasePtr->getOperand(StartBlock);
290
291 // We need to adjust the start value in the preheader.
292 Builder.SetInsertPoint(
293 BasePtr->getIncomingBlock(StartBlock)->getTerminator());
295
296 // TODO: Share this switch with matchStridedStart?
297 switch (BO->getOpcode()) {
298 default:
299 llvm_unreachable("Unexpected opcode!");
300 case Instruction::Add:
301 case Instruction::Or: {
302 // An add only affects the start value. It's ok to do this for Or because
303 // we already checked that there are no common set bits.
304 Start = Builder.CreateAdd(Start, SplatOp, "start");
305 break;
306 }
307 case Instruction::Mul: {
308 Start = Builder.CreateMul(Start, SplatOp, "start");
309 Stride = Builder.CreateMul(Stride, SplatOp, "stride");
310 break;
311 }
312 case Instruction::Shl: {
313 Start = Builder.CreateShl(Start, SplatOp, "start");
314 Stride = Builder.CreateShl(Stride, SplatOp, "stride");
315 break;
316 }
317 }
318
319 // If the Step was defined inside the loop, adjust it before its definition
320 // instead of in the preheader.
321 if (auto *StepI = dyn_cast<Instruction>(Step); StepI && L->contains(StepI))
322 Builder.SetInsertPoint(*StepI->getInsertionPointAfterDef());
323
324 switch (BO->getOpcode()) {
325 default:
326 break;
327 case Instruction::Mul:
328 Step = Builder.CreateMul(Step, SplatOp, "step");
329 break;
330 case Instruction::Shl:
331 Step = Builder.CreateShl(Step, SplatOp, "step");
332 break;
333 }
334
335 Inc->setOperand(StepIndex, Step);
336 BasePtr->setIncomingValue(StartBlock, Start);
337 return true;
338}
339
340std::pair<Value *, Value *>
341RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
342 IRBuilderBase &Builder) {
343
344 // A gather/scatter of a splat is a zero strided load/store.
345 if (auto *BasePtr = getSplatValue(Ptr)) {
346 Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
347 return std::make_pair(BasePtr, ConstantInt::get(IntPtrTy, 0));
348 }
349
351 if (!GEP)
352 return std::make_pair(nullptr, nullptr);
353
354 auto I = StridedAddrs.find(GEP);
355 if (I != StridedAddrs.end())
356 return I->second;
357
358 SmallVector<Value *, 2> Ops(GEP->operands());
359
360 // If the base pointer is a vector, check if it's strided.
361 Value *Base = GEP->getPointerOperand();
362 if (auto *BaseInst = dyn_cast<Instruction>(Base);
363 BaseInst && BaseInst->getType()->isVectorTy()) {
364 // If GEP's offset is scalar then we can add it to the base pointer's base.
365 auto IsScalar = [](Value *Idx) { return !Idx->getType()->isVectorTy(); };
366 if (all_of(GEP->indices(), IsScalar)) {
367 auto [BaseBase, Stride] = determineBaseAndStride(BaseInst, Builder);
368 if (BaseBase) {
369 Builder.SetInsertPoint(GEP);
370 SmallVector<Value *> Indices(GEP->indices());
371 Value *OffsetBase =
372 Builder.CreateGEP(GEP->getSourceElementType(), BaseBase, Indices,
373 GEP->getName() + "offset", GEP->isInBounds());
374 return {OffsetBase, Stride};
375 }
376 }
377 }
378
379 // Base pointer needs to be a scalar.
380 Value *ScalarBase = Base;
381 if (ScalarBase->getType()->isVectorTy()) {
382 ScalarBase = getSplatValue(ScalarBase);
383 if (!ScalarBase)
384 return std::make_pair(nullptr, nullptr);
385 }
386
387 std::optional<unsigned> VecOperand;
388 unsigned TypeScale = 0;
389
390 // Look for a vector operand and scale.
392 for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
393 if (!Ops[i]->getType()->isVectorTy())
394 continue;
395
396 if (VecOperand)
397 return std::make_pair(nullptr, nullptr);
398
399 VecOperand = i;
400
401 TypeSize TS = GTI.getSequentialElementStride(*DL);
402 if (TS.isScalable())
403 return std::make_pair(nullptr, nullptr);
404
405 TypeScale = TS.getFixedValue();
406 }
407
408 // We need to find a vector index to simplify.
409 if (!VecOperand)
410 return std::make_pair(nullptr, nullptr);
411
412 // We can't extract the stride if the arithmetic is done at a different size
413 // than the pointer type. Adding the stride later may not wrap correctly.
414 // Technically we could handle wider indices, but I don't expect that in
415 // practice. Handle one special case here - constants. This simplifies
416 // writing test cases.
417 Value *VecIndex = Ops[*VecOperand];
418 Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
419 if (VecIndex->getType() != VecIntPtrTy) {
420 auto *VecIndexC = dyn_cast<Constant>(VecIndex);
421 if (!VecIndexC)
422 return std::make_pair(nullptr, nullptr);
423 if (VecIndex->getType()->getScalarSizeInBits() > VecIntPtrTy->getScalarSizeInBits())
424 VecIndex = ConstantFoldCastInstruction(Instruction::Trunc, VecIndexC, VecIntPtrTy);
425 else
426 VecIndex = ConstantFoldCastInstruction(Instruction::SExt, VecIndexC, VecIntPtrTy);
427 }
428
429 // Handle the non-recursive case. This is what we see if the vectorizer
430 // decides to use a scalar IV + vid on demand instead of a vector IV.
431 auto [Start, Stride] = matchStridedStart(VecIndex, Builder);
432 if (Start) {
433 assert(Stride);
434 Builder.SetInsertPoint(GEP);
435
436 // Replace the vector index with the scalar start and build a scalar GEP.
437 Ops[*VecOperand] = Start;
438 Type *SourceTy = GEP->getSourceElementType();
439 Value *BasePtr =
440 Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());
441
442 // Convert stride to pointer size if needed.
443 Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
444 assert(Stride->getType() == IntPtrTy && "Unexpected type");
445
446 // Scale the stride by the size of the indexed type.
447 if (TypeScale != 1)
448 Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));
449
450 auto P = std::make_pair(BasePtr, Stride);
451 StridedAddrs[GEP] = P;
452 return P;
453 }
454
455 // Make sure we're in a loop and that has a pre-header and a single latch.
456 Loop *L = LI->getLoopFor(GEP->getParent());
457 if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
458 return std::make_pair(nullptr, nullptr);
459
460 BinaryOperator *Inc;
461 PHINode *BasePhi;
462 if (!matchStridedRecurrence(VecIndex, L, Stride, BasePhi, Inc, Builder))
463 return std::make_pair(nullptr, nullptr);
464
465 assert(BasePhi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
466 unsigned IncrementingBlock = BasePhi->getOperand(0) == Inc ? 0 : 1;
467 assert(BasePhi->getIncomingValue(IncrementingBlock) == Inc &&
468 "Expected one operand of phi to be Inc");
469
470 Builder.SetInsertPoint(GEP);
471
472 // Replace the vector index with the scalar phi and build a scalar GEP.
473 Ops[*VecOperand] = BasePhi;
474 Type *SourceTy = GEP->getSourceElementType();
475 Value *BasePtr =
476 Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());
477
478 // Final adjustments to stride should go in the start block.
479 Builder.SetInsertPoint(
480 BasePhi->getIncomingBlock(1 - IncrementingBlock)->getTerminator());
481
482 // Convert stride to pointer size if needed.
483 Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
484 assert(Stride->getType() == IntPtrTy && "Unexpected type");
485
486 // Scale the stride by the size of the indexed type.
487 if (TypeScale != 1)
488 Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));
489
490 auto P = std::make_pair(BasePtr, Stride);
491 StridedAddrs[GEP] = P;
492 return P;
493}
494
495bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II) {
496 VectorType *DataType;
497 Value *StoreVal = nullptr, *Ptr, *Mask, *EVL = nullptr;
498 MaybeAlign MA;
499 switch (II->getIntrinsicID()) {
500 case Intrinsic::masked_gather:
501 DataType = cast<VectorType>(II->getType());
502 Ptr = II->getArgOperand(0);
503 MA = cast<ConstantInt>(II->getArgOperand(1))->getMaybeAlignValue();
504 Mask = II->getArgOperand(2);
505 break;
506 case Intrinsic::vp_gather:
507 DataType = cast<VectorType>(II->getType());
508 Ptr = II->getArgOperand(0);
509 MA = II->getParamAlign(0).value_or(
510 DL->getABITypeAlign(DataType->getElementType()));
511 Mask = II->getArgOperand(1);
512 EVL = II->getArgOperand(2);
513 break;
514 case Intrinsic::masked_scatter:
515 DataType = cast<VectorType>(II->getArgOperand(0)->getType());
516 StoreVal = II->getArgOperand(0);
517 Ptr = II->getArgOperand(1);
518 MA = cast<ConstantInt>(II->getArgOperand(2))->getMaybeAlignValue();
519 Mask = II->getArgOperand(3);
520 break;
521 case Intrinsic::vp_scatter:
522 DataType = cast<VectorType>(II->getArgOperand(0)->getType());
523 StoreVal = II->getArgOperand(0);
524 Ptr = II->getArgOperand(1);
525 MA = II->getParamAlign(1).value_or(
526 DL->getABITypeAlign(DataType->getElementType()));
527 Mask = II->getArgOperand(2);
528 EVL = II->getArgOperand(3);
529 break;
530 default:
531 llvm_unreachable("Unexpected intrinsic");
532 }
533
534 // Make sure the operation will be supported by the backend.
535 EVT DataTypeVT = TLI->getValueType(*DL, DataType);
536 if (!MA || !TLI->isLegalStridedLoadStore(DataTypeVT, *MA))
537 return false;
538
539 // FIXME: Let the backend type legalize by splitting/widening?
540 if (!TLI->isTypeLegal(DataTypeVT))
541 return false;
542
543 // Pointer should be an instruction.
544 auto *PtrI = dyn_cast<Instruction>(Ptr);
545 if (!PtrI)
546 return false;
547
548 LLVMContext &Ctx = PtrI->getContext();
549 IRBuilder Builder(Ctx, InstSimplifyFolder(*DL));
550 Builder.SetInsertPoint(PtrI);
551
552 Value *BasePtr, *Stride;
553 std::tie(BasePtr, Stride) = determineBaseAndStride(PtrI, Builder);
554 if (!BasePtr)
555 return false;
556 assert(Stride != nullptr);
557
558 Builder.SetInsertPoint(II);
559
560 if (!EVL)
561 EVL = Builder.CreateElementCount(
562 Builder.getInt32Ty(), cast<VectorType>(DataType)->getElementCount());
563
564 Value *Call;
565
566 if (!StoreVal) {
567 Call = Builder.CreateIntrinsic(
568 Intrinsic::experimental_vp_strided_load,
569 {DataType, BasePtr->getType(), Stride->getType()},
570 {BasePtr, Stride, Mask, EVL});
571
572 // Merge llvm.masked.gather's passthru
573 if (II->getIntrinsicID() == Intrinsic::masked_gather)
574 Call = Builder.CreateSelect(Mask, Call, II->getArgOperand(3));
575 } else
576 Call = Builder.CreateIntrinsic(
577 Intrinsic::experimental_vp_strided_store,
578 {DataType, BasePtr->getType(), Stride->getType()},
579 {StoreVal, BasePtr, Stride, Mask, EVL});
580
581 Call->takeName(II);
582 II->replaceAllUsesWith(Call);
583 II->eraseFromParent();
584
585 if (PtrI->use_empty())
587
588 return true;
589}
590
591bool RISCVGatherScatterLowering::runOnFunction(Function &F) {
592 if (skipFunction(F))
593 return false;
594
595 auto &TPC = getAnalysis<TargetPassConfig>();
596 auto &TM = TPC.getTM<RISCVTargetMachine>();
597 ST = &TM.getSubtarget<RISCVSubtarget>(F);
598 if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors())
599 return false;
600
601 TLI = ST->getTargetLowering();
602 DL = &F.getDataLayout();
603 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
604
605 StridedAddrs.clear();
606
608
609 bool Changed = false;
610
611 for (BasicBlock &BB : F) {
612 for (Instruction &I : BB) {
613 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
614 if (!II)
615 continue;
616 switch (II->getIntrinsicID()) {
617 case Intrinsic::masked_gather:
618 case Intrinsic::masked_scatter:
619 case Intrinsic::vp_gather:
620 case Intrinsic::vp_scatter:
621 Worklist.push_back(II);
622 break;
623 default:
624 break;
625 }
626 }
627 }
628
629 // Rewrite gather/scatter to form strided load/store if possible.
630 for (auto *II : Worklist)
631 Changed |= tryCreateStridedLoadStore(II);
632
633 // Remove any dead phis.
634 while (!MaybeDeadPHIs.empty()) {
635 if (auto *Phi = dyn_cast_or_null<PHINode>(MaybeDeadPHIs.pop_back_val()))
637 }
638
639 return Changed;
640}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool runOnFunction(Function &F, bool PostInlining)
#define DEBUG_TYPE
Hexagon Common GEP
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
uint64_t IntrinsicInst * II
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static std::pair< Value *, Value * > matchStridedStart(Value *Start, IRBuilderBase &Builder)
static std::pair< Value *, Value * > matchStridedConstant(Constant *StartC)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Target-Independent Code Generator Pass Configuration Options pass.
Class for arbitrary precision integers.
Definition APInt.h:78
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
This is the shared class of boolean and integer constants.
Definition Constants.h:87
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1923
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1492
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
LLVM_ABI Value * CreateElementCount(Type *Ty, ElementCount EC)
Create an expression which evaluates to the number of elements in EC at runtime.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
A wrapper class for inspecting calls to intrinsic functions.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Definition LoopInfo.h:596
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const
Return true if a stride load store of the given result type and alignment is legal.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Target-Independent Code Generator Pass Configuration Options.
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
TypeSize getSequentialElementStride(const DataLayout &DL) const
self_iterator getIterator()
Definition ilist_node.h:134
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1707
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:533
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
FunctionPass * createRISCVGatherScatterLoweringPass()
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
generic_gep_type_iterator<> gep_type_iterator
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use no...
Definition Local.cpp:641
LLVM_ABI Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)