RISCVGatherScatterLowering.cpp (LLVM 17.0.0git)
//===- RISCVGatherScatterLowering.cpp - Gather/Scatter lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass custom lowers llvm.masked.gather and llvm.masked.scatter
// instructions to RISC-V intrinsics.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "riscv-gather-scatter-lowering"

namespace {

class RISCVGatherScatterLowering : public FunctionPass {
  const RISCVSubtarget *ST = nullptr;
  const RISCVTargetLowering *TLI = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL = nullptr;

  SmallVector<WeakTrackingVH> MaybeDeadPHIs;

  // Cache of the BasePtr and Stride determined from this GEP. When a GEP is
  // used by multiple gathers/scatters, this allows us to reuse the scalar
  // instructions we created for the first gather/scatter for the others.
  DenseMap<GetElementPtrInst *, std::pair<Value *, Value *>> StridedAddrs;

public:
  static char ID; // Pass identification, replacement for typeid

  RISCVGatherScatterLowering() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }

  StringRef getPassName() const override {
    return "RISC-V gather/scatter lowering";
  }

private:
  bool isLegalTypeAndAlignment(Type *DataType, Value *AlignOp);

  bool tryCreateStridedLoadStore(IntrinsicInst *II, Type *DataType, Value *Ptr,
                                 Value *AlignOp);

  std::pair<Value *, Value *> determineBaseAndStride(GetElementPtrInst *GEP,
                                                     IRBuilder<> &Builder);

  bool matchStridedRecurrence(Value *Index, Loop *L, Value *&Stride,
                              PHINode *&BasePtr, BinaryOperator *&Inc,
                              IRBuilder<> &Builder);
};

} // end anonymous namespace

char RISCVGatherScatterLowering::ID = 0;

INITIALIZE_PASS(RISCVGatherScatterLowering, DEBUG_TYPE,
                "RISC-V gather/scatter lowering pass", false, false)

FunctionPass *llvm::createRISCVGatherScatterLoweringPass() {
  return new RISCVGatherScatterLowering();
}

bool RISCVGatherScatterLowering::isLegalTypeAndAlignment(Type *DataType,
                                                         Value *AlignOp) {
  Type *ScalarType = DataType->getScalarType();
  if (!TLI->isLegalElementTypeForRVV(ScalarType))
    return false;

  MaybeAlign MA = cast<ConstantInt>(AlignOp)->getMaybeAlignValue();
  if (MA && MA->value() < DL->getTypeStoreSize(ScalarType).getFixedValue())
    return false;

  // FIXME: Let the backend type legalize by splitting/widening?
  EVT DataVT = TLI->getValueType(*DL, DataType);
  if (!TLI->isTypeLegal(DataVT))
    return false;

  return true;
}

// TODO: Should we consider the mask when looking for a stride?
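// For example, the constant <i64 10, i64 12, i64 14, i64 16> decomposes into a
// start of 10 and a stride of 2; vectors whose elements are not equally spaced
// are rejected.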
static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
  unsigned NumElts = cast<FixedVectorType>(StartC->getType())->getNumElements();

  // Check that the start value is a strided constant.
  auto *StartVal =
      dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement((unsigned)0));
  if (!StartVal)
    return std::make_pair(nullptr, nullptr);
  APInt StrideVal(StartVal->getValue().getBitWidth(), 0);
  ConstantInt *Prev = StartVal;
  for (unsigned i = 1; i != NumElts; ++i) {
    auto *C = dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement(i));
    if (!C)
      return std::make_pair(nullptr, nullptr);

    APInt LocalStride = C->getValue() - Prev->getValue();
    if (i == 1)
      StrideVal = LocalStride;
    else if (StrideVal != LocalStride)
      return std::make_pair(nullptr, nullptr);

    Prev = C;
  }

  Value *Stride = ConstantInt::get(StartVal->getType(), StrideVal);

  return std::make_pair(StartVal, Stride);
}

static std::pair<Value *, Value *> matchStridedStart(Value *Start,
                                                     IRBuilder<> &Builder) {
  // Base case, start is a strided constant.
  auto *StartC = dyn_cast<Constant>(Start);
  if (StartC)
    return matchStridedConstant(StartC);

  // Base case, start is a stepvector.
  if (match(Start, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
    auto *Ty = Start->getType()->getScalarType();
    return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
  }

  // Not a constant, maybe it's a strided constant with a splat added or
  // multiplied.
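  // For example, (stepvector + splat(7)) yields a start of 7 and a stride of
  // 1, and (stepvector * splat(4)) yields a start of 0 and a stride of 4.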
  auto *BO = dyn_cast<BinaryOperator>(Start);
  if (!BO || (BO->getOpcode() != Instruction::Add &&
              BO->getOpcode() != Instruction::Mul))
    return std::make_pair(nullptr, nullptr);

  // Look for an operand that is splatted.
  unsigned OtherIndex = 1;
  Value *Splat = getSplatValue(BO->getOperand(0));
  if (!Splat) {
    Splat = getSplatValue(BO->getOperand(1));
    OtherIndex = 0;
  }
  if (!Splat)
    return std::make_pair(nullptr, nullptr);

  Value *Stride;
  std::tie(Start, Stride) = matchStridedStart(BO->getOperand(OtherIndex),
                                              Builder);
  if (!Start)
    return std::make_pair(nullptr, nullptr);

  Builder.SetInsertPoint(BO);
  Builder.SetCurrentDebugLocation(DebugLoc());
  // Add the splat value to the start or multiply the start and stride by the
  // splat.
  if (BO->getOpcode() == Instruction::Add) {
    Start = Builder.CreateAdd(Start, Splat);
  } else {
    assert(BO->getOpcode() == Instruction::Mul && "Unexpected opcode");
    Start = Builder.CreateMul(Start, Splat);
    Stride = Builder.CreateMul(Stride, Splat);
  }
  return std::make_pair(Start, Stride);
}

// Recursively walk the use-def chain until we find a Phi with a strided start
// value. Build and update a scalar recurrence as we unwind the recursion. We
// also update the Stride as we unwind. Our goal is to move all of the
// arithmetic out of the loop.
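// For example, a vector induction such as
//   %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %ph ],
//                            [ %vec.ind.next, %loop ]
//   %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
// is rewritten in terms of a scalar phi that starts at 0 and steps by 4; the
// per-lane stride that remains is 1.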
bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
                                                        Value *&Stride,
                                                        PHINode *&BasePtr,
                                                        BinaryOperator *&Inc,
                                                        IRBuilder<> &Builder) {
  // Our base case is a Phi.
  if (auto *Phi = dyn_cast<PHINode>(Index)) {
    // A phi node we want to perform this function on should be from the
    // loop header.
    if (Phi->getParent() != L->getHeader())
      return false;

    Value *Step, *Start;
    if (!matchSimpleRecurrence(Phi, Inc, Start, Step) ||
        Inc->getOpcode() != Instruction::Add)
      return false;
    assert(Phi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
    unsigned IncrementingBlock = Phi->getIncomingValue(0) == Inc ? 0 : 1;
    assert(Phi->getIncomingValue(IncrementingBlock) == Inc &&
           "Expected one operand of phi to be Inc");

    // Only proceed if the step is loop invariant.
    if (!L->isLoopInvariant(Step))
      return false;

    // Step should be a splat.
    Step = getSplatValue(Step);
    if (!Step)
      return false;

    std::tie(Start, Stride) = matchStridedStart(Start, Builder);
    if (!Start)
      return false;
    assert(Stride != nullptr);

    // Build scalar phi and increment.
    BasePtr =
        PHINode::Create(Start->getType(), 2, Phi->getName() + ".scalar", Phi);
    Inc = BinaryOperator::CreateAdd(BasePtr, Step, Inc->getName() + ".scalar",
                                    Inc);
    BasePtr->addIncoming(Start, Phi->getIncomingBlock(1 - IncrementingBlock));
    BasePtr->addIncoming(Inc, Phi->getIncomingBlock(IncrementingBlock));

    // Note that this Phi might be eligible for removal.
    MaybeDeadPHIs.push_back(Phi);
    return true;
  }

  // Otherwise look for a binary operator.
  auto *BO = dyn_cast<BinaryOperator>(Index);
  if (!BO)
    return false;

  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Or &&
      BO->getOpcode() != Instruction::Mul &&
      BO->getOpcode() != Instruction::Shl)
    return false;

  // Only support shift by constant.
  if (BO->getOpcode() == Instruction::Shl && !isa<Constant>(BO->getOperand(1)))
    return false;

  // We need to be able to treat Or as Add.
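  // For example, (shl %i, 2) | 1 is equivalent to (shl %i, 2) + 1 because the
  // low bits produced by the shift are known to be zero.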
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(BO->getOperand(0), BO->getOperand(1), *DL))
    return false;

  // We should have one operand in the loop and one splat.
  Value *OtherOp;
  if (isa<Instruction>(BO->getOperand(0)) &&
      L->contains(cast<Instruction>(BO->getOperand(0)))) {
    Index = cast<Instruction>(BO->getOperand(0));
    OtherOp = BO->getOperand(1);
  } else if (isa<Instruction>(BO->getOperand(1)) &&
             L->contains(cast<Instruction>(BO->getOperand(1)))) {
    Index = cast<Instruction>(BO->getOperand(1));
    OtherOp = BO->getOperand(0);
  } else {
    return false;
  }

  // Make sure other op is loop invariant.
  if (!L->isLoopInvariant(OtherOp))
    return false;

  // Make sure we have a splat.
  Value *SplatOp = getSplatValue(OtherOp);
  if (!SplatOp)
    return false;

  // Recurse up the use-def chain.
  if (!matchStridedRecurrence(Index, L, Stride, BasePtr, Inc, Builder))
    return false;

  // Locate the Step and Start values from the recurrence.
  unsigned StepIndex = Inc->getOperand(0) == BasePtr ? 1 : 0;
  unsigned StartBlock = BasePtr->getOperand(0) == Inc ? 1 : 0;
  Value *Step = Inc->getOperand(StepIndex);
  Value *Start = BasePtr->getOperand(StartBlock);

  // We need to adjust the start value in the preheader.
  Builder.SetInsertPoint(
      BasePtr->getIncomingBlock(StartBlock)->getTerminator());
  Builder.SetCurrentDebugLocation(DebugLoc());

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case Instruction::Add:
  case Instruction::Or: {
    // An add only affects the start value. It's ok to do this for Or because
    // we already checked that there are no common set bits.

    // If the start value is Zero, just take the SplatOp.
    if (isa<ConstantInt>(Start) && cast<ConstantInt>(Start)->isZero())
      Start = SplatOp;
    else
      Start = Builder.CreateAdd(Start, SplatOp, "start");
    BasePtr->setIncomingValue(StartBlock, Start);
    break;
  }
  case Instruction::Mul: {
    // If the start is zero we don't need to multiply.
    if (!isa<ConstantInt>(Start) || !cast<ConstantInt>(Start)->isZero())
      Start = Builder.CreateMul(Start, SplatOp, "start");

    Step = Builder.CreateMul(Step, SplatOp, "step");

    // If the Stride is 1 just take the SplatOp.
    if (isa<ConstantInt>(Stride) && cast<ConstantInt>(Stride)->isOne())
      Stride = SplatOp;
    else
      Stride = Builder.CreateMul(Stride, SplatOp, "stride");
    Inc->setOperand(StepIndex, Step);
    BasePtr->setIncomingValue(StartBlock, Start);
    break;
  }
  case Instruction::Shl: {
    // If the start is zero we don't need to shift.
    if (!isa<ConstantInt>(Start) || !cast<ConstantInt>(Start)->isZero())
      Start = Builder.CreateShl(Start, SplatOp, "start");
    Step = Builder.CreateShl(Step, SplatOp, "step");
    Stride = Builder.CreateShl(Stride, SplatOp, "stride");
    Inc->setOperand(StepIndex, Step);
    BasePtr->setIncomingValue(StartBlock, Start);
    break;
  }
  }

  return true;
}

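// Try to rewrite a GEP with a single vector index as a scalar base pointer
// plus a scalar byte stride. For example,
//   %ptrs = getelementptr i32, ptr %base, <4 x i64> %vec.ind
// can be described by a scalar GEP on the start value of %vec.ind together
// with the lane-to-lane stride scaled by 4 (the allocation size of i32).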
std::pair<Value *, Value *>
RISCVGatherScatterLowering::determineBaseAndStride(GetElementPtrInst *GEP,
                                                   IRBuilder<> &Builder) {

  auto I = StridedAddrs.find(GEP);
  if (I != StridedAddrs.end())
    return I->second;

  SmallVector<Value *, 2> Ops(GEP->operands());

  // Base pointer needs to be a scalar.
  if (Ops[0]->getType()->isVectorTy())
    return std::make_pair(nullptr, nullptr);

  std::optional<unsigned> VecOperand;
  unsigned TypeScale = 0;

  // Look for a vector operand and scale.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    if (!Ops[i]->getType()->isVectorTy())
      continue;

    if (VecOperand)
      return std::make_pair(nullptr, nullptr);

    VecOperand = i;

    TypeSize TS = DL->getTypeAllocSize(GTI.getIndexedType());
    if (TS.isScalable())
      return std::make_pair(nullptr, nullptr);

    TypeScale = TS.getFixedValue();
  }

  // We need to find a vector index to simplify.
  if (!VecOperand)
    return std::make_pair(nullptr, nullptr);

  // We can't extract the stride if the arithmetic is done at a different size
  // than the pointer type. Adding the stride later may not wrap correctly.
  // Technically we could handle wider indices, but I don't expect that in
  // practice.
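  // On RV64, for example, that means the vector index must already be an
  // <N x i64> vector when pointers are 64 bits wide.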
  Value *VecIndex = Ops[*VecOperand];
  Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
  if (VecIndex->getType() != VecIntPtrTy)
    return std::make_pair(nullptr, nullptr);

  // Handle the non-recursive case. This is what we see if the vectorizer
  // decides to use a scalar IV + vid on demand instead of a vector IV.
  auto [Start, Stride] = matchStridedStart(VecIndex, Builder);
  if (Start) {
    assert(Stride);
    Builder.SetInsertPoint(GEP);

    // Replace the vector index with the scalar start and build a scalar GEP.
    Ops[*VecOperand] = Start;
    Type *SourceTy = GEP->getSourceElementType();
    Value *BasePtr =
        Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());

    // Convert stride to pointer size if needed.
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    assert(Stride->getType() == IntPtrTy && "Unexpected type");

    // Scale the stride by the size of the indexed type.
    if (TypeScale != 1)
      Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

    auto P = std::make_pair(BasePtr, Stride);
    StridedAddrs[GEP] = P;
    return P;
  }

  // Make sure we're in a loop that has a preheader and a single latch.
  Loop *L = LI->getLoopFor(GEP->getParent());
  if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
    return std::make_pair(nullptr, nullptr);

  BinaryOperator *Inc;
  PHINode *BasePhi;
  if (!matchStridedRecurrence(VecIndex, L, Stride, BasePhi, Inc, Builder))
    return std::make_pair(nullptr, nullptr);

  assert(BasePhi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
  unsigned IncrementingBlock = BasePhi->getOperand(0) == Inc ? 0 : 1;
  assert(BasePhi->getIncomingValue(IncrementingBlock) == Inc &&
         "Expected one operand of phi to be Inc");

  Builder.SetInsertPoint(GEP);

  // Replace the vector index with the scalar phi and build a scalar GEP.
  Ops[*VecOperand] = BasePhi;
  Type *SourceTy = GEP->getSourceElementType();
  Value *BasePtr =
      Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());

  // Final adjustments to stride should go in the start block.
  Builder.SetInsertPoint(
      BasePhi->getIncomingBlock(1 - IncrementingBlock)->getTerminator());

  // Convert stride to pointer size if needed.
  Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
  assert(Stride->getType() == IntPtrTy && "Unexpected type");

  // Scale the stride by the size of the indexed type.
  if (TypeScale != 1)
    Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

  auto P = std::make_pair(BasePtr, Stride);
  StridedAddrs[GEP] = P;
  return P;
}

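// For example, a gather of the form
//   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4,
//                                                      <4 x i1> %mask,
//                                                      <4 x i32> %passthru)
// whose pointers decompose into a scalar base and stride is rewritten to the
// strided form (intrinsic name suffixes shown here are only illustrative):
//   %v = call <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0.i64(
//            <4 x i32> %passthru, ptr %base, i64 %stride, <4 x i1> %mask)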
bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II,
                                                           Type *DataType,
                                                           Value *Ptr,
                                                           Value *AlignOp) {
  // Make sure the operation will be supported by the backend.
  if (!isLegalTypeAndAlignment(DataType, AlignOp))
    return false;

  // Pointer should be a GEP.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return false;

  IRBuilder<> Builder(GEP);

  Value *BasePtr, *Stride;
  std::tie(BasePtr, Stride) = determineBaseAndStride(GEP, Builder);
  if (!BasePtr)
    return false;
  assert(Stride != nullptr);

  Builder.SetInsertPoint(II);

  CallInst *Call;
  if (II->getIntrinsicID() == Intrinsic::masked_gather)
    Call = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_strided_load,
        {DataType, BasePtr->getType(), Stride->getType()},
        {II->getArgOperand(3), BasePtr, Stride, II->getArgOperand(2)});
  else
    Call = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_strided_store,
        {DataType, BasePtr->getType(), Stride->getType()},
        {II->getArgOperand(0), BasePtr, Stride, II->getArgOperand(3)});

  Call->takeName(II);
  II->replaceAllUsesWith(Call);
  II->eraseFromParent();

  if (GEP->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(GEP);

  return true;
}

bool RISCVGatherScatterLowering::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<RISCVTargetMachine>();
  ST = &TM.getSubtarget<RISCVSubtarget>(F);
  if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors())
    return false;

  TLI = ST->getTargetLowering();
  DL = &F.getParent()->getDataLayout();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  StridedAddrs.clear();

  SmallVector<IntrinsicInst *, 4> Gathers;
  SmallVector<IntrinsicInst *, 4> Scatters;

  bool Changed = false;

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      if (II && II->getIntrinsicID() == Intrinsic::masked_gather) {
        Gathers.push_back(II);
      } else if (II && II->getIntrinsicID() == Intrinsic::masked_scatter) {
        Scatters.push_back(II);
      }
    }
  }

  // Rewrite gather/scatter to form strided load/store if possible.
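  // For masked.gather the pointer is operand 0 and the alignment operand 1;
  // for masked.scatter the stored value is operand 0, the pointer operand 1,
  // and the alignment operand 2.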
  for (auto *II : Gathers)
    Changed |= tryCreateStridedLoadStore(
        II, II->getType(), II->getArgOperand(0), II->getArgOperand(1));
  for (auto *II : Scatters)
    Changed |=
        tryCreateStridedLoadStore(II, II->getArgOperand(0)->getType(),
                                  II->getArgOperand(1), II->getArgOperand(2));

  // Remove any dead phis.
  while (!MaybeDeadPHIs.empty()) {
    if (auto *Phi = dyn_cast_or_null<PHINode>(MaybeDeadPHIs.pop_back_val()))
      RecursivelyDeleteDeadPHINode(Phi);
  }

  return Changed;
}