using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(300),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);
// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
// pointer to an alloca. If every use is a read (or a non-volatile memcpy from
// constant memory into the alloca), record the copy in TheCopy so the caller
// can replace the alloca with the copy's source. Elided regions from the
// original are marked "...".
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
                               MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // ...
  while (!Worklist.empty()) {
    auto Elem = Worklist.pop_back_val();
    if (!Visited.insert(Elem).second)
      continue;
    // ...
    const auto [Value, IsOffset] = Elem;
    for (auto &U : Value->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<PHINode, SelectInst>(I)) {
        // ...
      }

      if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
        // ...
      }

      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // ...
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // ...
        bool NoCapture = Call->doesNotCapture(DataOpNo);
        if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
            (Call->onlyReadsMemory(DataOpNo) && NoCapture))
          continue;

        // A byval argument means the caller makes a copy, so this is only a
        // read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // Otherwise this must be a memcpy/memmove we can reason about.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is volatile, reject it.
      if (MI->isVolatile())
        return false;

      // If the transfer reads from the alloca, it is just a load; ignore it.
      if (U.getOperandNo() == 1)
        continue;

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we
      // can't safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;
      // ...
    }
  }
  // ...
}
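// The walk is capped by the MaxCopiedFromConstantUsers option defined above,
// so pathological use graphs cannot blow up compile time. Illustrative IR for
// the pattern being validated (a sketch, not taken from this file), assuming
// @g is a constant global of matching size:
//   %a = alloca [16 x i8]
//   call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr @g, i64 16, i1 false)
//   %v = load i8, ptr %a          ; reads only
// If every user is a read, TheCopy records the memcpy and the caller may
// replace %a with @g.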
// In simplifyAllocaArraySize: an alloca with a constant array size is folded
// into an alloca of an array type plus a GEP to element zero.
  if (C->getValue().getActiveBits() <= 64) {
    // ...
    // Scan to the end of the allocation instructions to skip over a block of
    // allocas, including any interleaved debug intrinsics.
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;
    // ...
    Instruction *GEP = GetElementPtrInst::CreateInBounds(
        NewTy, New, Idx, New->getName() + ".sub");
    // ...
  }
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
      : IC(IC), Root(Root), FromAS(SrcAS) {}

  bool collectUsers();
  void replacePointer(Value *V);

private:
  // ...
  bool isAvailable(Instruction *I) const {
    return I == &Root || Worklist.contains(I);
  }

  bool isEqualOrValidAddrSpaceCast(const Instruction *I,
                                   unsigned FromAS) const {
    const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
    if (!ASC)
      return false;
    unsigned ToAS = ASC->getDestAddressSpace();
    return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
  }
  // ...
};
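// Design note: PointerReplacer is deliberately two-phase. collectUsers() must
// prove that *every* transitive user of Root is rewritable before
// replacePointer() mutates anything, so one unsupported user aborts the whole
// transform with the function untouched. A sketch of the intended use case
// (illustrative IR, not from this file): an alloca that is only a copy of a
// constant global in another address space,
//   %a = alloca [4 x i32], align 4, addrspace(5)
//   call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) %a,
//                                    ptr addrspace(4) @g, i64 16, i1 false)
//   %v = load i32, ptr addrspace(5) %a
// can have its users re-pointed at @g, rewriting pointee address spaces along
// the way.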
bool PointerReplacer::collectUsers() {
  if (!collectUsersRecursive(Root))
    return false;

  // Ensure that all outstanding (indirect) users of the root pointer were
  // eventually inserted into the Worklist; return false otherwise.
  for (auto *Inst : ValuesToRevisit)
    if (!Worklist.contains(Inst))
      return false;
  return true;
}
bool PointerReplacer::collectUsersRecursive(Instruction &I) {
  for (auto *U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
      // All incoming values must be instructions for replacability.
      if (any_of(PHI->incoming_values(),
                 [](Value *V) { return !isa<Instruction>(V); }))
        return false;

      // If at least one incoming value of the PHI is not in the Worklist,
      // store the PHI for revisiting and skip this iteration of the loop.
      if (any_of(PHI->incoming_values(), [this](Value *V) {
            return !isAvailable(cast<Instruction>(V));
          })) {
        ValuesToRevisit.insert(Inst);
        continue;
      }

      Worklist.insert(PHI);
      if (!collectUsersRecursive(*PHI))
        return false;
    } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
      if (!isa<Instruction>(SI->getTrueValue()) ||
          !isa<Instruction>(SI->getFalseValue()))
        return false;

      // Park selects whose operands are not yet available, as with PHIs.
      if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
          !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
        ValuesToRevisit.insert(Inst);
        continue;
      }
      Worklist.insert(SI);
      if (!collectUsersRecursive(*SI))
        return false;
    } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsersRecursive(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
      Worklist.insert(Inst);
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }

  return true;
}
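// Why ValuesToRevisit exists (illustrative IR, not from this file): a PHI can
// be reached before all of its incoming pointers have been collected, e.g.
//   loop:
//     %p   = phi ptr [ %root, %entry ], [ %gep, %loop ]
//     %gep = getelementptr i8, ptr %p, i64 4
// Walking from %root reaches %p before %gep is in the Worklist, so %p is
// parked in ValuesToRevisit; collectUsers() succeeds only if every parked
// value was eventually collected via some other path.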
Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    NewI->takeName(LT);
    copyMetadataForLoad(*NewI, *LT);

    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *PHI = dyn_cast<PHINode>(I)) {
    Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
    auto *NewPHI = PHINode::Create(NewTy, PHI->getNumIncomingValues(),
                                   PHI->getName(), PHI);
    for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
      NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
                          PHI->getIncomingBlock(I));
    WorkMap[PHI] = NewPHI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    // ... build the replacement GEP over V with the original indices:
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::getWithSamePointeeType(
        cast<PointerType>(BC->getType()),
        V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else if (auto *SI = dyn_cast<SelectInst>(I)) {
    auto *NewSI = SelectInst::Create(
        SI->getCondition(), getReplacement(SI->getTrueValue()),
        getReplacement(SI->getFalseValue()), SI->getName(), nullptr, SI);
    IC.InsertNewInstWith(NewSI, *SI);
    NewSI->takeName(SI);
    WorkMap[SI] = NewSI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *SrcV = getReplacement(MemCpy->getRawSource());
    // The pointer may appear only in the destination of a copy; in that case
    // there is nothing to rewrite.
    if (!SrcV) {
      assert(getReplacement(MemCpy->getRawDest()) &&
             "destination not in replace list");
      return;
    }

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
        SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
        MemCpy->isVolatile());
    AAMDNodes AAMD = MemCpy->getAAMetadata();
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
    auto *V = getReplacement(ASC->getPointerOperand());
    assert(V && "Operand not replaced");
    assert(isEqualOrValidAddrSpaceCast(
               ASC, V->getType()->getPointerAddressSpace()) &&
           "Invalid address space cast!");
    auto *NewV = V;
    if (V->getType()->getPointerAddressSpace() !=
        ASC->getType()->getPointerAddressSpace()) {
      auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
      NewI->takeName(ASC);
      IC.InsertNewInstWith(NewI, *ASC);
      NewV = NewI;
    }
    IC.replaceInstUsesWith(*ASC, NewV);
    IC.eraseInstFromFunction(*ASC);
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(Root.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && "Invalid usage");
#endif
  WorkMap[&Root] = V;

  for (Instruction *Workitem : Worklist)
    replace(Workitem);
}
// In InstCombinerImpl::visitAllocaInst: move constant-size allocas to the
// start of the entry block, where they cluster with the other allocas.
  if (FirstInst != &AI) {
    // If the entry block doesn't already start with a suitable alloca, move
    // this one there.
    AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
    // ...
  }
  // Also in visitAllocaInst: if the alloca is only ever written by a single
  // copy from constant memory, try to eliminate it.
  Value *TheSrc = Copy->getSource();
  Align AllocaAlign = AI.getAlign();
  Align SourceAlign = getOrEnforceKnownAlignment(
      TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
  if (AllocaAlign <= SourceAlign &&
      isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
      !isa<Instruction>(TheSrc)) {
    // This is the common case.
    LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
    // ...
  }
  // ...
  PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
  if (PtrReplacer.collectUsers()) {
    for (Instruction *Delete : ToDelete)
      eraseInstFromFunction(*Delete);

    Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
    PtrReplacer.replacePointer(Cast);
    ++NumGlobalCopies;
    return &AI;
  }
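  // Net effect when the transform fires (illustrative IR, not from this
  // file): with @g constant and at least as aligned as the alloca,
  //   %a = alloca %T
  //   call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr @g, i64 N, i1 false)
  //   %v = load %T, ptr %a
  // collapses to
  //   %v = load %T, ptr @g
  // and the NumGlobalCopies statistic is incremented.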
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  unsigned AS = LI.getPointerAddressSpace();
  Type *NewPtrTy = NewTy->getPointerTo(AS);
  Value *NewPtr = nullptr;
  if (!(match(LI.getOperand(0), m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType() == NewPtrTy))
    NewPtr = Builder.CreateBitCast(LI.getOperand(0), NewPtrTy);

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
  // ...
}
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Transfer only the metadata kinds that survive a change of stored type.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_DIAssignID:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }
  // ...
}
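// The split in the switch above is deliberate: location-shaped metadata such
// as !tbaa or !alias.scope describes the memory being accessed and remains
// valid when only the stored value's type changes, while value-shaped
// metadata (!nonnull, !range, !align, ...) describes a loaded value and has
// no meaning on a store.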
// isMinMaxWithLoads: returns true if the value stored through V is a min/max
// pattern of loads, i.e. select ((cmp load V1, load V2), load V1, load V2);
// LoadTy receives the loaded type.
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // ...
}
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &Load) {
  // FIXME: Volatile and ordered atomic loads could probably be handled here
  // with some care, but it isn't clear that this is important.
  if (!Load.isUnordered())
    return nullptr;

  if (Load.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (Load.getPointerOperand()->isSwiftError())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type.
  // Note that we should not do this for pointer<->integer casts, because that
  // would result in type punning.
  if (Load.hasOneUse()) {
    // Don't transform when the type is x86_amx; it keeps the pass that
    // lowers x86_amx happy.
    Type *LoadTy = Load.getType();
    if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
      assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
      if (BC->getType()->isX86_AMXTy())
        return nullptr;
    }

    if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
      Type *DestTy = CastUser->getDestTy();
      // ...
    }
  }
  // ...
}
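// Example of the enabled fold (illustrative IR, not from this file):
//   %x = load i32, ptr %p
//   %f = bitcast i32 %x to float     ; sole use
// becomes "%f = load float, ptr %p", letting the bitcast die. Pointer <->
// integer casts are excluded precisely because that rewrite would be type
// punning.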
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
  // ...
  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;
  // ...
  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto *SL = DL.getStructLayout(ST);
    // Don't unpack structures with scalable-vector size or padding.
    if (SL->getSizeInBits().isScalable())
      return nullptr;
    if (SL->hasPadding())
      return nullptr;
    // ...
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {Zero, ConstantInt::get(IdxType, i)};
      // ... an in-bounds GEP to element i, then a scalar load:
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // ...
    }
    // ...
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto EltSize = DL.getTypeAllocSize(ET);
    // ...
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {Zero, ConstantInt::get(IdxType, i)};
      // ...
    }
    // ...
  }

  return nullptr;
}
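// Illustrative unpack for a padding-free struct (a sketch; value names here
// are made up, the real code uses ".elt"/".unpack" suffixes):
//   %pair = type { i32, i32 }
//   %v = load %pair, ptr %p
// becomes
//   %e0 = load i32, ptr %p
//   %q  = getelementptr inbounds %pair, ptr %p, i32 0, i32 1
//   %e1 = load i32, ptr %q
//   %t  = insertvalue %pair poison, i32 %e0, 0
//   %v  = insertvalue %pair %t, i32 %e1, 1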
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  // Walk the pointer through casts, PHIs, selects and aliases.
  // ...
  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();
    // ...
    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      append_range(Worklist, PN->incoming_values());
      continue;
    }
    // ...
    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      // ...
    }
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;
      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      // ...
      TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
      // ...
    }
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;
      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }
    // ...
  } while (!Worklist.empty());
  return true;
}
// In canReplaceGEPIdxWithZero: locate the first non-zero index of the GEP,
// then prove that substituting zero there still addresses the same object.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue; // inside the FirstNZIdx index scan
  // ...
  Idx = FirstNZIdx(GEPI);
  // ...
  if (isa<ScalableVectorType>(SourceElementType))
    return false;
  // ...
  if (!AllocTy || !AllocTy->isSized())
    return false;
  // ...
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
  // ...
  auto IsAllNonNegative = [&]() {
    // ...
  };
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
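// Both predicates feed the "memory op through null is UB" folds: in a
// function where null is not a defined address, IR such as
//   store i32 1, ptr null
// has its stored value replaced with poison so that SimplifyCFG can later
// turn the access into 'unreachable'; the analogous load is folded away.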
// In InstCombinerImpl::visitLoadInst:
  // Attempt to improve the alignment.
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);
  // ...
  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered())
    return nullptr;
  // ...
  if (Op->hasOneUse()) {
    // instcombine load (select (Cond, &V1, &V2)) --> select Cond, load &V1,
    // load &V2.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // Loading from either address is fine if both are known safe.
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P.
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P.
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
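// The select fold above, as IR (illustrative):
//   %p = select i1 %c, ptr %a, ptr %b
//   %v = load i32, ptr %p
// becomes, when both sides are safe to load unconditionally:
//   %a.val = load i32, ptr %a
//   %b.val = load i32, ptr %b
//   %v = select i1 %c, i32 %a.val, i32 %b.val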
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
  // Look for a chain of insertvalues that fills an aggregate with all the
  // lanes of a single vector U.
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!match(V, m_Undef()) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that the types are bitwise-compatible.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    // ...
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
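// The shape being matched (illustrative IR, not from this file): a chain that
// moves every lane of one vector into an aggregate,
//   %e0 = extractelement <2 x i32> %vec, i64 0
//   %a0 = insertvalue [2 x i32] undef, i32 %e0, 0
//   %e1 = extractelement <2 x i32> %vec, i64 1
//   %a1 = insertvalue [2 x i32] %a0, i32 %e1, 1
// acts like a bitcast of %vec, so a store of %a1 can store %vec instead.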
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: Volatile and ordered atomic stores could be handled with care.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
    // Don't transform when the type is x86_amx; it keeps the pass that
    // lowers x86_amx happy.
    if (V->getType()->isX86_AMXTy())
      return false;
    // ...
  }
  // ...
  return false;
}
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
  // ...
  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      // ...
    }
    // ...
    auto *SL = DL.getStructLayout(ST);
    // Don't unpack structures with scalable-vector size or padding.
    if (SL->getSizeInBits().isScalable())
      return false;
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName(Addr->getName());
    AddrName += ".repack";
    // ...
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {Zero, ConstantInt::get(IdxType, i)};
      // ... GEP to element i, extractvalue, then a scalar store
    }
    // ...
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName(Addr->getName());
    AddrName += ".repack";
    // ...
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {Zero, ConstantInt::get(IdxType, i)};
      // ...
    }
    // ...
  }

  return false;
}
// equivalentAddressValues - Test if A and B will obviously have the same
// value. isIdenticalToWhenDefined is used (rather than isIdenticalTo) because
// one use is known to dominate the other, so the two values are either equal
// or one of them is undefined.
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
// In removeBitcastsFromLoadStoreOnMinMax: convert
//   store (bitcast (load (bitcast (select ...)))) to store (load (select ...))
// so the min/max-of-loads pattern stays recognizable.
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  // ...
  if (LI->getType() == CmpLoadTy)
    return false;
  // ...
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  // Every user of the load must be a store that does not write back to the
  // load's own address (and does not go through a swifterror pointer).
  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;
  // ...
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    // ...
  }
// In InstCombinerImpl::visitStoreInst:
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);
  // ...
  // Attempt to improve the alignment.
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);
  // ...
  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered())
    return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }
  // ...
  // Do a quick scan over preceding instructions to see if this store makes
  // an earlier store to the same location dead.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (BBI->isDebugOrPseudoInst() ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
          PrevSI->getValueOperand()->getType() ==
              SI.getValueOperand()->getType()) {
        ++NumDeadStore;
        // ...
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is
    // from the pointer we're storing to and is the value being stored, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }
      // Otherwise this is a load from some other location; stores before it
      // may not be dead.
      break;
    }

    // Don't skip over things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }
  // ...
  // store X, null -> turns into 'unreachable' in SimplifyCFG.
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<PoisonValue>(Val))
      return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop.
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);
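// The scan above catches patterns like (illustrative IR):
//   store i32 1, ptr %p      ; dead: overwritten with no intervening read
//   store i32 2, ptr %p
// and the no-op round trip:
//   %x = load i32, ptr %p
//   store i32 %x, ptr %p     ; dead: stores back the value just loaded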
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false;
  // ...
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
    if (!OtherStore ||
        OtherStore->getPointerOperand() != SI.getPointerOperand())
      return false;

    auto *SIVTy = SI.getValueOperand()->getType();
    auto *OSVTy = OtherStore->getValueOperand()->getType();
    return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
           SI.hasSameSpecialState(OtherStore);
  };

  // If the other block ends in an unconditional branch, check for the 'if
  // then else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info and pseudo probes.
    while (BBI->isDebugOrPseudoInst() ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, or isn't a store to the same location, bail.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStoreIsMergeable(OtherStore))
      return false;
  } else {
    // ...
    for (;; --BBI) {
      // ...
      OtherStore = dyn_cast<StoreInst>(BBI);
      if (OtherStoreIsMergeable(OtherStore))
        break;

      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // To eliminate the store in OtherBB, nothing may read or overwrite the
    // stored value in StoreBB. FIXME: This should really be AA driven.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getValueOperand()) {
    // ...
  }
  // ...
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
                    SI.getAlign(), SI.getOrdering(), SI.getSyncScopeID());
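// The transform this implements, as IR (a sketch; the phi name follows the
// "storemerge" convention used by the implementation):
//   then:
//     store i32 %v1, ptr %p
//     br label %join
//   else:
//     store i32 %v2, ptr %p
//     br label %join
// becomes
//   join:
//     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
//     store i32 %storemerge, ptr %p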
Static helpers defined in this file:

static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI, Value *V)
    Combine a store to a new type.
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC, LoadInst &Load)
    Combine loads to match the type of their uses' value after looking through intervening bitcasts.
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr, Instruction &MemI)
static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC, AllocaInst &AI, DominatorTree &DT)
static bool canSimplifyNullStoreOrGEP(StoreInst &SI)
static bool equivalentAddressValues(Value *A, Value *B)
    Test if A and B will obviously have the same value.
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC, GetElementPtrInst *GEPI, Instruction *MemI, unsigned &Idx)
static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op)
static bool isSupportedAtomicType(Type *Ty)
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI, const DataLayout &DL)
    Returns true if V is dereferenceable for the size of the alloca.
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI)
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI)
    Combine stores to match the type of the value being stored.
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI)
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombinerImpl &IC, StoreInst &SI)
    Converts store (bitcast (load (bitcast (select ...)))) to store (load (select ...)).
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V)
    Look for an extractelement/insertvalue sequence that acts like a bitcast.
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy)
    Returns true if the instruction represents a minmax pattern like: select ((cmp load V1, load V2), load V1, load V2).
static bool isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V, MemTransferInst *&TheCopy, SmallVectorImpl<Instruction *> &ToDelete)
    Recursively walk the uses of a (derived) pointer to an alloca, looking for a copy from constant memory.
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize, const DataLayout &DL)