using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(300),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);
cl::opt<bool> EnableInferAlignmentPass(
    "enable-infer-alignment-pass", /* ... */
    cl::desc("Enable the InferAlignment pass, disabling alignment inference in "
             "InstCombine"));
while (!Worklist.empty()) {
  // ... (pop the next element; each entry pairs a pointer with an is-offset flag)
  if (!Visited.insert(Elem).second)
    continue;
  // ...

  const auto [Value, IsOffset] = Elem;
  for (auto &U : Value->uses()) {
    auto *I = cast<Instruction>(U.getUser());

    if (auto *LI = dyn_cast<LoadInst>(I)) {
      // Simple loads are always ok; volatile or atomic loads defeat the
      // transform.
      if (!LI->isSimple())
        return false;
      continue;
    }

    if (isa<PHINode, SelectInst>(I)) {
      // Conservatively treat the result of a phi or select as offset from
      // the alloca.
      // ...
    }
    if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
      // If uses of the cast are ok, we are ok ourselves.
      // ...
    }
    if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // A GEP offsets the pointer unless all of its indices are zero.
      // ...
    }

    if (auto *Call = dyn_cast<CallBase>(I)) {
      // If this is the function being called, treat it like a load.
      if (Call->isCallee(&U))
        continue;

      unsigned DataOpNo = Call->getDataOperandNo(&U);
      bool IsArgOperand = Call->isArgOperand(&U);

      // Inalloca arguments are clobbered by the call.
      if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
        return false;

      // A call that only reads memory and does not capture the pointer is
      // effectively just a load.
      bool NoCapture = Call->doesNotCapture(DataOpNo);
      if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
          (Call->onlyReadsMemory(DataOpNo) && NoCapture))
        continue;

      // A byval argument makes its own copy, so it is only a read.
      if (IsArgOperand && Call->isByValArgument(DataOpNo))
        continue;
    }

    // Lifetime intrinsics can be handled by the caller.
    if (I->isLifetimeStartOrEnd()) {
      assert(I->use_empty() && "Lifetime markers have no result to use!");
      // ...
    }

    // Anything else must be a non-volatile memcpy/memmove into the alloca.
    MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
    if (!MI)
      return false;
    if (MI->isVolatile())
      return false;

    // Using the alloca as the source of the transfer is just a read.
    if (U.getOperandNo() == 1)
      continue;

    // Reject a second copy, an offset pointer, or a transfer whose
    // destination is not the alloca itself.
    if (TheCopy) return false;
    if (IsOffset) return false;
    if (U.getOperandNo() != 0) return false;
    // ... (finally, require the source to be constant memory and record the
    // transfer in TheCopy)
  }
}
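// From simplifyAllocaArraySize: only constant array sizes whose value fits
// in 64 bits are considered for folding into the allocated type.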
if (C->getValue().getActiveBits() <= 64) {
  // ...
}
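// PointerReplacer: when an alloca that is only copied from a constant global
// can be replaced by the copy source in a different address space, this
// helper rewrites the alloca's users (loads, GEPs, phis, selects, memory
// transfers, and address space casts) to use the new pointer.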
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
      : IC(IC), Root(Root), FromAS(SrcAS) {}

  bool collectUsers();
  void replacePointer(Value *V);

private:
  // ...
  bool isAvailable(Instruction *I) const {
    return I == &Root || Worklist.contains(I);
  }

  bool isEqualOrValidAddrSpaceCast(const Instruction *I,
                                   unsigned FromAS) const {
    const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
    if (!ASC)
      return false;
    unsigned ToAS = ASC->getDestAddressSpace();
    return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
  }
  // ... (worklist, revisit set, and replacement map members)
};
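// Phase 1: collect every user reachable from the root pointer, bailing out
// on anything the replacer cannot rewrite. PHIs and selects whose operands
// are not all available yet are queued in ValuesToRevisit.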
bool PointerReplacer::collectUsers() {
  if (!collectUsersRecursive(Root))
    return false;

  // Ensure that all outstanding (indirect) users of I
  // are inserted into the Worklist. Return false otherwise.
  return llvm::set_is_subset(ValuesToRevisit, Worklist);
}

bool PointerReplacer::collectUsersRecursive(Instruction &I) {
  for (auto *U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
      // All incoming values must be instructions for replacability.
      if (any_of(PHI->incoming_values(),
                 [](Value *V) { return !isa<Instruction>(V); }))
        return false;

      // If at least one incoming value is not yet available, store the PHI
      // for revisiting and skip this iteration of the loop.
      if (any_of(PHI->incoming_values(), [this](Value *V) {
            return !isAvailable(cast<Instruction>(V));
          })) {
        ValuesToRevisit.insert(Inst);
        continue;
      }

      Worklist.insert(PHI);
      if (!collectUsersRecursive(*PHI))
        return false;
    } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
      // Both select arms must be instructions.
      if (!isa<Instruction>(SI->getTrueValue()) ||
          !isa<Instruction>(SI->getFalseValue()))
        return false;

      // If either arm is not yet available, revisit this select later.
      if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
          !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
        ValuesToRevisit.insert(Inst);
        continue;
      }
      Worklist.insert(SI);
      if (!collectUsersRecursive(*SI))
        return false;
    } else if (isa<GetElementPtrInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsersRecursive(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
      Worklist.insert(Inst);
      if (!collectUsersRecursive(*Inst))
        return false;
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }

  return true;
}
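// Phase 2: clone each collected instruction against the replacement pointer,
// recording the mapping in WorkMap so later users can look their operands up.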
Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    NewI->takeName(LT);
    copyMetadataForLoad(*NewI, *LT);

    IC.InsertNewInstWith(NewI, LT->getIterator());
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *PHI = dyn_cast<PHINode>(I)) {
    Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
    auto *NewPHI = PHINode::Create(NewTy, PHI->getNumIncomingValues(),
                                   PHI->getName(), PHI->getIterator());
    for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
      NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
                          PHI->getIncomingBlock(I));
    WorkMap[PHI] = NewPHI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices(GEP->indices());
    auto *NewI =
        GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, GEP->getIterator());
    NewI->takeName(GEP);
    NewI->setNoWrapFlags(GEP->getNoWrapFlags());
    WorkMap[GEP] = NewI;
  } else if (auto *SI = dyn_cast<SelectInst>(I)) {
    Value *TrueValue = SI->getTrueValue();
    Value *FalseValue = SI->getFalseValue();
    if (Value *Replacement = getReplacement(TrueValue))
      TrueValue = Replacement;
    if (Value *Replacement = getReplacement(FalseValue))
      FalseValue = Replacement;
    auto *NewSI = SelectInst::Create(SI->getCondition(), TrueValue, FalseValue,
                                     SI->getName(), nullptr, SI);
    IC.InsertNewInstWith(NewSI, SI->getIterator());
    NewSI->takeName(SI);
    WorkMap[SI] = NewSI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *DestV = MemCpy->getRawDest();
    auto *SrcV = MemCpy->getRawSource();
    if (auto *DestReplace = getReplacement(DestV))
      DestV = DestReplace;
    if (auto *SrcReplace = getReplacement(SrcV))
      SrcV = SrcReplace;

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), DestV, MemCpy->getDestAlign(), SrcV,
        MemCpy->getSourceAlign(), MemCpy->getLength(), MemCpy->isVolatile());
    AAMDNodes AAMD = MemCpy->getAAMetadata();
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
    auto *V = getReplacement(ASC->getPointerOperand());
    assert(V && "Operand not replaced");
    assert(isEqualOrValidAddrSpaceCast(
               ASC, V->getType()->getPointerAddressSpace()) &&
           "Invalid address space cast!");

    if (V->getType()->getPointerAddressSpace() !=
        ASC->getType()->getPointerAddressSpace()) {
      auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
      // ...
      IC.InsertNewInstWith(NewI, ASC->getIterator());
      WorkMap[ASC] = NewI;
    } else {
      WorkMap[ASC] = V;
    }
  } else {
    llvm_unreachable("should never reach here");
  }
}
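// Entry point: seed WorkMap with the root-to-V mapping, then run replace()
// over the collected worklist in order.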
void PointerReplacer::replacePointer(Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(Root.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && "Invalid usage");
#endif
  // ... (map Root to V, then replace each worklist entry)
}
if (FirstInst != &AI) {
  // If the entry block doesn't start with a zero-size alloca then move
  // this one to the start of the entry block.
  AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
  // ...
}
// ...
Value *TheSrc = Copy->getSource();
Align SourceAlign = getOrEnforceKnownAlignment(
    TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
if (AllocaAlign <= SourceAlign &&
    isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
    !isa<Instruction>(TheSrc)) {
  LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
  // ...
  PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
  if (PtrReplacer.collectUsers()) {
    // ...
    PtrReplacer.replacePointer(TheSrc);
    // ...
  }
}
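// Helpers that re-create a load or store with a new type while carefully
// preserving (or deliberately dropping) each kind of metadata.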
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  LoadInst *NewLoad =
      Builder.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
                                LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore =
      IC.Builder.CreateAlignedStore(V, Ptr, SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_DIAssignID:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply and are copied to the new store.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores, so drop them.
      break;
    }
  }

  return NewStore;
}
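// combineLoadToOperationType: combine loads to match the type of their uses'
// value after looking through intervening bitcasts.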
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &Load) {
  // Only unordered, used loads of non-swifterror pointers qualify.
  if (!Load.isUnordered())
    return nullptr;

  if (Load.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (Load.getPointerOperand()->isSwiftError())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type.
  if (Load.hasOneUse()) {
    // Don't transform when the type is x86_amx; it keeps the pass that
    // lowers x86_amx happy.
    Type *LoadTy = Load.getType();
    if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
      assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
      if (BC->getType()->isX86_AMXTy())
        return nullptr;
    }

    if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
      Type *DestTy = CastUser->getDestTy();
      // ... (rewrite the load to DestTy when the cast is a no-op)
    }
  }
  // ...
  return nullptr;
}
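// unpackLoadToAggregate: split a load of a small struct or array into loads
// of the individual elements, when the layout has no padding to lose.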
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
  // ...
  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, just load that element.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto *SL = DL.getStructLayout(ST);

    // Don't unpack a structure with a scalable vector or with padding.
    if (SL->getSizeInBits().isScalable())
      return nullptr;
    if (SL->hasPadding())
      return nullptr;
    // ...
    auto *Zero = ConstantInt::get(IdxType, 0);
    // ...
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      // ...
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // ...
    }
    // ...
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto *Zero = ConstantInt::get(IdxType, 0);
    // ...
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      // ...
      auto *L = IC.Builder.CreateAlignedLoad(ET, Ptr, EltAlign,
                                             Name + ".unpack");
      // ...
    }
    // ...
  }
  return nullptr;
}
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  // ...
  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();
    // ...
    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      // ...
    }

    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
      // ... (check that CS * TS does not exceed MaxSize, without overflow)
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }
    // ...
  } while (!Worklist.empty());

  return true;
}
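// canReplaceGEPIdxWithZero / replaceGEPIdxWithZero: when the memory access
// would be undefined unless the first variable GEP index is zero, that index
// can be replaced with the constant zero, exposing further simplifications.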
// Find the first non-zero index of the GEP.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
  if (CI->isZero())
    continue;
// ...
Idx = FirstNZIdx(GEPI);
// ...
Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
if (!AllocTy || !AllocTy->isSized())
  return false;
const DataLayout &DL = IC.getDataLayout();
uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
// ...
auto IsAllNonNegative = [&]() {
  // ... (every index after Idx must be known non-negative)
};
// ...
// In replaceGEPIdxWithZero: rewrite the chosen index to zero.
NewGEPI->setOperand(Idx,
                    ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
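// canSimplifyNullStoreOrGEP / canSimplifyNullLoadOrGEP: detect memory
// accesses through null or undef pointers (possibly behind a GEP) in address
// spaces where dereferencing null is undefined.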
  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(),
                                SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(),
                              GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(),
                             LI.getPointerAddressSpace())))
    return true;
  return false;
}
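// InstCombinerImpl::visitLoadInst: canonicalize the loaded type, improve
// alignment, unpack aggregates, forward previously stored values, and fold
// loads of select instructions.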
  // Attempt to improve the alignment.
  // ...
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);
  // ...

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(&LI, /* ... */ &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);
    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered())
    return nullptr;
  // ...
  if (Op->hasOneUse()) {
    // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P.
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P.
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
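// likeBitCastFromVector: look for an extractelement/insertvalue sequence
// that acts like a bitcast from a vector to an isomorphic struct or array.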
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!match(V, m_Undef()) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that the types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
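// combineStoreToValueType: combine stores to match the type of the value
// being stored, looking through bitcasts.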
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
    // Don't transform when the type is x86_amx; it keeps the pass that
    // lowers x86_amx happy.
    if (V->getType()->isX86_AMXTy())
      return false;
    // ...
  }
  // ...
}
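// unpackStoreToAggregate: split a store of a small struct or array into
// per-element stores, mirroring unpackLoadToAggregate above.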
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, just store that element.
    unsigned Count = ST->getNumElements();
    // ...
    auto *SL = DL.getStructLayout(ST);

    // Don't unpack a structure with a scalable vector or with padding.
    if (SL->getSizeInBits().isScalable())
      return false;
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";
    // ...
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      // ... (GEP to element i, extract it, and store with the common alignment)
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, just store that element.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";
    // ...
    auto *Zero = ConstantInt::get(IdxType, 0);
    // ...
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      // ... (GEP, extract, store, and advance the element offset)
    }
    // ...
  }
  return false;
}
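// equivalentAddressValues: test if A and B will obviously have the same
// value; used by the simple DSE below when comparing store addresses.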
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions. Use
  // isIdenticalToWhenDefined instead of isIdenticalTo because this is only
  // used to compare two uses within the same basic block, so the values
  // either always match or one of them is undefined.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);
  // ...
  // Attempt to improve the alignment.
  // ...
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);
  // ...

  // Don't hack volatile/ordered stores.
  if (!SI.isUnordered())
    return nullptr;

  // If the pointer is an alloca with a single use, the store is dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }
  // ...

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen.
    if (BBI->isDebugOrPseudoInst()) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
          PrevSI->getValueOperand()->getType() ==
              SI.getValueOperand()->getType()) {
        // ... (erase the previous store and requeue SI)
      }
      break;
    }

    // If this is a load of the stored value from the same pointer, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }
      break;
    }

    // Don't skip over loads, throws, or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null and store X, GEP(null, Y) become unreachable in SimplifyCFG.
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<PoisonValue>(Val))
      return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
    return nullptr; // Do not modify these!
  }

  // This is a non-terminator unreachable marker. Don't remove it.
  if (isa<UndefValue>(Ptr)) {
    // ...
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);
  // ...
}
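// InstCombinerImpl::mergeStoreIntoSuccessor: try to transform
//   if () { *P = v1; } else { *P = v2; }
// or
//   *P = v1; if () { *P = v2; }
// into a phi node feeding a single store in the successor block.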
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false; // This code has not been audited for volatile/ordered cases.

  // ... (the successor block must have exactly two predecessors)
  pred_iterator PredIter = pred_begin(DestBB);
  BasicBlock *OtherBB = *PredIter++;
  if (*PredIter == StoreBB)
    OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
    if (!OtherStore ||
        OtherStore->getPointerOperand() != SI.getPointerOperand())
      return false;

    auto *SIVTy = SI.getValueOperand()->getType();
    auto *OSVTy = OtherStore->getValueOperand()->getType();
    return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
           SI.hasSameSpecialState(OtherStore);
  };

  // If the other block ends in an unconditional branch, check for the
  // 'if/then/else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info and pseudo probes.
    while (BBI->isDebugOrPseudoInst()) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a mergeable store to the same location, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStoreIsMergeable(OtherStore))
      return false;
  } else {
    // Otherwise this is the if/then triangle: look for a store to the same
    // pointer in OtherBB.
    // ...
    for (;; --BBI) {
      // Check to see if we find the matching store.
      OtherStore = dyn_cast<StoreInst>(BBI);
      if (OtherStoreIsMergeable(OtherStore))
        break;

      // If we find anything that might be affecting memory, bail out.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // To eliminate the store in OtherBB, nothing may read or overwrite the
    // stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getValueOperand()) {
    PHINode *PN =
        PHINode::Create(SI.getValueOperand()->getType(), 2, "storemerge");
    PN->addIncoming(SI.getValueOperand(), SI.getParent());
    // ...
  }

  // Advance to a place where it is safe to insert the new store.
  // ...
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
  // ...
}