llvm/lib/IR/Instruction.cpp
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // If requested, insert this instruction into a basic block.
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    BB->getInstList().insert(InsertBefore->getIterator(), this);
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
  // Append this instruction to the given basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  InsertAtEnd->getInstList().push_back(this);
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");
  ...
}
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent &&
         "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
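A minimal usage sketch (the helper name is illustrative, not from this file): comesBefore lazily renumbers the parent block on first use, so repeated queries after a single mutation stay cheap.

#include "llvm/IR/Instruction.h"
using namespace llvm;

// Return whichever of two instructions (assumed to share a basic block)
// executes first.
static const Instruction *firstOf(const Instruction *A, const Instruction *B) {
  return A->comesBefore(B) ? A : B;
}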
void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}
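A minimal usage sketch (helper name is illustrative): create an add carrying nuw/nsw and read the flags back through the wrappers above. The dyn_cast guards against IRBuilder constant-folding the expression to a non-Instruction.

#include <cassert>
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *buildNoWrapAdd(IRBuilder<> &B, Value *X, Value *Y) {
  Value *Sum = B.CreateAdd(X, Y, "sum", /*HasNUW=*/true, /*HasNSW=*/true);
  if (auto *I = dyn_cast<Instruction>(Sum))
    assert(I->hasNoUnsignedWrap() && I->hasNoSignedWrap());
  return Sum;
}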
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }

  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}

void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For calls, also drop parameter and return attributes that could imply
  // immediate undefined behavior at the new location.
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
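A minimal usage sketch (helper name is illustrative): when an instruction is hoisted to a point where its operands are no longer known to satisfy the original preconditions, conservatively drop the poison-generating flags.

#include "llvm/IR/Instruction.h"
using namespace llvm;

static void hoistConservatively(Instruction *I, Instruction *InsertPt) {
  I->moveBefore(InsertPt);          // relink into the destination position
  I->dropPoisonGeneratingFlags();   // clear nuw/nsw/exact/inbounds/nnan/ninf
}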
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}
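A minimal usage sketch (helper name is illustrative): relax an fmul for reassociation and contraction, then read the combined flags back as a FastMathFlags value.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

static Value *buildRelaxedFMul(IRBuilder<> &B, Value *X, Value *Y) {
  Value *Prod = B.CreateFMul(X, Y, "prod");
  if (auto *I = dyn_cast<Instruction>(Prod)) {
    I->setHasAllowReassoc(true);
    I->setHasAllowContract(true);
    FastMathFlags FMF = I->getFastMathFlags(); // reassoc and contract now set
    (void)FMF;
  }
  return Prod;
}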
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}

void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
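A minimal usage sketch (helper name is illustrative): when one instruction replaces two equivalent ones, keep only the flags both originals guaranteed.

#include "llvm/IR/Instruction.h"
using namespace llvm;

static void mergeFlagsInto(Instruction *Replacement, const Instruction *A,
                           const Instruction *B) {
  Replacement->copyIRFlags(A); // adopt A's wrap/exact/fast-math/inbounds flags
  Replacement->andIRFlags(B);  // then intersect them with B's flags
}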
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:         return "ret";
  case Br:          return "br";
  case Switch:      return "switch";
  case IndirectBr:  return "indirectbr";
  case Invoke:      return "invoke";
  case Resume:      return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet:  return "cleanupret";
  case CatchRet:    return "catchret";
  case CatchPad:    return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr:      return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add:  return "add";
  case FAdd: return "fadd";
  case Sub:  return "sub";
  case FSub: return "fsub";
  case Mul:  return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or:  return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
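A minimal usage sketch: print the textual opcode of every instruction in a function, using the table above via getOpcodeName().

#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void dumpOpcodes(const Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      errs() << I.getOpcodeName() << "\n";
}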
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  return true;
}
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  ...
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}

bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
           getType()->getScalarType() != I->getType()->getScalarType() :
           getType() != I->getType()))
    return false;

  // All operands must have matching types as well.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
            getOperand(i)->getType()->getScalarType() !=
                I->getOperand(i)->getType()->getScalarType() :
            getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  return any_of(uses(), [BB](const Use &U) {
    auto *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN)
      return I->getParent() != BB;
    return PN->getIncomingBlock(U) != BB;
  });
}
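A minimal usage sketch (helper name is illustrative): a CSE-style redundancy test. isIdenticalTo() also compares operands and optional flags, while isSameOperationAs() only compares the operation, here ignoring load/store alignment.

#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool looksRedundant(const Instruction *A, const Instruction *B) {
  return A->isIdenticalTo(B) ||
         A->isSameOperationAs(B, Instruction::CompareIgnoringAlignment);
}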
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}
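A minimal usage sketch (helper name is illustrative): the usual gate before reordering an instruction across other memory operations.

#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool touchesMemory(const Instruction &I) {
  return I.mayReadFromMemory() || I.mayWriteToMemory();
}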
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default: return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default: return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  return true;
}
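A minimal usage sketch (helper name is illustrative): a conservative "removable if unused" test built from the predicates above; real passes use isInstructionTriviallyDead() and friends instead.

#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool deadIfUnused(const Instruction &I) {
  return !I.mayHaveSideEffects() && !I.mayThrow() && I.willReturn() &&
         !I.isTerminator();
}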
bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}
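A minimal usage sketch: look at the instruction that "really" follows I, stepping over debug intrinsics and pseudo probes.

#include "llvm/IR/Instruction.h"
using namespace llvm;

static const Instruction *nextRealInstruction(const Instruction *I) {
  return I->getNextNonDebugInstruction(/*SkipPseudoOp=*/true);
}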
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;
  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  return isCommutative(getOpcode());
}
// getNumSuccessors / getSuccessor / setSuccessor dispatch to the concrete
// terminator class through the HANDLE_TERM_INST table in Instruction.def.
#define HANDLE_TERM_INST(N, OPC, CLASS) \
  case Instruction::OPC: \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"

#define HANDLE_TERM_INST(N, OPC, CLASS) \
  case Instruction::OPC: \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"

#define HANDLE_TERM_INST(N, OPC, CLASS) \
  case Instruction::OPC: \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  ...
  if (MDName->getString() != "branch_weights")
    return;
  ...
}

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Enumerate and copy over metadata from the old instruction to the new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
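A minimal usage sketch (helper name is illustrative): copy only the debug location and !prof metadata from a replaced instruction onto its replacement, using the whitelist form of copyMetadata() shown above.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

static void copyProfAndDebug(Instruction *New, const Instruction &Old) {
  unsigned Kinds[] = {LLVMContext::MD_dbg, LLVMContext::MD_prof};
  New->copyMetadata(Old, Kinds);
}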
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas) \
  case Instruction::opc: \
    New = cast<clas>(this)->cloneImpl(); \
    break;
#include "llvm/IR/Instruction.def"
  }

  New->SubclassOptionalData = SubclassOptionalData;
  if (!hasMetadata())
    return New;

  // Otherwise, enumerate and copy over metadata from the old instruction to
  // the new one.
  New->copyMetadata(*this);
  return New;
}
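A minimal usage sketch (helper name is illustrative): clone() returns an unlinked, unnamed copy, so the caller still names it (if it produces a value) and inserts it somewhere.

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static Instruction *duplicateBefore(Instruction *I, Instruction *InsertPt) {
  Instruction *Copy = I->clone();
  if (!Copy->getType()->isVoidTy())
    Copy->setName(I->getName() + ".clone");
  Copy->insertBefore(InsertPt);
  return Copy;
}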
const Instruction * getPrevNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the previous non-debug instruction in the same basic block as 'this',...
bool isTerminator() const
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB)
Replace specified successor OldBB to point at the provided block.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
@ Or
Bitwise or logical OR of integers.
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
bool isVolatile() const
Return true if this instruction has a volatile memory access.
void getAllMetadataOtherThanDebugLoc(SmallVectorImpl< std::pair< unsigned, MDNode * >> &MDs) const
This does the same thing as getAllMetadata, except that it filters out the debug location.
Instruction * getNextNode()
Get the next node, or nullptr for the list tail.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
void setHasAllowContract(bool B)
Set or clear the allow-contract flag on this instruction, which must be an operator which supports th...
AttributeMask getUBImplyingAttributes()
Get param/return attributes which imply immediate undefined behavior if an invalid value is passed.
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
bool hasAllowReciprocal() const
Determine whether the allow-reciprocal flag is set.
The instances of the Type class are immutable: once they are created, they are never changed.
bool hasApproxFunc() const
Determine whether the approximate-math-functions flag is set.
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
bool hasOneUser() const
Return true if there is exactly one user of this value.
An instruction for ordering other memory operations.
bool mayThrow() const
Return true if this instruction may throw an exception.
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Convenience struct for specifying and reasoning about fast-math flags.
void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
void dropUnknownNonDebugMetadata()
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM Basic Block Representation.
unsigned getNumOperands() const
Return number of MDNode operands.
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool willReturn() const
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
@ And
Bitwise or logical AND of integers.
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
bool isIdenticalToWhenDefined(const Instruction *I) const
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
iterator_range< use_iterator > uses()
const char * getOpcodeName() const
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
bool isSafeToRemove() const
Return true if the instruction can be removed if the result is unused.
bool mayWriteToMemory() const
Return true if this instruction may modify memory.
bool isLifetimeStartOrEnd() const
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
bool isLaunderOrStripInvariantGroup() const
Return true if the instruction is a llvm.launder.invariant.group or llvm.strip.invariant....
void renumberInstructions()
Renumber instructions and mark the ordering as valid.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setHasNoNaNs(bool B)
Set or clear the no-nans flag on this instruction, which must be an operator which supports this flag...
bool hasAtomicLoad() const
Return true if this atomic instruction loads from memory.
This class is the base class for the comparison instructions.
const MDOperand & getOperand(unsigned I) const
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
An instruction for storing to memory.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void setFast(bool B)
Set or clear all fast-math-flags on this instruction, which must be an operator which supports this f...
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2, bool IgnoreAlignment=false)
Return true if both instructions have the same special state.
bool hasAllowContract() const
Determine whether the allow-contract flag is set.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
bool isIdenticalTo(const Instruction *I) const
Return true if the specified instruction is exactly identical to the current one.
void moveAfter(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isOnlyUserOfAnyOperand()
It checks if this instruction is the only user of at least one of its operands.
bool isInstrOrderValid() const
Returns true if the Order field of child Instructions is valid.
bool hasAtomicStore() const
Return true if this atomic instruction stores to memory.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Instruction * getPrevNode()
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isFast() const
Determine whether all fast-math-flags are set.
A Module instance is used to store all the information related to an LLVM module.
block_iterator block_begin()
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
void dropUndefImplyingAttrsAndUnknownMetadata(ArrayRef< unsigned > KnownIDs={})
This function drops non-debug unknown metadata (through dropUnknownNonDebugMetadata).
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
List that automatically updates parent links and symbol tables.
void setSuccessor(unsigned Idx, BasicBlock *BB)
Update the specified successor to point at the provided block.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Type * getType() const
All values are typed, get the type of this value.
const Function * getFunction() const
Return the function this instruction belongs to.
self_iterator getIterator()
bool isExact() const
Determine whether the exact flag is set.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
bool mayReadFromMemory() const
Return true if this instruction may read memory.
An instruction for reading from memory.
an instruction that atomically reads a memory location, combines it with another value,...
LLVMContext & getContext() const
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
void setHasApproxFunc(bool B)
Set or clear the approximate-math-functions flag on this instruction, which must be an operator which...
@ CompareIgnoringAlignment
Check for equivalence ignoring load/store alignment.
Instruction(const Instruction &)=delete
bool isUsedOutsideOfBlock(const BasicBlock *BB) const
Return true if there are any uses of this instruction in blocks other than the specified block.
Iterator for intrusive lists based on ilist_node.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
bool hasAllowReassoc() const
Determine whether the allow-reassociation flag is set.
bool isDebugOrPseudoInst() const
Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
bool hasNoNaNs() const
Determine whether the no-NaNs flag is set.
bool hasPoisonGeneratingFlags() const
Return true if this operator has flags which may cause this instruction to evaluate to poison despite...
void setHasAllowReciprocal(bool B)
Set or clear the allow-reciprocal flag on this instruction, which must be an operator which supports ...
const InstListType & getInstList() const
Return the underlying instruction list container.
This instruction constructs a fixed permutation of two input vectors.
unsigned getNumOperands() const
void setHasNoInfs(bool B)
Set or clear the no-infs flag on this instruction, which must be an operator which supports this flag...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
void setHasNoSignedZeros(bool B)
Set or clear the no-signed-zeros flag on this instruction, which must be an operator which supports t...
bool isUsedByMetadata() const
Return true if there is metadata referencing this value.
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
const BasicBlock * getParent() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
@ CompareUsingScalarTypes
Check for equivalence treating a type and a vector of that type as equivalent.
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
bool hasNoInfs() const
Determine whether the no-infs flag is set.
bool hasNoSignedZeros() const
Determine whether the no-signed-zeros flag is set.
This class represents a function call, abstracting a target machine's calling convention.
an instruction to allocate memory on the stack
Value * getOperand(unsigned i) const
StringRef getString() const
bool isSameOperationAs(const Instruction *I, unsigned flags=0) const
This function determines if the specified instruction executes the same operation as the current one.
This instruction inserts a struct field of array element value into an aggregate value.
LLVM Value Representation.
An instruction that atomically checks whether a specified value is in a memory location,...
void setHasAllowReassoc(bool B)
Set or clear the reassociation flag on this instruction, which must be an operator which supports thi...
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
@ Xor
Bitwise or logical XOR of integers.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
A Use represents the edge between a Value definition and its users.