52 "disable-i2p-p2i-opt",
cl::init(
false),
53 cl::desc(
"Disables inttoptr/ptrtoint roundtrip optimization"));
59std::optional<TypeSize>
66 assert(!
Size.isScalable() &&
"Array elements cannot have a scalable size");
67 Size *=
C->getZExtValue();
72std::optional<TypeSize>
88 return "both values to select must have same type";
91 return "select values cannot have token type";
96 return "vector select condition element type must be i1";
99 return "selected values for vector select must be vectors";
101 return "vector select requires selected vectors to have "
102 "the same vector length as select condition";
104 return "select condition must be i1 or <n x i1>";
113PHINode::PHINode(
const PHINode &PN)
115 ReservedSpace(PN.getNumOperands()) {
136 Op<-1>().set(
nullptr);
149 bool DeletePHIIfEmpty) {
155 if (RemoveIndices.
empty())
160 return RemoveIndices.
contains(U.getOperandNo());
185void PHINode::growOperands() {
187 unsigned NumOps = e + e / 2;
188 if (NumOps < 2) NumOps = 2;
190 ReservedSpace = NumOps;
201 if (ConstantValue !=
this)
206 if (ConstantValue ==
this)
208 return ConstantValue;
217 Value *ConstantValue =
nullptr;
221 if (ConstantValue && ConstantValue !=
Incoming)
233LandingPadInst::LandingPadInst(
Type *
RetTy,
unsigned NumReservedValues,
234 const Twine &NameStr,
237 init(NumReservedValues, NameStr);
240LandingPadInst::LandingPadInst(
Type *
RetTy,
unsigned NumReservedValues,
243 init(NumReservedValues, NameStr);
246LandingPadInst::LandingPadInst(
Type *
RetTy,
unsigned NumReservedValues,
249 init(NumReservedValues, NameStr);
254 LP.getNumOperands()),
255 ReservedSpace(LP.getNumOperands()) {
259 for (
unsigned I = 0,
E = ReservedSpace;
I !=
E; ++
I)
266 const Twine &NameStr,
272 const Twine &NameStr,
277void LandingPadInst::init(
unsigned NumReservedValues,
const Twine &NameStr) {
278 ReservedSpace = NumReservedValues;
287void LandingPadInst::growOperands(
unsigned Size) {
289 if (ReservedSpace >= e +
Size)
return;
290 ReservedSpace = (std::max(e, 1U) +
Size / 2) * 2;
297 assert(OpNo < ReservedSpace &&
"Growing didn't work!");
309 case Instruction::Call:
311 case Instruction::Invoke:
313 case Instruction::CallBr:
323 case Instruction::Call:
325 case Instruction::Invoke:
327 case Instruction::CallBr:
339 if (ChildOB.getTagName() != OpB.
getTag())
351 return cast<CallBrInst>(
this)->getNumIndirectDests() + 1;
356 if (isa<Function>(V) || isa<Constant>(V))
364 if (
auto *CI = dyn_cast<CallInst>(
this))
365 return CI->isMustTailCall();
371 if (
auto *CI = dyn_cast<CallInst>(
this))
372 return CI->isTailCall();
378 return F->getIntrinsicID();
386 Mask |=
F->getAttributes().getRetNoFPClass();
394 Mask |=
F->getAttributes().getParamNoFPClass(i);
415 if (
F->getAttributes().hasAttrSomewhere(Kind, &
Index))
432 if (!
F->getAttributes().hasParamAttr(ArgNo, Kind))
437 case Attribute::ReadNone:
439 case Attribute::ReadOnly:
441 case Attribute::WriteOnly:
450 if (
auto *CE = dyn_cast<ConstantExpr>(V))
451 if (CE->getOpcode() == BitCast)
452 V = CE->getOperand(0);
454 if (
auto *
F = dyn_cast<Function>(V))
455 return F->getAttributes().hasFnAttr(Kind);
460bool CallBase::hasFnAttrOnCalledFunction(
StringRef Kind)
const {
462 if (
auto *CE = dyn_cast<ConstantExpr>(V))
463 if (CE->getOpcode() == BitCast)
464 V = CE->getOperand(0);
466 if (
auto *
F = dyn_cast<Function>(V))
467 return F->getAttributes().hasFnAttr(Kind);
472template <
typename AK>
473Attribute CallBase::getFnAttrOnCalledFunction(AK Kind)
const {
474 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
477 assert(Kind != Attribute::Memory &&
"Use getMemoryEffects() instead");
481 if (
auto *CE = dyn_cast<ConstantExpr>(V))
482 if (
CE->getOpcode() == BitCast)
483 V =
CE->getOperand(0);
485 if (
auto *
F = dyn_cast<Function>(V))
486 return F->getAttributes().getFnAttr(Kind);
503 const unsigned BeginIndex) {
505 for (
auto &
B : Bundles)
506 It = std::copy(
B.input_begin(),
B.input_end(), It);
509 auto BI = Bundles.
begin();
510 unsigned CurrentIndex = BeginIndex;
513 assert(BI != Bundles.
end() &&
"Incorrect allocation?");
515 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
516 BOI.Begin = CurrentIndex;
517 BOI.End = CurrentIndex + BI->input_size();
518 CurrentIndex = BOI.End;
522 assert(BI == Bundles.
end() &&
"Incorrect allocation?");
533 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
539 assert(OpIdx >=
arg_size() &&
"the Idx is not in the operand bundles");
542 "The Idx isn't in the operand bundle");
546 constexpr unsigned NumberScaling = 1024;
552 while (Begin !=
End) {
553 unsigned ScaledOperandPerBundle =
554 NumberScaling * (std::prev(
End)->End - Begin->
Begin) / (
End - Begin);
555 Current = Begin + (((OpIdx - Begin->
Begin) * NumberScaling) /
556 ScaledOperandPerBundle);
558 Current = std::prev(
End);
559 assert(Current < End && Current >= Begin &&
560 "the operand bundle doesn't cover every value in the range");
561 if (OpIdx >= Current->
Begin && OpIdx < Current->
End)
563 if (OpIdx >= Current->
End)
570 "the operand bundle doesn't cover every value in the range");
583 return Create(CB, Bundles, InsertPt);
595 return Create(CB, Bundles, InsertPt);
601 bool CreateNew =
false;
605 if (Bundle.getTagID() ==
ID) {
612 return CreateNew ?
Create(CB, Bundles, InsertPt) : CB;
618 bool CreateNew =
false;
622 if (Bundle.getTagID() ==
ID) {
629 return CreateNew ?
Create(CB, Bundles, InsertPt) : CB;
727 "NumOperands not set up?");
732 "Calling a function with bad signature!");
734 for (
unsigned i = 0; i != Args.size(); ++i)
737 "Calling a function with a bad signature!");
783CallInst::CallInst(
const CallInst &CI)
786 CI.getNumOperands()) {
801 Args, OpB, CI->
getName(), InsertPt);
815 Args, OpB, CI->
getName(), InsertPt);
828 auto *ProfileData =
getMetadata(LLVMContext::MD_prof);
829 if (ProfileData ==
nullptr)
832 auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
833 if (!ProfDataName || (!ProfDataName->getString().equals(
"branch_weights") &&
834 !ProfDataName->getString().equals(
"VP")))
838 LLVM_DEBUG(
dbgs() <<
"Attempting to update profile weights will result in "
839 "div by 0. Ignoring. Likely the function "
841 <<
" has 0 entry count, and contains call instructions "
842 "with non-zero prof info.");
848 Vals.
push_back(ProfileData->getOperand(0));
849 APInt APS(128, S), APT(128,
T);
850 if (ProfDataName->getString().equals(
"branch_weights") &&
851 ProfileData->getNumOperands() > 0) {
853 APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
860 }
else if (ProfDataName->getString().equals(
"VP"))
861 for (
unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
863 Vals.
push_back(ProfileData->getOperand(i));
865 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
870 Vals.
push_back(ProfileData->getOperand(i + 1));
874 APInt Val(128, Count);
890 const Twine &NameStr) {
895 "NumOperands not set up?");
900 "Invoking a function with bad signature");
902 for (
unsigned i = 0, e = Args.size(); i != e; i++)
905 "Invoking a function with a bad signature!");
925 II.getNumOperands()) {
962 return cast<LandingPadInst>(
getUnwindDest()->getFirstNonPHI());
973 const Twine &NameStr) {
977 ComputeNumOperands(Args.size(), IndirectDests.
size(),
979 "NumOperands not set up?");
984 "Calling a function with bad signature");
986 for (
unsigned i = 0, e = Args.size(); i != e; i++)
989 "Calling a function with a bad signature!");
994 std::copy(Args.begin(), Args.end(),
op_begin());
995 NumIndirectDests = IndirectDests.
size();
997 for (
unsigned i = 0; i != NumIndirectDests; ++i)
1011 CBI.getNumOperands()) {
1017 NumIndirectDests = CBI.NumIndirectDests;
1031 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1046 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1057 RI.getNumOperands()) {
1128 CRI.getNumOperands(),
1129 CRI.getNumOperands()) {
1130 setSubclassData<Instruction::OpaqueField>(
1137void CleanupReturnInst::init(
Value *CleanupPad,
BasicBlock *UnwindBB) {
1139 setSubclassData<UnwindDestField>(
true);
1141 Op<0>() = CleanupPad;
1146CleanupReturnInst::CleanupReturnInst(
Value *CleanupPad,
BasicBlock *UnwindBB,
1152 Values, InsertBefore) {
1153 init(CleanupPad, UnwindBB);
1156CleanupReturnInst::CleanupReturnInst(
Value *CleanupPad,
BasicBlock *UnwindBB,
1161 Values, InsertBefore) {
1162 init(CleanupPad, UnwindBB);
1165CleanupReturnInst::CleanupReturnInst(
Value *CleanupPad,
BasicBlock *UnwindBB,
1170 Values, InsertAtEnd) {
1171 init(CleanupPad, UnwindBB);
1217CatchSwitchInst::CatchSwitchInst(
Value *ParentPad,
BasicBlock *UnwindDest,
1218 unsigned NumReservedValues,
1219 const Twine &NameStr,
1224 ++NumReservedValues;
1225 init(ParentPad, UnwindDest, NumReservedValues + 1);
1229CatchSwitchInst::CatchSwitchInst(
Value *ParentPad,
BasicBlock *UnwindDest,
1230 unsigned NumReservedValues,
1231 const Twine &NameStr,
1236 ++NumReservedValues;
1237 init(ParentPad, UnwindDest, NumReservedValues + 1);
1241CatchSwitchInst::CatchSwitchInst(
Value *ParentPad,
BasicBlock *UnwindDest,
1242 unsigned NumReservedValues,
1247 ++NumReservedValues;
1248 init(ParentPad, UnwindDest, NumReservedValues + 1);
1254 CSI.getNumOperands()) {
1259 for (
unsigned I = 1,
E = ReservedSpace;
I !=
E; ++
I)
1264 unsigned NumReservedValues) {
1265 assert(ParentPad && NumReservedValues);
1267 ReservedSpace = NumReservedValues;
1271 Op<0>() = ParentPad;
1273 setSubclassData<UnwindDestField>(
true);
1280void CatchSwitchInst::growOperands(
unsigned Size) {
1282 assert(NumOperands >= 1);
1283 if (ReservedSpace >= NumOperands +
Size)
1285 ReservedSpace = (NumOperands +
Size / 2) * 2;
1292 assert(OpNo < ReservedSpace &&
"Growing didn't work!");
1300 for (
Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1301 *CurDst = *(CurDst + 1);
1312 const Twine &NameStr) {
1322 FPI.getNumOperands(),
1323 FPI.getNumOperands()) {
1330 const Twine &NameStr,
1335 init(ParentPad, Args, NameStr);
1344 init(ParentPad, Args, NameStr);
1353 init(ParentPad, Args, NameStr);
1376void BranchInst::AssertOK() {
1379 "May only branch on boolean predicates!");
1386 assert(IfTrue &&
"Branch destination may not be null!");
1394 assert(IfTrue &&
"Branch destination may not be null!");
1429 assert(IfTrue &&
"Branch destination may not be null!");
1449 BI.getNumOperands()) {
1453 Op<-3>() = BI.
Op<-3>();
1454 Op<-2>() = BI.
Op<-2>();
1456 Op<-1>() = BI.
Op<-1>();
1462 "Cannot swap successors of an unconditional branch");
1478 assert(!isa<BasicBlock>(Amt) &&
1479 "Passed basic block into allocation size parameter! Use other ctor");
1481 "Allocation array size is not an integer!");
1487 assert(BB &&
"Insertion BB cannot be null when alignment not provided!");
1489 "BB must be in a Function when alignment not provided!");
1491 return DL.getPrefTypeAlign(Ty);
1499 assert(
I &&
"Insertion position cannot be null when alignment not provided!");
1537 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1548 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1558 getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1568 return !CI->isOne();
1588void LoadInst::AssertOK() {
1590 "Ptr must have pointer type.");
1594 assert(BB &&
"Insertion BB cannot be null when alignment not provided!");
1596 "BB must be in a Function when alignment not provided!");
1598 return DL.getABITypeAlign(Ty);
1606 assert(
I &&
"Insertion position cannot be null when alignment not provided!");
1640 SyncScope::System, InsertBef) {}
1645 SyncScope::System, InsertBef) {}
1650 SyncScope::System, InsertAE) {}
1689void StoreInst::AssertOK() {
1692 "Ptr must have pointer type!");
1725 SyncScope::System, InsertBefore) {}
1730 SyncScope::System, InsertAtEnd) {}
1735 SyncScope::System, InsertBefore) {}
1776 insertBefore(*InsertBefore->getParent(), InsertBefore);
1797 "All operands must be non-null!");
1799 "Ptr must have pointer type!");
1801 "Cmp type and NewVal type must be same!");
1814 Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1827 Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1840 Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1851 "atomicrmw instructions can only be atomic.");
1853 "atomicrmw instructions cannot be unordered.");
1862 "All operands must be non-null!");
1864 "Ptr must have pointer type!");
1866 "AtomicRMW instructions must be atomic!");
1934 return "<invalid operation>";
1974 "NumOperands not initialized?");
1983 GEPI.getNumOperands(),
1984 GEPI.getNumOperands()),
1985 SourceElementType(GEPI.SourceElementType),
1986 ResultElementType(GEPI.ResultElementType) {
1992 if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1997 if (!
Idx->getType()->isIntOrIntVectorTy())
1999 if (
auto *Array = dyn_cast<ArrayType>(Ty))
2000 return Array->getElementType();
2001 if (
auto *
Vector = dyn_cast<VectorType>(Ty))
2002 return Vector->getElementType();
2007 if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
2012 if (
auto *Array = dyn_cast<ArrayType>(Ty))
2013 return Array->getElementType();
2014 if (
auto *
Vector = dyn_cast<VectorType>(Ty))
2015 return Vector->getElementType();
2019template <
typename IndexTy>
2021 if (IdxList.
empty())
2023 for (IndexTy V : IdxList.
slice(1)) {
2050 if (!CI->isZero())
return false;
2070 cast<GEPOperator>(
this)->setIsInBounds(
B);
2074 return cast<GEPOperator>(
this)->isInBounds();
2080 return cast<GEPOperator>(
this)->accumulateConstantOffset(
DL,
Offset);
2086 APInt &ConstantOffset)
const {
2088 return cast<GEPOperator>(
this)->collectOffset(
DL,
BitWidth, VariableOffsets,
2103 "Invalid extractelement instruction operands!");
2117 "Invalid extractelement instruction operands!");
2131 "Invalid extractelement instruction operands!");
2155 "Invalid insertelement instruction operands!");
2169 "Invalid insertelement instruction operands!");
2183 "Invalid insertelement instruction operands!");
2196 if (Elt->
getType() != cast<VectorType>(Vec->
getType())->getElementType())
2199 if (!
Index->getType()->isIntegerTy())
2209 assert(V &&
"Cannot create placeholder of nullptr V");
2254 "Invalid shuffle vector instruction operands!");
2273 "Invalid shuffle vector instruction operands!");
2291 "Invalid shuffle vector instruction operands!");
2310 "Invalid shuffle vector instruction operands!");
2326 "Invalid shuffle vector instruction operands!");
2341 "Invalid shuffle vector instruction operands!");
2350 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2351 int NumMaskElts = ShuffleMask.
size();
2353 for (
int i = 0; i != NumMaskElts; ++i) {
2359 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts &&
"Out-of-range mask");
2360 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
2361 NewMask[i] = MaskElt;
2370 if (!isa<VectorType>(V1->
getType()) || V1->
getType() != V2->getType())
2375 cast<VectorType>(V1->
getType())->getElementCount().getKnownMinValue();
2376 for (
int Elem : Mask)
2380 if (isa<ScalableVectorType>(V1->
getType()))
2388 const Value *Mask) {
2395 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
2396 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
2397 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->
getType()))
2401 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
2404 if (
const auto *MV = dyn_cast<ConstantVector>(Mask)) {
2405 unsigned V1Size = cast<FixedVectorType>(V1->
getType())->getNumElements();
2406 for (
Value *
Op : MV->operands()) {
2407 if (
auto *CI = dyn_cast<ConstantInt>(
Op)) {
2408 if (CI->uge(V1Size*2))
2410 }
else if (!isa<UndefValue>(
Op)) {
2417 if (
const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2418 unsigned V1Size = cast<FixedVectorType>(V1->
getType())->getNumElements();
2419 for (
unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
2421 if (CDS->getElementAsInteger(i) >= V1Size*2)
2431 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
2433 if (isa<ConstantAggregateZero>(Mask)) {
2434 Result.resize(EC.getKnownMinValue(), 0);
2438 Result.reserve(EC.getKnownMinValue());
2440 if (EC.isScalable()) {
2441 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
2442 "Scalable vector shuffle mask must be undef or zeroinitializer");
2443 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
2444 for (
unsigned I = 0;
I < EC.getKnownMinValue(); ++
I)
2445 Result.emplace_back(MaskVal);
2449 unsigned NumElts = EC.getKnownMinValue();
2451 if (
auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
2452 for (
unsigned i = 0; i != NumElts; ++i)
2453 Result.push_back(CDS->getElementAsInteger(i));
2456 for (
unsigned i = 0; i != NumElts; ++i) {
2457 Constant *
C = Mask->getAggregateElement(i);
2458 Result.push_back(isa<UndefValue>(
C) ? -1 :
2459 cast<ConstantInt>(
C)->getZExtValue());
2464 ShuffleMask.
assign(Mask.begin(), Mask.end());
2471 if (isa<ScalableVectorType>(ResultTy)) {
2479 for (
int Elem : Mask) {
2489 assert(!Mask.empty() &&
"Shuffle mask must contain elements");
2490 bool UsesLHS =
false;
2491 bool UsesRHS =
false;
2492 for (
int I : Mask) {
2495 assert(
I >= 0 &&
I < (NumOpElts * 2) &&
2496 "Out-of-bounds shuffle mask element");
2497 UsesLHS |= (
I < NumOpElts);
2498 UsesRHS |= (
I >= NumOpElts);
2499 if (UsesLHS && UsesRHS)
2503 return UsesLHS || UsesRHS;
2515 for (
int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2518 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2525 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
2533 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
2542 for (
int I = 0,
E = Mask.size();
I <
E; ++
I) {
2545 if (Mask[
I] != (NumSrcElts - 1 -
I) &&
2546 Mask[
I] != (NumSrcElts + NumSrcElts - 1 -
I))
2553 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
2557 for (
int I = 0,
E = Mask.size();
I <
E; ++
I) {
2560 if (Mask[
I] != 0 && Mask[
I] != NumSrcElts)
2567 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
2572 for (
int I = 0,
E = Mask.size();
I <
E; ++
I) {
2575 if (Mask[
I] !=
I && Mask[
I] != (NumSrcElts +
I))
2588 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
2591 int Sz = Mask.size();
2596 if (Mask[0] != 0 && Mask[0] != 1)
2601 if ((Mask[1] - Mask[0]) != NumSrcElts)
2606 for (
int I = 2;
I < Sz; ++
I) {
2607 int MaskEltVal = Mask[
I];
2608 if (MaskEltVal == -1)
2610 int MaskEltPrevVal = Mask[
I - 2];
2611 if (MaskEltVal - MaskEltPrevVal != 2)
2619 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
2622 int StartIndex = -1;
2623 for (
int I = 0,
E = Mask.size();
I !=
E; ++
I) {
2624 int MaskEltVal = Mask[
I];
2625 if (MaskEltVal == -1)
2628 if (StartIndex == -1) {
2631 if (MaskEltVal <
I || NumSrcElts <= (MaskEltVal -
I))
2634 StartIndex = MaskEltVal -
I;
2639 if (MaskEltVal != (StartIndex +
I))
2643 if (StartIndex == -1)
2652 int NumSrcElts,
int &
Index) {
2658 if (NumSrcElts <= (
int)Mask.size())
2663 for (
int i = 0, e = Mask.size(); i != e; ++i) {
2667 int Offset = (M % NumSrcElts) - i;
2668 if (0 <= SubIndex && SubIndex !=
Offset)
2673 if (0 <= SubIndex && SubIndex + (
int)Mask.size() <= NumSrcElts) {
2681 int NumSrcElts,
int &NumSubElts,
2683 int NumMaskElts = Mask.size();
2686 if (NumMaskElts < NumSrcElts)
2697 bool Src0Identity =
true;
2698 bool Src1Identity =
true;
2700 for (
int i = 0; i != NumMaskElts; ++i) {
2706 if (M < NumSrcElts) {
2708 Src0Identity &= (M == i);
2712 Src1Identity &= (M == (i + NumSrcElts));
2714 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2715 "unknown shuffle elements");
2717 "2-source shuffle not found");
2723 int Src0Hi = NumMaskElts - Src0Elts.
countl_zero();
2724 int Src1Hi = NumMaskElts - Src1Elts.
countl_zero();
2729 int NumSub1Elts = Src1Hi - Src1Lo;
2732 NumSubElts = NumSub1Elts;
2741 int NumSub0Elts = Src0Hi - Src0Lo;
2744 NumSubElts = NumSub0Elts;
2756 if (isa<ScalableVectorType>(
getType()))
2759 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2760 int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2761 if (NumMaskElts <= NumOpElts)
2770 for (
int i = NumOpElts; i < NumMaskElts; ++i)
2780 if (isa<ScalableVectorType>(
getType()))
2783 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2784 int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2785 if (NumMaskElts >= NumOpElts)
2793 if (isa<UndefValue>(
Op<0>()) || isa<UndefValue>(
Op<1>()))
2798 if (isa<ScalableVectorType>(
getType()))
2801 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2802 int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2803 if (NumMaskElts != NumOpElts * 2)
2814 int ReplicationFactor,
int VF) {
2815 assert(Mask.size() == (
unsigned)ReplicationFactor * VF &&
2816 "Unexpected mask size.");
2818 for (
int CurrElt :
seq(VF)) {
2819 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2820 assert(CurrSubMask.
size() == (
unsigned)ReplicationFactor &&
2821 "Run out of mask?");
2822 Mask = Mask.drop_front(ReplicationFactor);
2823 if (!
all_of(CurrSubMask, [CurrElt](
int MaskElt) {
2828 assert(Mask.empty() &&
"Did not consume the whole mask?");
2834 int &ReplicationFactor,
int &VF) {
2838 Mask.take_while([](
int MaskElt) {
return MaskElt == 0; }).
size();
2839 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2841 VF = Mask.size() / ReplicationFactor;
2853 for (
int MaskElt : Mask) {
2857 if (MaskElt < Largest)
2859 Largest = std::max(Largest, MaskElt);
2863 for (
int PossibleReplicationFactor :
2864 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2865 if (Mask.size() % PossibleReplicationFactor != 0)
2867 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2871 ReplicationFactor = PossibleReplicationFactor;
2883 if (isa<ScalableVectorType>(
getType()))
2886 VF = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2887 if (ShuffleMask.
size() % VF != 0)
2889 ReplicationFactor = ShuffleMask.
size() / VF;
2895 if (VF <= 0 || Mask.size() <
static_cast<unsigned>(VF) ||
2896 Mask.size() % VF != 0)
2898 for (
unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2903 for (
int Idx : SubMask) {
2917 if (isa<ScalableVectorType>(
getType()))
2939 unsigned NumElts = Mask.size();
2940 if (NumElts % Factor)
2943 unsigned LaneLen = NumElts / Factor;
2947 StartIndexes.
resize(Factor);
2953 for (;
I < Factor;
I++) {
2954 unsigned SavedLaneValue;
2955 unsigned SavedNoUndefs = 0;
2958 for (J = 0; J < LaneLen - 1; J++) {
2960 unsigned Lane = J * Factor +
I;
2961 unsigned NextLane = Lane + Factor;
2962 int LaneValue = Mask[Lane];
2963 int NextLaneValue = Mask[NextLane];
2966 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2967 LaneValue + 1 != NextLaneValue)
2971 if (LaneValue >= 0 && NextLaneValue < 0) {
2972 SavedLaneValue = LaneValue;
2981 if (SavedNoUndefs > 0 && LaneValue < 0) {
2983 if (NextLaneValue >= 0 &&
2984 SavedLaneValue + SavedNoUndefs != (
unsigned)NextLaneValue)
2989 if (J < LaneLen - 1)
2995 StartMask = Mask[
I];
2996 }
else if (Mask[(LaneLen - 1) * Factor +
I] >= 0) {
2998 StartMask = Mask[(LaneLen - 1) * Factor +
I] - J;
2999 }
else if (SavedNoUndefs > 0) {
3001 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
3008 if (StartMask + LaneLen > NumInputElts)
3011 StartIndexes[
I] = StartMask;
3022 int NumElts = Mask.size();
3023 assert((NumElts % NumSubElts) == 0 &&
"Illegal shuffle mask");
3026 for (
int i = 0; i != NumElts; i += NumSubElts) {
3027 for (
int j = 0; j != NumSubElts; ++j) {
3028 int M = Mask[i + j];
3031 if (M < i || M >= i + NumSubElts)
3033 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
3034 if (0 <= RotateAmt &&
Offset != RotateAmt)
3043 ArrayRef<int> Mask,
unsigned EltSizeInBits,
unsigned MinSubElts,
3044 unsigned MaxSubElts,
unsigned &NumSubElts,
unsigned &RotateAmt) {
3045 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
3047 if (EltRotateAmt < 0)
3049 RotateAmt = EltRotateAmt * EltSizeInBits;
3068 assert(!Idxs.
empty() &&
"InsertValueInst must have at least one index");
3071 Val->
getType() &&
"Inserted value must match indexed type!");
3082 Indices(IVI.Indices) {
3097 assert(!Idxs.
empty() &&
"ExtractValueInst must have at least one index");
3105 Indices(EVI.Indices) {
3117 for (
unsigned Index : Idxs) {
3124 if (
ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
3125 if (
Index >= AT->getNumElements())
3127 Agg = AT->getElementType();
3128 }
else if (
StructType *ST = dyn_cast<StructType>(Agg)) {
3129 if (
Index >= ST->getNumElements())
3131 Agg = ST->getElementType(
Index);
3137 return const_cast<Type*
>(Agg);
3190void UnaryOperator::AssertOK() {
3197 "Unary operation should return same type as operand!");
3199 "Tried to create a floating-point operation on a "
3200 "non-floating-point type!");
3248void BinaryOperator::AssertOK() {
3250 (void)LHS; (void)RHS;
3252 "Binary operator operand types must match!");
3258 "Arithmetic operation should return same type as operands!");
3260 "Tried to create an integer operation on a non-integer type!");
3262 case FAdd:
case FSub:
3265 "Arithmetic operation should return same type as operands!");
3267 "Tried to create a floating-point operation on a "
3268 "non-floating-point type!");
3273 "Arithmetic operation should return same type as operands!");
3275 "Incorrect operand type (not integer) for S/UDIV");
3279 "Arithmetic operation should return same type as operands!");
3281 "Incorrect operand type (not floating point) for FDIV");
3286 "Arithmetic operation should return same type as operands!");
3288 "Incorrect operand type (not integer) for S/UREM");
3292 "Arithmetic operation should return same type as operands!");
3294 "Incorrect operand type (not floating point) for FREM");
3300 "Shift operation should return same type as operands!");
3302 "Tried to create a shift operation on a non-integral type!");
3307 "Logical operation should return same type as operands!");
3309 "Tried to create a logical operation on a non-integral type!");
3320 "Cannot create binary operator with two operands of differing type!");
3328 "Cannot create binary operator with two operands of differing type!");
3342 Value *Zero = ConstantInt::get(
Op->getType(), 0);
3349 Value *Zero = ConstantInt::get(
Op->getType(), 0);
3352 Op->getType(),
Name, InsertAtEnd);
3357 Value *Zero = ConstantInt::get(
Op->getType(), 0);
3358 return BinaryOperator::CreateNSWSub(Zero,
Op,
Name, InsertBefore);
3363 Value *Zero = ConstantInt::get(
Op->getType(), 0);
3364 return BinaryOperator::CreateNSWSub(Zero,
Op,
Name, InsertAtEnd);
3369 Value *Zero = ConstantInt::get(
Op->getType(), 0);
3370 return BinaryOperator::CreateNUWSub(Zero,
Op,
Name, InsertBefore);
3375 Value *Zero = ConstantInt::get(
Op->getType(), 0);
3376 return BinaryOperator::CreateNUWSub(Zero,
Op,
Name, InsertAtEnd);
3383 Op->getType(),
Name, InsertBefore);
3390 Op->getType(),
Name, InsertBefore);
3397 Op->getType(),
Name, InsertAtEnd);
3417 cast<Instruction>(
this)->getMetadata(LLVMContext::MD_fpmath);
3431 default:
return false;
3432 case Instruction::ZExt:
3433 case Instruction::SExt:
3434 case Instruction::Trunc:
3436 case Instruction::BitCast:
3457 case Instruction::Trunc:
3458 case Instruction::ZExt:
3459 case Instruction::SExt:
3460 case Instruction::FPTrunc:
3461 case Instruction::FPExt:
3462 case Instruction::UIToFP:
3463 case Instruction::SIToFP:
3464 case Instruction::FPToUI:
3465 case Instruction::FPToSI:
3466 case Instruction::AddrSpaceCast:
3469 case Instruction::BitCast:
3471 case Instruction::PtrToInt:
3472 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3474 case Instruction::IntToPtr:
3475 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3495 Type *DstIntPtrTy) {
3526 const unsigned numCastOps =
3527 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
3528 static const uint8_t CastResults[numCastOps][numCastOps] = {
3534 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
3535 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0},
3536 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0},
3537 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
3538 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
3539 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
3540 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
3541 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
3542 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0},
3543 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0},
3544 { 99,99,99,99,99,99,99,99,99,11,99,15, 0},
3545 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14},
3546 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12},
3553 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
3554 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
3555 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
3558 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
3559 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
3560 if (!AreBothBitcasts)
3563 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
3564 [secondOp-Instruction::CastOpsBegin];
3609 return Instruction::BitCast;
3612 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
3615 if (MidSize >= PtrSize)
3616 return Instruction::BitCast;
3626 return Instruction::BitCast;
3627 if (SrcSize < DstSize)
3629 if (SrcSize > DstSize)
3635 return Instruction::ZExt;
3643 if (SrcSize <= PtrSize && SrcSize == DstSize)
3644 return Instruction::BitCast;
3651 return Instruction::AddrSpaceCast;
3652 return Instruction::BitCast;
3663 "Illegal addrspacecast, bitcast sequence!");
3668 return Instruction::AddrSpaceCast;
3678 "Illegal inttoptr, bitcast sequence!");
3690 "Illegal bitcast, ptrtoint sequence!");
3695 return Instruction::UIToFP;
3711 case Trunc:
return new TruncInst (S, Ty,
Name, InsertBefore);
3712 case ZExt:
return new ZExtInst (S, Ty,
Name, InsertBefore);
3713 case SExt:
return new SExtInst (S, Ty,
Name, InsertBefore);
3715 case FPExt:
return new FPExtInst (S, Ty,
Name, InsertBefore);
3733 case Trunc:
return new TruncInst (S, Ty,
Name, InsertBefore);
3734 case ZExt:
return new ZExtInst (S, Ty,
Name, InsertBefore);
3735 case SExt:
return new SExtInst (S, Ty,
Name, InsertBefore);
3737 case FPExt:
return new FPExtInst (S, Ty,
Name, InsertBefore);
3755 case Trunc:
return new TruncInst (S, Ty,
Name, InsertAtEnd);
3756 case ZExt:
return new ZExtInst (S, Ty,
Name, InsertAtEnd);
3757 case SExt:
return new SExtInst (S, Ty,
Name, InsertAtEnd);
3759 case FPExt:
return new FPExtInst (S, Ty,
Name, InsertAtEnd);
3775 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3776 return Create(Instruction::ZExt, S, Ty,
Name, InsertBefore);
3783 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3784 return Create(Instruction::ZExt, S, Ty,
Name, InsertBefore);
3791 return Create(Instruction::BitCast, S, Ty,
Name, InsertAtEnd);
3792 return Create(Instruction::ZExt, S, Ty,
Name, InsertAtEnd);
3798 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3799 return Create(Instruction::SExt, S, Ty,
Name, InsertBefore);
3806 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3807 return Create(Instruction::SExt, S, Ty,
Name, InsertBefore);
3814 return Create(Instruction::BitCast, S, Ty,
Name, InsertAtEnd);
3815 return Create(Instruction::SExt, S, Ty,
Name, InsertAtEnd);
3821 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3822 return Create(Instruction::Trunc, S, Ty,
Name, InsertBefore);
3829 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3830 return Create(Instruction::Trunc, S, Ty,
Name, InsertBefore);
3837 return Create(Instruction::BitCast, S, Ty,
Name, InsertAtEnd);
3838 return Create(Instruction::Trunc, S, Ty,
Name, InsertAtEnd);
3849 cast<VectorType>(Ty)->getElementCount() ==
3850 cast<VectorType>(S->
getType())->getElementCount()) &&
3854 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertAtEnd);
3867 cast<VectorType>(Ty)->getElementCount() ==
3868 cast<VectorType>(S->
getType())->getElementCount()) &&
3872 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3885 cast<VectorType>(Ty)->getElementCount() ==
3886 cast<VectorType>(S->
getType())->getElementCount()) &&
3890 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3903 return Create(Instruction::AddrSpaceCast, S, Ty,
Name, InsertAtEnd);
3905 return Create(Instruction::BitCast, S, Ty,
Name, InsertAtEnd);
3914 return Create(Instruction::AddrSpaceCast, S, Ty,
Name, InsertBefore);
3916 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3925 return Create(Instruction::AddrSpaceCast, S, Ty,
Name, InsertBefore);
3927 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3934 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3936 return Create(Instruction::IntToPtr, S, Ty,
Name, InsertBefore);
3938 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3945 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3947 return Create(Instruction::IntToPtr, S, Ty,
Name, InsertBefore);
3949 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3956 "Invalid integer cast");
3957 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3960 (SrcBits == DstBits ? Instruction::BitCast :
3961 (SrcBits > DstBits ? Instruction::Trunc :
3962 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3970 "Invalid integer cast");
3971 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3974 (SrcBits == DstBits ? Instruction::BitCast :
3975 (SrcBits > DstBits ? Instruction::Trunc :
3976 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3985 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3988 (SrcBits == DstBits ? Instruction::BitCast :
3989 (SrcBits > DstBits ? Instruction::Trunc :
3990 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3998 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
4001 (SrcBits == DstBits ? Instruction::BitCast :
4002 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4011 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
4013 assert((
C->getType() == Ty || SrcBits != DstBits) &&
"Invalid cast");
4015 (SrcBits == DstBits ? Instruction::BitCast :
4016 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4025 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
4028 (SrcBits == DstBits ? Instruction::BitCast :
4029 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
4037 if (SrcTy == DestTy)
4040 if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
4041 if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
4042 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4044 SrcTy = SrcVecTy->getElementType();
4045 DestTy = DestVecTy->getElementType();
4050 if (
PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
4051 if (
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
4052 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
4064 if (SrcBits != DestBits)
4076 if (
auto *PtrTy = dyn_cast<PointerType>(SrcTy))
4077 if (
auto *IntTy = dyn_cast<IntegerType>(DestTy))
4078 return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
4079 !
DL.isNonIntegralPointerType(PtrTy));
4080 if (
auto *PtrTy = dyn_cast<PointerType>(DestTy))
4081 if (
auto *IntTy = dyn_cast<IntegerType>(SrcTy))
4082 return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
4083 !
DL.isNonIntegralPointerType(PtrTy));
4096 const Value *Src,
bool SrcIsSigned,
Type *DestTy,
bool DestIsSigned) {
4097 Type *SrcTy = Src->getType();
4100 "Only first class types are castable!");
4102 if (SrcTy == DestTy)
4106 if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
4107 if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
4108 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4111 SrcTy = SrcVecTy->getElementType();
4112 DestTy = DestVecTy->getElementType();
4122 if (DestBits < SrcBits)
4124 else if (DestBits > SrcBits) {
4138 assert(DestBits == SrcBits &&
4139 "Casting vector to integer of different width");
4143 "Casting from a value that is not first-class type");
4153 if (DestBits < SrcBits) {
4155 }
else if (DestBits > SrcBits) {
4161 assert(DestBits == SrcBits &&
4162 "Casting vector to floating point of different width");
4167 assert(DestBits == SrcBits &&
4168 "Illegal cast to vector (wrong type or size)");
4173 return AddrSpaceCast;
4181 assert(DestBits == SrcBits &&
"Casting vector of wrong width to X86_MMX");
4205 bool SrcIsVec = isa<VectorType>(SrcTy);
4206 bool DstIsVec = isa<VectorType>(DstTy);
4213 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
4215 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
4220 default:
return false;
4221 case Instruction::Trunc:
4223 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4224 case Instruction::ZExt:
4226 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4227 case Instruction::SExt:
4229 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4230 case Instruction::FPTrunc:
4232 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
4233 case Instruction::FPExt:
4235 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
4236 case Instruction::UIToFP:
4237 case Instruction::SIToFP:
4240 case Instruction::FPToUI:
4241 case Instruction::FPToSI:
4244 case Instruction::PtrToInt:
4248 case Instruction::IntToPtr:
4252 case Instruction::BitCast: {
4258 if (!SrcPtrTy != !DstPtrTy)
4271 if (SrcIsVec && DstIsVec)
4272 return SrcEC == DstEC;
4280 case Instruction::AddrSpaceCast: {
4292 return SrcEC == DstEC;
4521) :
CastInst(Ty, AddrSpaceCast, S,
Name, InsertBefore) {
4578 if (
Op == Instruction::ICmp) {
4590 if (
Op == Instruction::ICmp) {
4610 if (
Op == Instruction::ICmp) {
4619 if (
ICmpInst *IC = dyn_cast<ICmpInst>(
this))
4622 cast<FCmpInst>(
this)->swapOperands();
4626 if (
const ICmpInst *IC = dyn_cast<ICmpInst>(
this))
4627 return IC->isCommutative();
4628 return cast<FCmpInst>(
this)->isCommutative();
4674 default:
return "unknown";
4889 switch (predicate) {
4890 default:
return false;
4897 switch (predicate) {
4898 default:
return false;
4976 "Call only with non-equality predicates!");
4987 switch (predicate) {
4988 default:
return false;
4996 switch (predicate) {
4997 default:
return false;
5006 default:
return false;
5016 default:
return false;
5055 ReservedSpace = NumReserved;
5070 nullptr, 0, InsertBefore) {
5081 nullptr, 0, InsertBefore) {
5092 nullptr, 0, InsertAtEnd) {
5098 init(
SI.getCondition(),
SI.getDefaultDest(),
SI.getNumOperands());
5101 const Use *InOL =
SI.getOperandList();
5102 for (
unsigned i = 2,
E =
SI.getNumOperands(); i !=
E; i += 2) {
5104 OL[i+1] = InOL[i+1];
5114 if (OpNo+2 > ReservedSpace)
5117 assert(OpNo+1 < ReservedSpace &&
"Growing didn't work!");
5127 unsigned idx =
I->getCaseIndex();
5135 if (2 + (idx + 1) * 2 != NumOps) {
5136 OL[2 + idx * 2] = OL[NumOps - 2];
5137 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
5141 OL[NumOps-2].
set(
nullptr);
5142 OL[NumOps-2+1].
set(
nullptr);
5145 return CaseIt(
this, idx);
5151void SwitchInst::growOperands() {
5153 unsigned NumOps = e*3;
5155 ReservedSpace = NumOps;
5160 assert(Changed &&
"called only if metadata has changed");
5165 assert(SI.getNumSuccessors() == Weights->size() &&
5166 "num of prof branch_weights must accord with num of successors");
5168 bool AllZeroes =
all_of(*Weights, [](
uint32_t W) {
return W == 0; });
5170 if (AllZeroes || Weights->size() < 2)
5181 if (ProfileData->
getNumOperands() != SI.getNumSuccessors() + 1) {
5183 "not correspond to number of succesors");
5189 this->Weights = std::move(Weights);
5195 assert(SI.getNumSuccessors() == Weights->size() &&
5196 "num of prof branch_weights must accord with num of successors");
5201 (*Weights)[
I->getCaseIndex() + 1] = Weights->back();
5202 Weights->pop_back();
5204 return SI.removeCase(
I);
5210 SI.addCase(OnVal, Dest);
5212 if (!Weights && W && *W) {
5215 (*Weights)[SI.getNumSuccessors() - 1] = *W;
5216 }
else if (Weights) {
5218 Weights->push_back(W.value_or(0));
5221 assert(SI.getNumSuccessors() == Weights->size() &&
5222 "num of prof branch_weights must accord with num of successors");
5231 return SI.eraseFromParent();
5237 return std::nullopt;
5238 return (*Weights)[idx];
5250 auto &OldW = (*Weights)[idx];
5262 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
5263 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
5267 return std::nullopt;
5274void IndirectBrInst::init(
Value *
Address,
unsigned NumDests) {
5276 "Address of indirectbr must be a pointer");
5277 ReservedSpace = 1+NumDests;
5288void IndirectBrInst::growOperands() {
5290 unsigned NumOps = e*2;
5292 ReservedSpace = NumOps;
5296IndirectBrInst::IndirectBrInst(
Value *
Address,
unsigned NumCases,
5299 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5303IndirectBrInst::IndirectBrInst(
Value *
Address,
unsigned NumCases,
5306 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5310IndirectBrInst::IndirectBrInst(
Value *
Address,
unsigned NumCases,
5313 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
5319 nullptr, IBI.getNumOperands()) {
5321 Use *OL = getOperandList();
5332 if (OpNo+1 > ReservedSpace)
5335 assert(OpNo < ReservedSpace &&
"Growing didn't work!");
5349 OL[idx+1] = OL[NumOps-1];
5352 OL[NumOps-1].
set(
nullptr);
5436 Result->setWeak(
isWeak());
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isSigned(unsigned int Opcode)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB)
Module.h This file contains the declarations for the Module class.
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
float convertToFloat() const
Converts this APFloat to host float value.
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
AllocaInst * cloneImpl() const
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, BasicBlock::iterator InsertBefore)
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
An instruction that atomically checks whether a specified value is in a memory location,...
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
void setOperation(BinOp Operation)
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
LLVM Basic Block Representation.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOps getOpcode() const
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNUWNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
void setCallingConv(CallingConv::ID CC)
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
void setOnlyReadsMemory()
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, BasicBlock::iterator InsertPt)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
Value * getCalledOperand() const
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
This is the base class for all instructions that perform data casts.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast or an AddrSpaceCast cast instruction.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast AddrSpaceCast, or a PtrToInt cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a Trunc or BitCast cast instruction.
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a SExt or BitCast cast instruction.
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isEquality() const
Determine if this is an equals/not equals predicate.
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
static CmpInst * Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a compare instruction, given the opcode, the predicate and the two operands.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getUnsignedPredicate()
For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
bool isNonStrictPredicate() const
bool isFPPredicate() const
void swapOperands()
This is just a convenience that dispatches to the subclasses.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name, BasicBlock::iterator InsertBefore, Instruction *FlagsSource=nullptr)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
bool isStrictPredicate() const
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
bool isIntPredicate() const
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static constexpr ElementCount getFixed(ScalarTy MinVal)
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
This class represents a cast from floating point to unsigned integer.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr, BasicBlock::iterator InsertBefore)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
This instruction inserts a struct field or array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
InstListType::iterator insertInto(BasicBlock *ParentBB, InstListType::iterator It)
Inserts an unlinked instruction into ParentBB at position It and returns the iterator of the inserted...
This class represents a cast from an integer to a pointer.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
BasicBlock * getNormalDest() const
This is an important class for using LLVM in a threaded context.
LLVMContextImpl *const pImpl
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
An instruction for reading from memory.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock::iterator InsertBefore)
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Align getAlign() const
Return the alignment of the access that is being performed.
ConstantAsMetadata * createConstant(Constant *C)
Return the given constant as metadata.
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
This class implements a map that also provides access to all stored values in a deterministic order.
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
bool doesNotAccessMemory() const
Whether this function accesses no memory.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
This class represents the LLVM 'select' instruction.
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Class to represent struct types.
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
MDNode * buildProfBranchWeightsMD()
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
bool isAggregateType() const
Return true if the type is an aggregate type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast from unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryOperator * cloneImpl() const
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a unary instruction, given the opcode and an operand.
UnaryOps getOpcode() const
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
const Use * getOperandList() const
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
VAArgInst * cloneImpl() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
unsigned getPointerAddressSpace(const Type *T)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
@ Default
The result values are uniform if and only if all operands are uniform.
const uint64_t NOMORE_ICP_MAGICNUM
Magic number in the value profile metadata showing a target has been promoted for the instruction and...
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Describes an element of a Bitfield.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Compile-time customization of User operands.