54 "disable-i2p-p2i-opt",
cl::init(
false),
55 cl::desc(
"Disables inttoptr/ptrtoint roundtrip optimization"));
61std::optional<TypeSize>
68 assert(!
Size.isScalable() &&
"Array elements cannot have a scalable size");
78std::optional<TypeSize>
98 return "both values to select must have same type";
101 return "select values cannot have token type";
106 return "vector select condition element type must be i1";
109 return "selected values for vector select must be vectors";
111 return "vector select requires selected vectors to have "
112 "the same vector length as select condition";
114 return "select condition must be i1 or <n x i1>";
123PHINode::PHINode(
const PHINode &PN)
125 ReservedSpace(PN.getNumOperands()) {
146 Op<-1>().set(
nullptr);
159 bool DeletePHIIfEmpty) {
165 if (RemoveIndices.
empty())
170 return RemoveIndices.
contains(U.getOperandNo());
195void PHINode::growOperands() {
197 unsigned NumOps = e + e / 2;
198 if (NumOps < 2) NumOps = 2;
200 ReservedSpace = NumOps;
211 if (ConstantValue !=
this)
216 if (ConstantValue ==
this)
218 return ConstantValue;
227 Value *ConstantValue =
nullptr;
231 if (ConstantValue && ConstantValue !=
Incoming)
243LandingPadInst::LandingPadInst(
Type *
RetTy,
unsigned NumReservedValues,
244 const Twine &NameStr,
247 init(NumReservedValues, NameStr);
252 LP.getNumOperands()),
253 ReservedSpace(LP.getNumOperands()) {
257 for (
unsigned I = 0, E = ReservedSpace;
I != E; ++
I)
264 const Twine &NameStr,
269void LandingPadInst::init(
unsigned NumReservedValues,
const Twine &NameStr) {
270 ReservedSpace = NumReservedValues;
279void LandingPadInst::growOperands(
unsigned Size) {
281 if (ReservedSpace >= e +
Size)
return;
282 ReservedSpace = (std::max(e, 1U) +
Size / 2) * 2;
289 assert(OpNo < ReservedSpace &&
"Growing didn't work!");
301 case Instruction::Call:
303 case Instruction::Invoke:
305 case Instruction::CallBr:
317 if (ChildOB.getTagName() != OpB.
getTag())
328 return cast<CallBrInst>(
this)->getNumIndirectDests() + 1;
333 if (isa<Function>(V) || isa<Constant>(V))
341 if (
auto *CI = dyn_cast<CallInst>(
this))
342 return CI->isMustTailCall();
348 if (
auto *CI = dyn_cast<CallInst>(
this))
349 return CI->isTailCall();
355 return F->getIntrinsicID();
363 Mask |=
F->getAttributes().getRetNoFPClass();
371 Mask |=
F->getAttributes().getParamNoFPClass(i);
399 if (
F->getAttributes().hasAttrSomewhere(Kind, &
Index))
416 if (!
F->getAttributes().hasParamAttr(ArgNo, Kind))
421 case Attribute::ReadNone:
423 case Attribute::ReadOnly:
425 case Attribute::WriteOnly:
434 return F->getAttributes().hasFnAttr(Kind);
439bool CallBase::hasFnAttrOnCalledFunction(
StringRef Kind)
const {
441 return F->getAttributes().hasFnAttr(Kind);
446template <
typename AK>
447Attribute CallBase::getFnAttrOnCalledFunction(AK Kind)
const {
448 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
451 assert(Kind != Attribute::Memory &&
"Use getMemoryEffects() instead");
455 return F->getAttributes().getFnAttr(Kind);
464template <
typename AK>
465Attribute CallBase::getParamAttrOnCalledFunction(
unsigned ArgNo,
469 if (
auto *
F = dyn_cast<Function>(V))
470 return F->getAttributes().getParamAttr(ArgNo, Kind);
475CallBase::getParamAttrOnCalledFunction(
unsigned ArgNo,
477template Attribute CallBase::getParamAttrOnCalledFunction(
unsigned ArgNo,
488 const unsigned BeginIndex) {
490 for (
auto &
B : Bundles)
491 It = std::copy(
B.input_begin(),
B.input_end(), It);
494 auto BI = Bundles.
begin();
495 unsigned CurrentIndex = BeginIndex;
498 assert(BI != Bundles.
end() &&
"Incorrect allocation?");
500 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
501 BOI.Begin = CurrentIndex;
502 BOI.End = CurrentIndex + BI->input_size();
503 CurrentIndex = BOI.End;
507 assert(BI == Bundles.
end() &&
"Incorrect allocation?");
518 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
524 assert(OpIdx >=
arg_size() &&
"the Idx is not in the operand bundles");
527 "The Idx isn't in the operand bundle");
531 constexpr unsigned NumberScaling = 1024;
537 while (Begin !=
End) {
538 unsigned ScaledOperandPerBundle =
539 NumberScaling * (std::prev(
End)->End - Begin->
Begin) / (
End - Begin);
540 Current = Begin + (((OpIdx - Begin->
Begin) * NumberScaling) /
541 ScaledOperandPerBundle);
543 Current = std::prev(
End);
544 assert(Current < End && Current >= Begin &&
545 "the operand bundle doesn't cover every value in the range");
546 if (OpIdx >= Current->
Begin && OpIdx < Current->
End)
548 if (OpIdx >= Current->
End)
555 "the operand bundle doesn't cover every value in the range");
568 return Create(CB, Bundles, InsertPt);
574 bool CreateNew =
false;
578 if (Bundle.getTagID() ==
ID) {
585 return CreateNew ?
Create(CB, Bundles, InsertPt) : CB;
683 "NumOperands not set up?");
688 "Calling a function with bad signature!");
690 for (
unsigned i = 0; i != Args.size(); ++i)
693 "Calling a function with a bad signature!");
725CallInst::CallInst(
const CallInst &CI)
728 CI.getNumOperands()) {
743 Args, OpB, CI->
getName(), InsertPt);
757 LLVM_DEBUG(
dbgs() <<
"Attempting to update profile weights will result in "
758 "div by 0. Ignoring. Likely the function "
760 <<
" has 0 entry count, and contains call instructions "
761 "with non-zero prof info.");
774 const Twine &NameStr) {
779 "NumOperands not set up?");
784 "Invoking a function with bad signature");
786 for (
unsigned i = 0, e = Args.size(); i != e; i++)
789 "Invoking a function with a bad signature!");
809 II.getNumOperands()) {
812 std::copy(
II.bundle_op_info_begin(),
II.bundle_op_info_end(),
819 std::vector<Value *> Args(
II->arg_begin(),
II->arg_end());
822 II->getFunctionType(),
II->getCalledOperand(),
II->getNormalDest(),
823 II->getUnwindDest(), Args, OpB,
II->getName(), InsertPt);
824 NewII->setCallingConv(
II->getCallingConv());
825 NewII->SubclassOptionalData =
II->SubclassOptionalData;
826 NewII->setAttributes(
II->getAttributes());
827 NewII->setDebugLoc(
II->getDebugLoc());
832 return cast<LandingPadInst>(
getUnwindDest()->getFirstNonPHI());
837 LLVM_DEBUG(
dbgs() <<
"Attempting to update profile weights will result in "
838 "div by 0. Ignoring. Likely the function "
840 <<
" has 0 entry count, and contains call instructions "
841 "with non-zero prof info.");
855 const Twine &NameStr) {
859 ComputeNumOperands(Args.size(), IndirectDests.
size(),
861 "NumOperands not set up?");
866 "Calling a function with bad signature");
868 for (
unsigned i = 0, e = Args.size(); i != e; i++)
871 "Calling a function with a bad signature!");
876 std::copy(Args.begin(), Args.end(),
op_begin());
877 NumIndirectDests = IndirectDests.
size();
879 for (
unsigned i = 0; i != NumIndirectDests; ++i)
893 CBI.getNumOperands()) {
899 NumIndirectDests = CBI.NumIndirectDests;
913 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
924 RI.getNumOperands()) {
962 CRI.getNumOperands(),
963 CRI.getNumOperands()) {
964 setSubclassData<Instruction::OpaqueField>(
973 setSubclassData<UnwindDestField>(
true);
975 Op<0>() = CleanupPad;
980CleanupReturnInst::CleanupReturnInst(
Value *CleanupPad,
BasicBlock *UnwindBB,
986 Values, InsertBefore) {
987 init(CleanupPad, UnwindBB);
1017CatchSwitchInst::CatchSwitchInst(
Value *ParentPad,
BasicBlock *UnwindDest,
1018 unsigned NumReservedValues,
1019 const Twine &NameStr,
1024 ++NumReservedValues;
1025 init(ParentPad, UnwindDest, NumReservedValues + 1);
1031 CSI.getNumOperands()) {
1036 for (
unsigned I = 1, E = ReservedSpace;
I != E; ++
I)
1041 unsigned NumReservedValues) {
1042 assert(ParentPad && NumReservedValues);
1044 ReservedSpace = NumReservedValues;
1048 Op<0>() = ParentPad;
1050 setSubclassData<UnwindDestField>(
true);
1057void CatchSwitchInst::growOperands(
unsigned Size) {
1059 assert(NumOperands >= 1);
1060 if (ReservedSpace >= NumOperands +
Size)
1062 ReservedSpace = (NumOperands +
Size / 2) * 2;
1069 assert(OpNo < ReservedSpace &&
"Growing didn't work!");
1077 for (
Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1078 *CurDst = *(CurDst + 1);
1089 const Twine &NameStr) {
1099 FPI.getNumOperands(),
1100 FPI.getNumOperands()) {
1107 const Twine &NameStr,
1112 init(ParentPad, Args, NameStr);
1128void BranchInst::AssertOK() {
1131 "May only branch on boolean predicates!");
1138 assert(IfTrue &&
"Branch destination may not be null!");
1159 BI.getNumOperands()) {
1163 Op<-3>() = BI.
Op<-3>();
1164 Op<-2>() = BI.
Op<-2>();
1166 Op<-1>() = BI.
Op<-1>();
1172 "Cannot swap successors of an unconditional branch");
1188 assert(!isa<BasicBlock>(Amt) &&
1189 "Passed basic block into allocation size parameter! Use other ctor");
1191 "Allocation array size is not an integer!");
1198 "Insertion position cannot be null when alignment not provided!");
1201 "BB must be in a Function when alignment not provided!");
1203 return DL.getPrefTypeAlign(Ty);
1220 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1229 return !CI->isOne();
1249void LoadInst::AssertOK() {
1251 "Ptr must have pointer type.");
1256 "Insertion position cannot be null when alignment not provided!");
1259 "BB must be in a Function when alignment not provided!");
1261 return DL.getABITypeAlign(Ty);
1276 SyncScope::System, InsertBef) {}
1293void StoreInst::AssertOK() {
1296 "Ptr must have pointer type!");
1311 SyncScope::System, InsertBefore) {}
1344 "All operands must be non-null!");
1346 "Ptr must have pointer type!");
1348 "Cmp type and NewVal type must be same!");
1361 Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1372 "atomicrmw instructions can only be atomic.");
1374 "atomicrmw instructions cannot be unordered.");
1384 "Ptr must have pointer type!");
1386 "AtomicRMW instructions must be atomic!");
1435 return "<invalid operation>";
1459 "NumOperands not initialized?");
1468 GEPI.getNumOperands(),
1469 GEPI.getNumOperands()),
1470 SourceElementType(GEPI.SourceElementType),
1471 ResultElementType(GEPI.ResultElementType) {
1477 if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1482 if (!
Idx->getType()->isIntOrIntVectorTy())
1484 if (
auto *Array = dyn_cast<ArrayType>(Ty))
1485 return Array->getElementType();
1486 if (
auto *
Vector = dyn_cast<VectorType>(Ty))
1487 return Vector->getElementType();
1492 if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1497 if (
auto *Array = dyn_cast<ArrayType>(Ty))
1498 return Array->getElementType();
1499 if (
auto *
Vector = dyn_cast<VectorType>(Ty))
1500 return Vector->getElementType();
1504template <
typename IndexTy>
1506 if (IdxList.
empty())
1508 for (IndexTy V : IdxList.
slice(1)) {
1535 if (!CI->isZero())
return false;
1568 return cast<GEPOperator>(
this)->getNoWrapFlags();
1572 return cast<GEPOperator>(
this)->isInBounds();
1576 return cast<GEPOperator>(
this)->hasNoUnsignedSignedWrap();
1580 return cast<GEPOperator>(
this)->hasNoUnsignedWrap();
1586 return cast<GEPOperator>(
this)->accumulateConstantOffset(
DL,
Offset);
1592 APInt &ConstantOffset)
const {
1594 return cast<GEPOperator>(
this)->collectOffset(
DL,
BitWidth, VariableOffsets,
1609 "Invalid extractelement instruction operands!");
1632 "Invalid insertelement instruction operands!");
1644 if (Elt->
getType() != cast<VectorType>(Vec->
getType())->getElementType())
1647 if (!
Index->getType()->isIntegerTy())
1657 assert(V &&
"Cannot create placeholder of nullptr V");
1681 "Invalid shuffle vector instruction operands!");
1700 "Invalid shuffle vector instruction operands!");
1708 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
1709 int NumMaskElts = ShuffleMask.
size();
1711 for (
int i = 0; i != NumMaskElts; ++i) {
1717 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts &&
"Out-of-range mask");
1718 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1719 NewMask[i] = MaskElt;
1728 if (!isa<VectorType>(V1->
getType()) || V1->
getType() != V2->getType())
1733 cast<VectorType>(V1->
getType())->getElementCount().getKnownMinValue();
1734 for (
int Elem : Mask)
1738 if (isa<ScalableVectorType>(V1->
getType()))
1746 const Value *Mask) {
1753 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1754 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1755 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->
getType()))
1759 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1762 if (
const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1763 unsigned V1Size = cast<FixedVectorType>(V1->
getType())->getNumElements();
1764 for (
Value *
Op : MV->operands()) {
1765 if (
auto *CI = dyn_cast<ConstantInt>(
Op)) {
1766 if (CI->uge(V1Size*2))
1768 }
else if (!isa<UndefValue>(
Op)) {
1775 if (
const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1776 unsigned V1Size = cast<FixedVectorType>(V1->
getType())->getNumElements();
1777 for (
unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->
getNumElements();
1779 if (CDS->getElementAsInteger(i) >= V1Size*2)
1789 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1791 if (isa<ConstantAggregateZero>(Mask)) {
1792 Result.resize(EC.getKnownMinValue(), 0);
1796 Result.reserve(EC.getKnownMinValue());
1798 if (EC.isScalable()) {
1799 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
1800 "Scalable vector shuffle mask must be undef or zeroinitializer");
1801 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1802 for (
unsigned I = 0;
I < EC.getKnownMinValue(); ++
I)
1803 Result.emplace_back(MaskVal);
1807 unsigned NumElts = EC.getKnownMinValue();
1809 if (
auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1810 for (
unsigned i = 0; i != NumElts; ++i)
1811 Result.push_back(CDS->getElementAsInteger(i));
1814 for (
unsigned i = 0; i != NumElts; ++i) {
1815 Constant *
C = Mask->getAggregateElement(i);
1816 Result.push_back(isa<UndefValue>(
C) ? -1 :
1817 cast<ConstantInt>(
C)->getZExtValue());
1822 ShuffleMask.
assign(Mask.begin(), Mask.end());
1829 if (isa<ScalableVectorType>(ResultTy)) {
1837 for (
int Elem : Mask) {
1841 MaskConst.
push_back(ConstantInt::get(Int32Ty, Elem));
1847 assert(!Mask.empty() &&
"Shuffle mask must contain elements");
1848 bool UsesLHS =
false;
1849 bool UsesRHS =
false;
1850 for (
int I : Mask) {
1853 assert(
I >= 0 &&
I < (NumOpElts * 2) &&
1854 "Out-of-bounds shuffle mask element");
1855 UsesLHS |= (
I < NumOpElts);
1856 UsesRHS |= (
I >= NumOpElts);
1857 if (UsesLHS && UsesRHS)
1861 return UsesLHS || UsesRHS;
1873 for (
int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1876 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1883 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1891 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1900 for (
int I = 0, E = Mask.size();
I < E; ++
I) {
1903 if (Mask[
I] != (NumSrcElts - 1 -
I) &&
1904 Mask[
I] != (NumSrcElts + NumSrcElts - 1 -
I))
1911 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1915 for (
int I = 0, E = Mask.size();
I < E; ++
I) {
1918 if (Mask[
I] != 0 && Mask[
I] != NumSrcElts)
1925 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1930 for (
int I = 0, E = Mask.size();
I < E; ++
I) {
1933 if (Mask[
I] !=
I && Mask[
I] != (NumSrcElts +
I))
1946 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1949 int Sz = Mask.size();
1954 if (Mask[0] != 0 && Mask[0] != 1)
1959 if ((Mask[1] - Mask[0]) != NumSrcElts)
1964 for (
int I = 2;
I < Sz; ++
I) {
1965 int MaskEltVal = Mask[
I];
1966 if (MaskEltVal == -1)
1968 int MaskEltPrevVal = Mask[
I - 2];
1969 if (MaskEltVal - MaskEltPrevVal != 2)
1977 if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1980 int StartIndex = -1;
1981 for (
int I = 0, E = Mask.size();
I != E; ++
I) {
1982 int MaskEltVal = Mask[
I];
1983 if (MaskEltVal == -1)
1986 if (StartIndex == -1) {
1989 if (MaskEltVal <
I || NumSrcElts <= (MaskEltVal -
I))
1992 StartIndex = MaskEltVal -
I;
1997 if (MaskEltVal != (StartIndex +
I))
2001 if (StartIndex == -1)
2010 int NumSrcElts,
int &
Index) {
2016 if (NumSrcElts <= (
int)Mask.size())
2021 for (
int i = 0, e = Mask.size(); i != e; ++i) {
2025 int Offset = (M % NumSrcElts) - i;
2026 if (0 <= SubIndex && SubIndex !=
Offset)
2031 if (0 <= SubIndex && SubIndex + (
int)Mask.size() <= NumSrcElts) {
2039 int NumSrcElts,
int &NumSubElts,
2041 int NumMaskElts = Mask.size();
2044 if (NumMaskElts < NumSrcElts)
2055 bool Src0Identity =
true;
2056 bool Src1Identity =
true;
2058 for (
int i = 0; i != NumMaskElts; ++i) {
2064 if (M < NumSrcElts) {
2066 Src0Identity &= (M == i);
2070 Src1Identity &= (M == (i + NumSrcElts));
2072 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2073 "unknown shuffle elements");
2075 "2-source shuffle not found");
2081 int Src0Hi = NumMaskElts - Src0Elts.
countl_zero();
2082 int Src1Hi = NumMaskElts - Src1Elts.
countl_zero();
2087 int NumSub1Elts = Src1Hi - Src1Lo;
2090 NumSubElts = NumSub1Elts;
2099 int NumSub0Elts = Src0Hi - Src0Lo;
2102 NumSubElts = NumSub0Elts;
2114 if (isa<ScalableVectorType>(
getType()))
2117 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2118 int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2119 if (NumMaskElts <= NumOpElts)
2128 for (
int i = NumOpElts; i < NumMaskElts; ++i)
2138 if (isa<ScalableVectorType>(
getType()))
2141 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2142 int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2143 if (NumMaskElts >= NumOpElts)
2151 if (isa<UndefValue>(
Op<0>()) || isa<UndefValue>(
Op<1>()))
2156 if (isa<ScalableVectorType>(
getType()))
2159 int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2160 int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2161 if (NumMaskElts != NumOpElts * 2)
2172 int ReplicationFactor,
int VF) {
2173 assert(Mask.size() == (
unsigned)ReplicationFactor * VF &&
2174 "Unexpected mask size.");
2176 for (
int CurrElt :
seq(VF)) {
2177 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2178 assert(CurrSubMask.
size() == (
unsigned)ReplicationFactor &&
2179 "Run out of mask?");
2180 Mask = Mask.drop_front(ReplicationFactor);
2181 if (!
all_of(CurrSubMask, [CurrElt](
int MaskElt) {
2186 assert(Mask.empty() &&
"Did not consume the whole mask?");
2192 int &ReplicationFactor,
int &VF) {
2196 Mask.take_while([](
int MaskElt) {
return MaskElt == 0; }).
size();
2197 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2199 VF = Mask.size() / ReplicationFactor;
2211 for (
int MaskElt : Mask) {
2215 if (MaskElt < Largest)
2217 Largest = std::max(Largest, MaskElt);
2221 for (
int PossibleReplicationFactor :
2222 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2223 if (Mask.size() % PossibleReplicationFactor != 0)
2225 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2229 ReplicationFactor = PossibleReplicationFactor;
2241 if (isa<ScalableVectorType>(
getType()))
2244 VF = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2245 if (ShuffleMask.
size() % VF != 0)
2247 ReplicationFactor = ShuffleMask.
size() / VF;
2253 if (VF <= 0 || Mask.size() <
static_cast<unsigned>(VF) ||
2254 Mask.size() % VF != 0)
2256 for (
unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2261 for (
int Idx : SubMask) {
2275 if (isa<ScalableVectorType>(
getType()))
2297 unsigned NumElts = Mask.size();
2298 if (NumElts % Factor)
2301 unsigned LaneLen = NumElts / Factor;
2305 StartIndexes.
resize(Factor);
2311 for (;
I < Factor;
I++) {
2312 unsigned SavedLaneValue;
2313 unsigned SavedNoUndefs = 0;
2316 for (J = 0; J < LaneLen - 1; J++) {
2318 unsigned Lane = J * Factor +
I;
2319 unsigned NextLane = Lane + Factor;
2320 int LaneValue = Mask[Lane];
2321 int NextLaneValue = Mask[NextLane];
2324 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2325 LaneValue + 1 != NextLaneValue)
2329 if (LaneValue >= 0 && NextLaneValue < 0) {
2330 SavedLaneValue = LaneValue;
2339 if (SavedNoUndefs > 0 && LaneValue < 0) {
2341 if (NextLaneValue >= 0 &&
2342 SavedLaneValue + SavedNoUndefs != (
unsigned)NextLaneValue)
2347 if (J < LaneLen - 1)
2353 StartMask = Mask[
I];
2354 }
else if (Mask[(LaneLen - 1) * Factor +
I] >= 0) {
2356 StartMask = Mask[(LaneLen - 1) * Factor +
I] - J;
2357 }
else if (SavedNoUndefs > 0) {
2359 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2366 if (StartMask + LaneLen > NumInputElts)
2369 StartIndexes[
I] = StartMask;
2382 for (
unsigned Idx = 0;
Idx < Factor;
Idx++) {
2387 for (;
I < Mask.size();
I++)
2388 if (Mask[
I] >= 0 &&
static_cast<unsigned>(Mask[
I]) !=
Idx +
I * Factor)
2391 if (
I == Mask.size()) {
2405 int NumElts = Mask.size();
2406 assert((NumElts % NumSubElts) == 0 &&
"Illegal shuffle mask");
2409 for (
int i = 0; i != NumElts; i += NumSubElts) {
2410 for (
int j = 0; j != NumSubElts; ++j) {
2411 int M = Mask[i + j];
2414 if (M < i || M >= i + NumSubElts)
2416 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
2417 if (0 <= RotateAmt &&
Offset != RotateAmt)
2426 ArrayRef<int> Mask,
unsigned EltSizeInBits,
unsigned MinSubElts,
2427 unsigned MaxSubElts,
unsigned &NumSubElts,
unsigned &RotateAmt) {
2428 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2430 if (EltRotateAmt < 0)
2432 RotateAmt = EltRotateAmt * EltSizeInBits;
2451 assert(!Idxs.
empty() &&
"InsertValueInst must have at least one index");
2454 Val->
getType() &&
"Inserted value must match indexed type!");
2465 Indices(IVI.Indices) {
2480 assert(!Idxs.
empty() &&
"ExtractValueInst must have at least one index");
2489 Indices(EVI.Indices) {
2501 for (
unsigned Index : Idxs) {
2508 if (
ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2509 if (
Index >= AT->getNumElements())
2511 Agg = AT->getElementType();
2512 }
else if (
StructType *ST = dyn_cast<StructType>(Agg)) {
2513 if (
Index >= ST->getNumElements())
2515 Agg = ST->getElementType(
Index);
2521 return const_cast<Type*
>(Agg);
2541void UnaryOperator::AssertOK() {
2548 "Unary operation should return same type as operand!");
2550 "Tried to create a floating-point operation on a "
2551 "non-floating-point type!");
2572void BinaryOperator::AssertOK() {
2574 (void)LHS; (void)RHS;
2576 "Binary operator operand types must match!");
2582 "Arithmetic operation should return same type as operands!");
2584 "Tried to create an integer operation on a non-integer type!");
2586 case FAdd:
case FSub:
2589 "Arithmetic operation should return same type as operands!");
2591 "Tried to create a floating-point operation on a "
2592 "non-floating-point type!");
2597 "Arithmetic operation should return same type as operands!");
2599 "Incorrect operand type (not integer) for S/UDIV");
2603 "Arithmetic operation should return same type as operands!");
2605 "Incorrect operand type (not floating point) for FDIV");
2610 "Arithmetic operation should return same type as operands!");
2612 "Incorrect operand type (not integer) for S/UREM");
2616 "Arithmetic operation should return same type as operands!");
2618 "Incorrect operand type (not floating point) for FREM");
2624 "Shift operation should return same type as operands!");
2626 "Tried to create a shift operation on a non-integral type!");
2631 "Logical operation should return same type as operands!");
2633 "Tried to create a logical operation on a non-integral type!");
2644 "Cannot create binary operator with two operands of differing type!");
2650 Value *Zero = ConstantInt::get(
Op->getType(), 0);
2657 Value *Zero = ConstantInt::get(
Op->getType(), 0);
2658 return BinaryOperator::CreateNSWSub(Zero,
Op,
Name, InsertBefore);
2665 Op->getType(),
Name, InsertBefore);
2685 cast<Instruction>(
this)->getMetadata(LLVMContext::MD_fpmath);
2699 default:
return false;
2700 case Instruction::ZExt:
2701 case Instruction::SExt:
2702 case Instruction::Trunc:
2704 case Instruction::BitCast:
2725 case Instruction::Trunc:
2726 case Instruction::ZExt:
2727 case Instruction::SExt:
2728 case Instruction::FPTrunc:
2729 case Instruction::FPExt:
2730 case Instruction::UIToFP:
2731 case Instruction::SIToFP:
2732 case Instruction::FPToUI:
2733 case Instruction::FPToSI:
2734 case Instruction::AddrSpaceCast:
2737 case Instruction::BitCast:
2739 case Instruction::PtrToInt:
2740 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2742 case Instruction::IntToPtr:
2743 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2763 Type *DstIntPtrTy) {
2794 const unsigned numCastOps =
2795 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2796 static const uint8_t CastResults[numCastOps][numCastOps] = {
2802 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
2803 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0},
2804 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0},
2805 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
2806 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
2807 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
2808 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
2809 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
2810 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0},
2811 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0},
2812 { 99,99,99,99,99,99,99,99,99,11,99,15, 0},
2813 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14},
2814 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12},
2821 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2822 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2823 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2826 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2827 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2828 if (!AreBothBitcasts)
2831 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2832 [secondOp-Instruction::CastOpsBegin];
2877 return Instruction::BitCast;
2880 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2883 if (MidSize >= PtrSize)
2884 return Instruction::BitCast;
2894 return Instruction::BitCast;
2895 if (SrcSize < DstSize)
2897 if (SrcSize > DstSize)
2903 return Instruction::ZExt;
2911 if (SrcSize <= PtrSize && SrcSize == DstSize)
2912 return Instruction::BitCast;
2919 return Instruction::AddrSpaceCast;
2920 return Instruction::BitCast;
2931 "Illegal addrspacecast, bitcast sequence!");
2936 return Instruction::AddrSpaceCast;
2946 "Illegal inttoptr, bitcast sequence!");
2958 "Illegal bitcast, ptrtoint sequence!");
2963 return Instruction::UIToFP;
2978 case Trunc:
return new TruncInst (S, Ty,
Name, InsertBefore);
2979 case ZExt:
return new ZExtInst (S, Ty,
Name, InsertBefore);
2980 case SExt:
return new SExtInst (S, Ty,
Name, InsertBefore);
2982 case FPExt:
return new FPExtInst (S, Ty,
Name, InsertBefore);
3001 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3002 return Create(Instruction::ZExt, S, Ty,
Name, InsertBefore);
3008 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3009 return Create(Instruction::SExt, S, Ty,
Name, InsertBefore);
3015 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3016 return Create(Instruction::Trunc, S, Ty,
Name, InsertBefore);
3027 cast<VectorType>(Ty)->getElementCount() ==
3028 cast<VectorType>(S->
getType())->getElementCount()) &&
3032 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3043 return Create(Instruction::AddrSpaceCast, S, Ty,
Name, InsertBefore);
3045 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3052 return Create(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3054 return Create(Instruction::IntToPtr, S, Ty,
Name, InsertBefore);
3056 return Create(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3063 "Invalid integer cast");
3064 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3067 (SrcBits == DstBits ? Instruction::BitCast :
3068 (SrcBits > DstBits ? Instruction::Trunc :
3069 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3077 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3079 assert((
C->getType() == Ty || SrcBits != DstBits) &&
"Invalid cast");
3081 (SrcBits == DstBits ? Instruction::BitCast :
3082 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3090 if (SrcTy == DestTy)
3093 if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3094 if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3095 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3097 SrcTy = SrcVecTy->getElementType();
3098 DestTy = DestVecTy->getElementType();
3103 if (
PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3104 if (
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3105 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3117 if (SrcBits != DestBits)
3126 if (
auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3127 if (
auto *IntTy = dyn_cast<IntegerType>(DestTy))
3128 return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
3129 !
DL.isNonIntegralPointerType(PtrTy));
3130 if (
auto *PtrTy = dyn_cast<PointerType>(DestTy))
3131 if (
auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3132 return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
3133 !
DL.isNonIntegralPointerType(PtrTy));
3146 const Value *Src,
bool SrcIsSigned,
Type *DestTy,
bool DestIsSigned) {
3147 Type *SrcTy = Src->getType();
3150 "Only first class types are castable!");
3152 if (SrcTy == DestTy)
3156 if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3157 if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3158 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3161 SrcTy = SrcVecTy->getElementType();
3162 DestTy = DestVecTy->getElementType();
3172 if (DestBits < SrcBits)
3174 else if (DestBits > SrcBits) {
3188 assert(DestBits == SrcBits &&
3189 "Casting vector to integer of different width");
3193 "Casting from a value that is not first-class type");
3203 if (DestBits < SrcBits) {
3205 }
else if (DestBits > SrcBits) {
3211 assert(DestBits == SrcBits &&
3212 "Casting vector to floating point of different width");
3217 assert(DestBits == SrcBits &&
3218 "Illegal cast to vector (wrong type or size)");
3223 return AddrSpaceCast;
3249 bool SrcIsVec = isa<VectorType>(SrcTy);
3250 bool DstIsVec = isa<VectorType>(DstTy);
3257 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3259 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3264 default:
return false;
3265 case Instruction::Trunc:
3267 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3268 case Instruction::ZExt:
3270 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3271 case Instruction::SExt:
3273 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3274 case Instruction::FPTrunc:
3276 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3277 case Instruction::FPExt:
3279 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3280 case Instruction::UIToFP:
3281 case Instruction::SIToFP:
3284 case Instruction::FPToUI:
3285 case Instruction::FPToSI:
3288 case Instruction::PtrToInt:
3292 case Instruction::IntToPtr:
3296 case Instruction::BitCast: {
3302 if (!SrcPtrTy != !DstPtrTy)
3315 if (SrcIsVec && DstIsVec)
3316 return SrcEC == DstEC;
3324 case Instruction::AddrSpaceCast: {
3336 return SrcEC == DstEC;
3438 if (
Op == Instruction::ICmp) {
3466 if (
ICmpInst *IC = dyn_cast<ICmpInst>(
this))
3469 cast<FCmpInst>(
this)->swapOperands();
3473 if (
const ICmpInst *IC = dyn_cast<ICmpInst>(
this))
3474 return IC->isCommutative();
3475 return cast<FCmpInst>(
this)->isCommutative();
3521 default:
return "unknown";
3736 switch (predicate) {
3737 default:
return false;
3744 switch (predicate) {
3745 default:
return false;
3823 "Call only with non-equality predicates!");
3834 switch (predicate) {
3835 default:
return false;
3843 switch (predicate) {
3844 default:
return false;
3853 default:
return false;
3863 default:
return false;
3902 ReservedSpace = NumReserved;
3917 nullptr, 0, InsertBefore) {
3923 init(
SI.getCondition(),
SI.getDefaultDest(),
SI.getNumOperands());
3926 const Use *InOL =
SI.getOperandList();
3927 for (
unsigned i = 2, E =
SI.getNumOperands(); i != E; i += 2) {
3929 OL[i+1] = InOL[i+1];
3939 if (OpNo+2 > ReservedSpace)
3942 assert(OpNo+1 < ReservedSpace &&
"Growing didn't work!");
3952 unsigned idx =
I->getCaseIndex();
3960 if (2 + (idx + 1) * 2 != NumOps) {
3961 OL[2 + idx * 2] = OL[NumOps - 2];
3962 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3966 OL[NumOps-2].
set(
nullptr);
3967 OL[NumOps-2+1].
set(
nullptr);
3970 return CaseIt(
this, idx);
3976void SwitchInst::growOperands() {
3978 unsigned NumOps = e*3;
3980 ReservedSpace = NumOps;
3985 assert(Changed &&
"called only if metadata has changed");
3990 assert(SI.getNumSuccessors() == Weights->size() &&
3991 "num of prof branch_weights must accord with num of successors");
3993 bool AllZeroes =
all_of(*Weights, [](
uint32_t W) {
return W == 0; });
3995 if (AllZeroes || Weights->size() < 2)
4008 "not correspond to number of succesors");
4014 this->Weights = std::move(Weights);
4020 assert(SI.getNumSuccessors() == Weights->size() &&
4021 "num of prof branch_weights must accord with num of successors");
4026 (*Weights)[
I->getCaseIndex() + 1] = Weights->back();
4027 Weights->pop_back();
4029 return SI.removeCase(
I);
4035 SI.addCase(OnVal, Dest);
4037 if (!Weights && W && *W) {
4040 (*Weights)[SI.getNumSuccessors() - 1] = *W;
4041 }
else if (Weights) {
4043 Weights->push_back(W.value_or(0));
4046 assert(SI.getNumSuccessors() == Weights->size() &&
4047 "num of prof branch_weights must accord with num of successors");
4056 return SI.eraseFromParent();
4062 return std::nullopt;
4063 return (*Weights)[idx];
4075 auto &OldW = (*Weights)[idx];
4087 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4088 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4092 return std::nullopt;
4099void IndirectBrInst::init(
Value *
Address,
unsigned NumDests) {
4101 "Address of indirectbr must be a pointer");
4102 ReservedSpace = 1+NumDests;
4113void IndirectBrInst::growOperands() {
4115 unsigned NumOps = e*2;
4117 ReservedSpace = NumOps;
4121IndirectBrInst::IndirectBrInst(
Value *
Address,
unsigned NumCases,
4124 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
4130 nullptr, IBI.getNumOperands()) {
4132 Use *OL = getOperandList();
4143 if (OpNo+1 > ReservedSpace)
4146 assert(OpNo < ReservedSpace &&
"Growing didn't work!");
4160 OL[idx+1] = OL[NumOps-1];
4163 OL[NumOps-1].
set(
nullptr);
4234 Result->setWeak(
isWeak());
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isSigned(unsigned int Opcode)
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
float convertToFloat() const
Converts this APFloat to host float value.
Class for arbitrary precision integers.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
An instruction that atomically checks whether a specified value is in a memory location,...
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
BinOp getOperation() const
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
void setCallingConv(CallingConv::ID CC)
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
void setOnlyReadsMemory()
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
Value * getCalledOperand() const
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isEquality() const
Determine if this is an equals/not equals predicate.
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getUnsignedPredicate()
For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
bool isFPPredicate() const
void swapOperands()
This is just a convenience that dispatches to the subclasses.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
bool isStrictPredicate() const
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
bool isIntPredicate() const
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static constexpr ElementCount getFixed(ScalarTy MinVal)
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
BasicBlock * getBasicBlock()
This instruction inserts a struct field or array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
This class represents a cast from an integer to a pointer.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
LLVMContextImpl *const pImpl
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
An instruction for reading from memory.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
const MDOperand & getOperand(unsigned I) const
This class implements a map that also provides access to all stored values in a deterministic order.
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
bool doesNotAccessMemory() const
Whether this function accesses no memory.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Class to represent struct types.
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
MDNode * buildProfBranchWeightsMD()
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
bool isAggregateType() const
Return true if the type is an aggregate type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast from unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
This function has undefined behavior.
UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
const Use * getOperandList() const
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
VAArgInst * cloneImpl() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
base_list_type::iterator iterator
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
unsigned getPointerAddressSpace(const Type *T)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
constexpr int PoisonMaskElem
unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
@ Default
The result values are uniform if and only if all operands are uniform.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Describes an element of a Bitfield.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Compile-time customization of User operands.