52 "disable-i2p-p2i-opt",
cl::init(
false),
53 cl::desc(
"Disables inttoptr/ptrtoint roundtrip optimization"));
std::optional<TypeSize>
    assert(!Size.isScalable() && "Array elements cannot have a scalable size");
    Size *= C->getZExtValue();
std::optional<TypeSize>
  return "both values to select must have same type";
  return "select values cannot have token type";
    return "vector select condition element type must be i1";
    return "selected values for vector select must be vectors";
    return "vector select requires selected vectors to have "
           "the same vector length as select condition";
  return "select condition must be i1 or <n x i1>";
PHINode::PHINode(const PHINode &PN)
      ReservedSpace(PN.getNumOperands()) {
  Op<-1>().set(nullptr);
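// Note: growOperands() below enlarges the reserved hung-off operand space by
// roughly 1.5x, never dropping below two slots.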
void PHINode::growOperands() {
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;
  ReservedSpace = NumOps;

    if (ConstantValue != this)
  if (ConstantValue == this)
  return ConstantValue;

  Value *ConstantValue = nullptr;
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
      ConstantValue = Incoming;
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
  init(NumReservedValues, NameStr);

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
  init(NumReservedValues, NameStr);

                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)

                                       const Twine &NameStr,
                                       const Twine &NameStr,

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;

void LandingPadInst::growOperands(unsigned Size) {
  if (ReservedSpace >= e + Size)
    return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;

  assert(OpNo < ReservedSpace && "Growing didn't work!");
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    if (ChildOB.getTagName() != OpB.getTag())
    return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
  if (isa<Function>(V) || isa<Constant>(V))
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
    return F->getIntrinsicID();
    Mask |= F->getAttributes().getRetNoFPClass();
      Mask |= F->getAttributes().getParamNoFPClass(i);
  if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
  case Attribute::ReadNone:
  case Attribute::ReadOnly:
  case Attribute::WriteOnly:

  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);
  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);
  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().hasFnAttr(Kind);

template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);
  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);
                               const unsigned BeginIndex) {
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;
    assert(BI != Bundles.end() && "Incorrect allocation?");
    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
  assert(BI == Bundles.end() && "Incorrect allocation?");
    if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
         "The Idx isn't in the operand bundle");
  constexpr unsigned NumberScaling = 1024;
  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
    if (OpIdx >= Current->End)
         "the operand bundle doesn't cover every value in the range");

  return Create(CB, Bundles, InsertPt);
  bool CreateNew = false;
    if (Bundle.getTagID() == ID) {
  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;

         "NumOperands not set up?");
         "Calling a function with bad signature!");
  for (unsigned i = 0; i != Args.size(); ++i)
           "Calling a function with a bad signature!");
CallInst::CallInst(const CallInst &CI)
                     CI.getNumOperands()) {
                              Args, OpB, CI->getName(), InsertPt);
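// Note: updateProfWeight() below rescales "branch_weights" / "VP" !prof
// metadata by a factor of S/T using 128-bit APInt arithmetic, skipping the
// update (with a debug message) when T is zero.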
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
  Vals.push_back(ProfileData->getOperand(0));
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
  } else if (ProfDataName->getString().equals("VP"))
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      Vals.push_back(ProfileData->getOperand(i));
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
        Vals.push_back(ProfileData->getOperand(i + 1));
        APInt Val(128, Count);

  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
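// Note: createMalloc() below computes the allocation size as
// ArraySize * sizeof(AllocTy) (constant-folded when ArraySize is a Constant),
// calls getOrInsertFunction("malloc", ...), and bitcasts the result when the
// returned pointer type differs from the requested one.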
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");
  else if (ArraySize->getType() != IntPtrTy) {

      AllocSize = ArraySize;
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
    if (Result->getType() != AllocPtrType)
    if (Result->getType() != AllocPtrType) {
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();

  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, std::nullopt, MallocF, Name);
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, std::nullopt, MallocF, Name);
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);

  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  Value *PtrCast = Source;
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
  Result->setTailCall();
    Result->setCallingConv(F->getCallingConv());

  return createFree(Source, std::nullopt, InsertBefore, nullptr);
  return createFree(Source, Bundles, InsertBefore, nullptr);
      createFree(Source, std::nullopt, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  assert(FreeCall && "CreateFree did not create a CallInst");
                      const Twine &NameStr) {
         "NumOperands not set up?");
         "Invoking a function with bad signature");
  for (unsigned i = 0, e = Args.size(); i != e; i++)
           "Invoking a function with a bad signature!");
                  II.getNumOperands()) {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());

                        const Twine &NameStr) {
      ComputeNumOperands(Args.size(), IndirectDests.size(),
         "NumOperands not set up?");
         "Calling a function with bad signature");
  for (unsigned i = 0, e = Args.size(); i != e; i++)
           "Calling a function with a bad signature!");
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  for (unsigned i = 0; i != NumIndirectDests; ++i)
                  CBI.getNumOperands()) {
  NumIndirectDests = CBI.NumIndirectDests;
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;

                  RI.getNumOperands()) {
    Op<0>() = RI.Op<0>();
    Op<0>() = RI.Op<0>();

                  CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
  Op<0>() = CRI.Op<0>();
    Op<1>() = CRI.Op<1>();
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
    setSubclassData<UnwindDestField>(true);
  Op<0>() = CleanupPad;

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);

  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);

                  CSI.getNumOperands()) {
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)

                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);
  ReservedSpace = NumReservedValues;
  Op<0>() = ParentPad;
    setSubclassData<UnwindDestField>(true);

void CatchSwitchInst::growOperands(unsigned Size) {
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
  ReservedSpace = (NumOperands + Size / 2) * 2;

  assert(OpNo < ReservedSpace && "Growing didn't work!");
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);

                          const Twine &NameStr) {
                  FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  init(ParentPad, Args, NameStr);
  init(ParentPad, Args, NameStr);
void BranchInst::AssertOK() {
           "May only branch on boolean predicates!");
  assert(IfTrue && "Branch destination may not be null!");
  assert(IfTrue && "Branch destination may not be null!");
                  BI.getNumOperands()) {
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  Op<-1>() = BI.Op<-1>();
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  assert(!isa<BasicBlock>(Amt) &&
         "Passed basic block into allocation size parameter! Use other ctor");
         "Allocation array size is not an integer!");
    assert(BB && "Insertion BB cannot be null when alignment not provided!");
           "BB must be in a Function when alignment not provided!");
    return DL.getPrefTypeAlign(Ty);
  assert(I && "Insertion position cannot be null when alignment not provided!");
                 getAISize(Ty->getContext(), ArraySize), InsertBefore),
                 getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
    return !CI->isOne();

void LoadInst::AssertOK() {
         "Ptr must have pointer type.");
    assert(BB && "Insertion BB cannot be null when alignment not provided!");
           "BB must be in a Function when alignment not provided!");
    return DL.getABITypeAlign(Ty);
  assert(I && "Insertion position cannot be null when alignment not provided!");
               SyncScope::System, InsertBef) {}
               SyncScope::System, InsertAE) {}
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
  assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));

void StoreInst::AssertOK() {
         "Ptr must have pointer type!");
         "Ptr must be a pointer to Val type!");
                SyncScope::System, InsertBefore) {}
                SyncScope::System, InsertAtEnd) {}
1703 "All operands must be non-null!");
1705 "Ptr must have pointer type!");
1708 "Ptr must be a pointer to Cmp type!");
1711 "Ptr must be a pointer to NewVal type!");
1713 "Cmp type and NewVal type must be same!");
1726 Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1739 Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1750 "atomicrmw instructions can only be atomic.");
1752 "atomicrmw instructions cannot be unordered.");
1761 "All operands must be non-null!");
1763 "Ptr must have pointer type!");
1766 "Ptr must be a pointer to Val type!");
1768 "AtomicRMW instructions must be atomic!");
1826 return "<invalid operation>";
1859 "NumOperands not initialized?");
1868 GEPI.getNumOperands(),
1869 GEPI.getNumOperands()),
1870 SourceElementType(GEPI.SourceElementType),
1871 ResultElementType(GEPI.ResultElementType) {
1877 if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1882 if (!
Idx->getType()->isIntOrIntVectorTy())
1884 if (
auto *Array = dyn_cast<ArrayType>(Ty))
1885 return Array->getElementType();
1886 if (
auto *
Vector = dyn_cast<VectorType>(Ty))
1887 return Vector->getElementType();
1892 if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1897 if (
auto *Array = dyn_cast<ArrayType>(Ty))
1898 return Array->getElementType();
1899 if (
auto *
Vector = dyn_cast<VectorType>(Ty))
1900 return Vector->getElementType();
1904template <
typename IndexTy>
1906 if (IdxList.
empty())
1908 for (IndexTy V : IdxList.
slice(1)) {
1935 if (!CI->isZero())
return false;
1955 cast<GEPOperator>(
this)->setIsInBounds(
B);
1959 return cast<GEPOperator>(
this)->isInBounds();
1965 return cast<GEPOperator>(
this)->accumulateConstantOffset(
DL,
Offset);
1971 APInt &ConstantOffset)
const {
1973 return cast<GEPOperator>(
this)->collectOffset(
DL,
BitWidth, VariableOffsets,
1989 "Invalid extractelement instruction operands!");
2003 "Invalid extractelement instruction operands!");
2027 "Invalid insertelement instruction operands!");
2041 "Invalid insertelement instruction operands!");
2054 if (Elt->
getType() != cast<VectorType>(Vec->
getType())->getElementType())
2057 if (!
Index->getType()->isIntegerTy())
2067 assert(V &&
"Cannot create placeholder of nullptr V");
2101 "Invalid shuffle vector instruction operands!");
2119 "Invalid shuffle vector instruction operands!");
2138 "Invalid shuffle vector instruction operands!");
2153 "Invalid shuffle vector instruction operands!");
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  for (int i = 0; i != NumMaskElts; ++i) {
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  Op<0>().swap(Op<1>());

  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
  if (isa<ScalableVectorType>(V1->getType()))

                                        const Value *Mask) {
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
      } else if (!isa<UndefValue>(Op)) {
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
      if (CDS->getElementAsInteger(i) >= V1Size*2)
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
  Result.reserve(EC.getKnownMinValue());
  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
  unsigned NumElts = EC.getKnownMinValue();
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  ShuffleMask.assign(Mask.begin(), Mask.end());

  if (isa<ScalableVectorType>(ResultTy)) {
  for (int Elem : Mask) {
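// Note: the helper below treats a mask as single-source when every defined
// element selects from only one of the two input operands (all indices below
// NumOpElts, or all at or above it).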
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int I : Mask) {
    assert(I >= 0 && I < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (I < NumOpElts);
    UsesRHS |= (I >= NumOpElts);
    if (UsesLHS && UsesRHS)
  return UsesLHS || UsesRHS;

  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))

  int NumElts = Mask.size();
  for (int i = 0; i < NumElts; ++i) {
    if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))

  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] != 0 && Mask[i] != NumElts)

  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] != i && Mask[i] != (NumElts + i))

  int NumElts = Mask.size();
  if (Mask[0] != 0 && Mask[0] != 1)
  if ((Mask[1] - Mask[0]) != NumElts)
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)

  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
    if (StartIndex == -1) {
      if (MaskEltVal < I || E <= (MaskEltVal - I))
      StartIndex = MaskEltVal - I;
    if (MaskEltVal != (StartIndex + I))
  if (StartIndex == -1)
                                               int NumSrcElts, int &Index) {
  if (NumSrcElts <= (int)Mask.size())
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {

                                              int NumSrcElts, int &NumSubElts,
  int NumMaskElts = Mask.size();
  if (NumMaskElts < NumSrcElts)
  bool Src0Identity = true;
  bool Src1Identity = true;
  for (int i = 0; i != NumMaskElts; ++i) {
    if (M < NumSrcElts) {
      Src0Identity &= (M == i);
      Src1Identity &= (M == (i + NumSrcElts));
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
         "2-source shuffle not found");
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
    int NumSub1Elts = Src1Hi - Src1Lo;
      NumSubElts = NumSub1Elts;
    int NumSub0Elts = Src0Hi - Src0Lo;
      NumSubElts = NumSub0Elts;

  if (isa<UndefValue>(Op<2>()))
  if (isa<ScalableVectorType>(getType()))
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
  for (int i = NumOpElts; i < NumMaskElts; ++i)

  if (isa<UndefValue>(Op<2>()))
  if (isa<ScalableVectorType>(getType()))
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)

  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
      isa<UndefValue>(Op<2>()))
  if (isa<ScalableVectorType>(getType()))
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
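// Note: a replication mask repeats each of VF elements ReplicationFactor
// times in order (e.g. <0,0,1,1,2,2> for VF = 3, factor 2); the helpers below
// both verify a given factorization and search for a feasible one.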
                              int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");
  for (int CurrElt : seq(0, VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
  assert(Mask.empty() && "Did not consume the whole mask?");

                                          int &ReplicationFactor, int &VF) {
      Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
  if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
  VF = Mask.size() / ReplicationFactor;

  for (int MaskElt : Mask) {
    if (MaskElt < Largest)
    Largest = std::max(Largest, MaskElt);
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    ReplicationFactor = PossibleReplicationFactor;

  if (isa<ScalableVectorType>(getType()))
  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
  ReplicationFactor = ShuffleMask.size() / VF;

  if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
      Mask.size() % VF != 0)
  for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
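// Note: the interleave-mask check below walks each of the Factor lanes,
// requiring consecutive mask values within a lane (tolerating undef gaps) and
// recording each lane's starting source index in StartIndexes.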
  if (isa<ScalableVectorType>(getType()))
  unsigned NumElts = Mask.size();
  if (NumElts % Factor)
  unsigned LaneLen = NumElts / Factor;
  StartIndexes.resize(Factor);
  for (; I < Factor; I++) {
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;
    for (J = 0; J < LaneLen - 1; J++) {
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
    if (J < LaneLen - 1)
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    if (StartMask + LaneLen > NumInputElts)
    StartIndexes[I] = StartMask;
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");
         Val->getType() && "Inserted value must match indexed type!");
      Indices(IVI.Indices) {
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
      Indices(EVI.Indices) {
  for (unsigned Index : Idxs) {
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
      Agg = ST->getElementType(Index);
  return const_cast<Type*>(Agg);

void UnaryOperator::AssertOK() {
         "Unary operation should return same type as operand!");
         "Tried to create a floating-point operation on a "
         "non-floating-point type!");

void BinaryOperator::AssertOK() {
  (void)LHS; (void)RHS;
         "Binary operator operand types must match!");
           "Arithmetic operation should return same type as operands!");
           "Tried to create an integer operation on a non-integer type!");
  case FAdd:
  case FSub:
           "Arithmetic operation should return same type as operands!");
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
           "Arithmetic operation should return same type as operands!");
           "Incorrect operand type (not integer) for S/UDIV");
           "Arithmetic operation should return same type as operands!");
           "Incorrect operand type (not floating point) for FDIV");
           "Arithmetic operation should return same type as operands!");
           "Incorrect operand type (not integer) for S/UREM");
           "Arithmetic operation should return same type as operands!");
           "Incorrect operand type (not floating point) for FREM");
           "Shift operation should return same type as operands!");
           "Tried to create a shift operation on a non-integral type!");
           "Logical operation should return same type as operands!");
           "Tried to create a logical operation on a non-integral type!");

         "Cannot create binary operator with two operands of differing type!");
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
  return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
  Op<0>().swap(Op<1>());
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
  case Instruction::BitCast:

  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  case Instruction::PtrToInt:
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
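// Note: in the CastResults table below, the row is selected by the first cast
// opcode and the column by the second; 0 means the pair cannot be folded, 99
// marks invalid combinations that should never occur, and the remaining codes
// select specific folding actions handled later in this function.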
                                       Type *DstIntPtrTy) {
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0},
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0},
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0},
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0},
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0},
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14},
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12},
  };

  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];

      return Instruction::BitCast;
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
        return Instruction::BitCast;
      if (SrcSize < DstSize)
      if (SrcSize > DstSize)
      return Instruction::ZExt;
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
             "Illegal addrspacecast, bitcast sequence!");
      return Instruction::AddrSpaceCast;
             "Illegal inttoptr, bitcast sequence!");
             "Illegal bitcast, ptrtoint sequence!");
      return Instruction::UIToFP;
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);

  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);

    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);

          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
  return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
  return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
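// Note: the integer and FP cast helpers below choose the opcode from scalar
// bit widths alone: equal widths use BitCast, narrowing uses Trunc/FPTrunc,
// and widening uses ZExt or SExt (per isSigned) for integers and FPExt for
// floating point.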
3615 "Invalid integer cast");
3616 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3619 (SrcBits == DstBits ? Instruction::BitCast :
3620 (SrcBits > DstBits ? Instruction::Trunc :
3621 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3630 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3633 (SrcBits == DstBits ? Instruction::BitCast :
3634 (SrcBits > DstBits ? Instruction::Trunc :
3635 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3644 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3647 (SrcBits == DstBits ? Instruction::BitCast :
3648 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3657 unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3660 (SrcBits == DstBits ? Instruction::BitCast :
3661 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3669 if (SrcTy == DestTy)
3672 if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3673 if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3674 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3676 SrcTy = SrcVecTy->getElementType();
3677 DestTy = DestVecTy->getElementType();
3682 if (
PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3683 if (
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3684 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3696 if (SrcBits != DestBits)
3708 if (
auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3709 if (
auto *IntTy = dyn_cast<IntegerType>(DestTy))
3710 return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
3711 !
DL.isNonIntegralPointerType(PtrTy));
3712 if (
auto *PtrTy = dyn_cast<PointerType>(DestTy))
3713 if (
auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3714 return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
3715 !
DL.isNonIntegralPointerType(PtrTy));
    const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();
         "Only first class types are castable!");
  if (SrcTy == DestTy)
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      if (DestBits < SrcBits)
      else if (DestBits > SrcBits) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
             "Casting from a value that is not first-class type");
      if (DestBits < SrcBits) {
      } else if (DestBits > SrcBits) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      assert(DestBits == SrcBits &&
             "Illegal cast to vector (wrong type or size)");
        return AddrSpaceCast;
    assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");

  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
  default: return false;
  case Instruction::Trunc:
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::BitCast: {
    if (!SrcPtrTy != !DstPtrTy)
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
  case Instruction::AddrSpaceCast: {
    return SrcEC == DstEC;
    ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {

  if (Op == Instruction::ICmp) {
  if (Op == Instruction::ICmp) {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
  cast<FCmpInst>(this)->swapOperands();
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
  default: return "unknown";
  switch (predicate) {
  default: return false;
  switch (predicate) {