#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "irtranslator"

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
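// DILocationVerifier (excerpted below) is a GISelChangeObserver used in
// asserts builds: while an IR instruction is being translated, it checks
// that every MachineInstr created inherits that instruction's DILocation.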
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

    assert(getCurrentInst() && "Inserted instruction without a current MI");

    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);

    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)

  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

    return *VMap.getVRegs(Val);

  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)

  auto &C = cast<Constant>(Val);
  while (auto Elt = C.getAggregateElement(Idx++)) {
    auto EltRegs = getOrCreateVRegs(*Elt);
    llvm::copy(EltRegs, std::back_inserter(*VRegs));

  assert(SplitTys.size() == 1 && "unexpectedly split LLT");

  bool Success = translate(cast<Constant>(Val), VRegs->front());

    R << "unable to translate constant: " << ore::NV("Type", Val.getType());
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];

  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();

    R << "unable to translate memop: " << ore::NV("Opcode", &I);

  assert(MBB && "BasicBlock was not encountered before");

  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
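// Simple arithmetic and logical instructions map one-to-one onto generic
// opcodes: e.g. 'add i32 %a, %b' becomes G_ADD and 'fneg' becomes G_FNEG.
// The translators below just look up (or create) the operand vregs and emit
// a single instruction, copying fast-math/wrapping flags when present.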
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));

  if (isa<Instruction>(U)) {

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));

  if (isa<Instruction>(U)) {

  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));

    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);

    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

    VRegs = getOrCreateVRegs(*Ret);

  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
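// Conditional branches reuse SelectionDAG's SwitchCG machinery:
// findMergedConditions splits 'and'/'or' chains in a branch condition into a
// list of CaseBlocks so each piece becomes its own conditional branch with
// sensible probabilities; emitBranchForMergedCondition queues one CaseBlock.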
void IRTranslator::emitBranchForMergedCondition(

  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);

                         nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(),
                         TProb, FProb);
  SL->SwitchCases.push_back(CB);

  return I->getParent() == BB;

void IRTranslator::findMergedConditions(

  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");

    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,

  const Value *BOpOp0, *BOpOp1;

    if (BOpc == Instruction::And)
      BOpc = Instruction::Or;
    else if (BOpc == Instruction::Or)
      BOpc = Instruction::And;

  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();

    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,

  if (Opc == Instruction::Or) {

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;

    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);

    assert(Opc == Instruction::And && "Unknown merge op!");

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;

    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {

  if (Cases.size() != 2)

  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {

        Cases[0].TrueBB == Cases[1].ThisBB)
        Cases[0].FalseBB == Cases[1].ThisBB)

  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();

      !CurMBB.isLayoutSuccessor(Succ0MBB))

    CurMBB.addSuccessor(&getMBB(*Succ));

  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);

      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {

    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;

    findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                         getEdgeProbability(&CurMBB, Succ0MBB),
                         getEdgeProbability(&CurMBB, Succ1MBB),

    assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

    if (shouldEmitAsBranches(SL->SwitchCases)) {

      emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
      SL->SwitchCases.erase(SL->SwitchCases.begin());

      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();

                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  emitSwitchCase(CB, &CurMBB, *CurBuilder);

    Src->addSuccessorWithoutProb(Dst);

    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();

  auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
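// Switch lowering follows the SelectionDAG scheme: build one CaseCluster per
// case, let SwitchLowering merge them into jump-table and bit-test clusters,
// then drain a worklist of SwitchWorkListItems, splitting oversized items
// around a pivot before lowering each one.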
  using namespace SwitchCG;

  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    assert(Succ && "Could not find successor mbb in mapping");
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));

  if (Clusters.empty()) {

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
      if (C.Kind == CC_BitTests)

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        C.High->getValue().print(dbgs(), true);

  assert(!Clusters.empty());

  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 &&
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;

        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});

  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;

        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);

    SL->SwitchCases.push_back(CB);
  assert(JT.Reg != -1U && "Should lower JT Header first!");

  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  JT.Reg = Sub.getReg(0);

  auto Cst = getOrCreateVReg(

  const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);

  assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
         "Can only handle SLE ranges");

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {

      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
    bool FallthroughUnreachable) {
  using namespace SwitchCG;

  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;

  CurMF->insert(BBI, JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;

      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);

  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough;

  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
    JTH->Emitted = true;
    bool FallthroughUnreachable,
  using namespace SwitchCG;

  if (I->Low == I->High) {

  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,

  emitSwitchCase(CB, SwitchMBB, MIB);
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  LLT MaskTy = SwitchOpTy;

  for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {

  if (SwitchOpTy != MaskTy)

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  if (!B.FallthroughUnreachable) {
                              RangeSub, RangeCst);

  if (PopCount == 1) {
    auto MaskTrailingZeros =
  } else if (PopCount == BB.Range) {
    auto MaskTrailingOnes =

    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);

    addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);

  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
bool IRTranslator::lowerBitTestWorkItem(
    bool FallthroughUnreachable) {
  using namespace SwitchCG;

  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];

  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;

  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
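// lowerSwitchWorkItem walks the clusters of one work item, preferring to put
// a range cluster that branches to the next block last (so it can fall
// through), and dispatches each cluster to the bit-test, jump-table, or
// range-compare emitters above.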
  using namespace SwitchCG;

  if (++BBI != FuncInfo.MF->end())

        [](const CaseCluster &a, const CaseCluster &b) {
          return a.Prob != b.Prob
                     ? a.Prob > b.Prob
                     : a.Low->getValue().slt(b.Low->getValue());

    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      if (I->Prob > W.LastCluster->Prob)
      if (I->Kind == CC_Range && I->MBB == NextMBB) {

  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;

    if (I == W.LastCluster) {
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());

      CurMF->insert(BBI, Fallthrough);

    UnhandledProbs -= I->Prob;

      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {

      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {

      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {

    CurMBB = Fallthrough;
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {

    if (!AddedSuccessors.insert(Succ).second)

  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))

  const LoadInst &LI = cast<LoadInst>(U);

    assert(Regs.size() == 1 && "swifterror should be single pointer");

  for (unsigned i = 0; i < Regs.size(); ++i) {
    Align BaseAlign = getMemOpAlign(LI);

    assert(Vals.size() == 1 && "swifterror should be single pointer");

        SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Align BaseAlign = getMemOpAlign(SI);

        SI.getSyncScopeID(), SI.getOrdering());
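// extractvalue/insertvalue never touch memory here: getOffsetFromIndices
// computes the byte offset of the selected member, and the translators just
// re-index the flattened vreg/offset arrays produced for the aggregate value.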
  const Value *Src = U.getOperand(0);

  Indices.push_back(ConstantInt::get(Int32Ty, 0));

    for (auto Idx : EVI->indices())
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())

    for (unsigned i = 1; i < U.getNumOperands(); ++i)

      DL.getIndexedOffsetInType(Src->getType(), Indices));

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);

  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);

  auto &DstRegs = allocateVRegs(U);

  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
      DstRegs[i] = SrcRegs[i];
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));

  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  auto &Regs = *VMap.getVRegs(U);

    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {

    if (isa<ConstantInt>(U.getOperand(0)))
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  if (U.getType()->getScalarType()->isBFloatTy() ||
      U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
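// GEP translation folds constant indices into a running byte offset and
// emits G_MUL/G_PTR_ADD for the dynamic ones; when the GEP produces a vector
// of pointers, scalar operands are first splat to the vector width.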
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);

  unsigned VectorWidth = 0;

  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    WantSplatVector = VectorWidth > 1;

  if (WantSplatVector && !PtrTy.isVector()) {
    BaseReg = MIRBuilder

    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();

      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);

      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;

        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))

      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {

      if (ElementSize != 1) {
        GepOffsetReg = IdxReg;

  if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
bool IRTranslator::translateMemFunc(const CallInst &CI,

  if (isa<UndefValue>(SrcPtr))

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);

  if (MRI->getType(SizeOpReg) != SizeTy)

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {

  if (AA && CopySize &&

  ICall.addMemOperand(
      StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)

bool IRTranslator::translateTrap(const CallInst &CI,

  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {

  if (Opcode == TargetOpcode::G_UBSANTRAP)

  return CLI->lowerCall(MIRBuilder, Info);
bool IRTranslator::translateVectorInterleave2Intrinsic(
         "This function can only be called on the interleave2 intrinsic!");

  Register Res = getOrCreateVReg(CI);

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
         "This function can only be called on the deinterleave2 intrinsic!");

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {

  MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();

  MIB.setMemRefs({MemRef});
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {

      Op, {ResRegs[0], ResRegs[1]},

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);

  MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
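// Intrinsics in the following table map one-to-one onto a generic opcode
// with the same operands, e.g. 'call float @llvm.sqrt.f32(float %x)' simply
// becomes 'G_FSQRT %x'; translateSimpleIntrinsic handles all of them
// uniformly.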
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;

  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  for (const auto &Arg : CI.args())

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,

  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
bool IRTranslator::translateConstrainedFPIntrinsic(

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;

bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,

  auto *Arg = dyn_cast<Argument>(Val);

  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);

    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");
  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;

bool IRTranslator::translateConvergenceControlIntrinsic(

  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);

  if (ID == Intrinsic::experimental_convergence_loop) {
    assert(Bundle && "Expected a convergence control token.");
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {

  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
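// translateKnownIntrinsic handles the remaining target-independent
// intrinsics individually: lifetime markers, debug intrinsics, overflow and
// saturating arithmetic, the mem* family, stack-protector plumbing, and so
// on. Anything not matched here falls back to a plain call or a generic
// G_INTRINSIC in translateCall.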
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);

  case Intrinsic::dbg_declare: {

  case Intrinsic::dbg_label: {
           "Expected inlined-at fields to agree");

  case Intrinsic::vaend:

  case Intrinsic::vastart: {

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
                                                ListSize, Alignment));

  case Intrinsic::dbg_assign:

  case Intrinsic::dbg_value: {

  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    Register Dst = getOrCreateVReg(CI);

      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,

  case Intrinsic::convert_from_fp16:

  case Intrinsic::convert_to_fp16:

  case Intrinsic::frexp: {

  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {

  case Intrinsic::objectsize:

  case Intrinsic::is_constant:

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);

  case Intrinsic::stackprotector: {
    getStackGuard(GuardVal, MIRBuilder);

    int FI = getOrCreateFrameIndex(*Slot);
        GuardVal, getOrCreateVReg(*Slot),

  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});

  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},

  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},

  case Intrinsic::invariant_start: {

  case Intrinsic::invariant_end:

  case Intrinsic::expect:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    MIRBuilder.buildCopy(getOrCreateVReg(CI),

  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:

  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));

  case Intrinsic::write_register: {
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))

  case Intrinsic::localescape: {
      if (isa<ConstantPointerNull>(Arg))
      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    Register Dst = getOrCreateVReg(CI);

      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},

    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;

    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},

  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),

  case Intrinsic::amdgcn_cs_chain:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {

    std::optional<RoundingMode> RoundMode =

        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
        .addImm((int)*RoundMode);

  case Intrinsic::is_fpclass: {
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})

  case Intrinsic::set_fpenv: {

  case Intrinsic::reset_fpenv:

  case Intrinsic::set_fpmode: {

  case Intrinsic::reset_fpmode:

  case Intrinsic::vscale: {

  case Intrinsic::prefetch: {
    unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
    unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
    unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();

  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
    return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);

#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),

  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {

      dbgs() << "Inline asm lowering is not supported for this target yet\n");

      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });

bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {

  for (const auto &Arg : CB.args()) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
          &CB, &MIRBuilder.getMBB(), Arg));
      Args.push_back(getOrCreateVRegs(*Arg));

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {

  std::optional<CallLowering::PtrAuthInfo> PAI;

    Register DiscReg = getOrCreateVReg(*Discriminator);

    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);

      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,

    assert(!HasTailCall && "Can't tail call return twice from block?");
  const CallInst &CI = cast<CallInst>(U);

  if (F && (F->hasDLLImportStorageClass() ||
            F->hasExternalWeakLinkage())))

  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))

    return translateInlineAsm(CI, MIRBuilder);

  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();

    return translateCallBase(CI, MIRBuilder);

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))

    ResultRegs = getOrCreateVRegs(CI);

  if (isa<FPMathOperator>(CI))

    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      MIB.addImm(CI->getSExtValue());
      MIB.addFPImm(cast<ConstantFP>(Arg.value()));
    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MD = MDVal->getMetadata();
      auto *MDN = dyn_cast<MDNode>(MD);
        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))

      if (VRegs.size() > 1)

        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());

    else if (Info.fallbackAddressSpace)

    auto *Token = Bundle->Inputs[0].get();
    Register TokenReg = getOrCreateVReg(*Token);
bool IRTranslator::findUnwindDestinations(

    if (isa<LandingPadInst>(Pad)) {
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

    if (isa<CleanupPadInst>(Pad)) {
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();

    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();

    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {

  const Function *Fn = I.getCalledFunction();

  if (I.hasDeoptState())

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
  } else if (!translateCallBase(I, MIRBuilder))

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

                    &ReturnMBB = getMBB(*ReturnBB);

  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);

  MIRBuilder.buildBr(ReturnMBB);
bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {

  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)

  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))

  for (Type *Ty : cast<StructType>(LP.getType())->elements())
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);

  if (MRI->getType(NumElts) != IntPtrTy) {

      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,

  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  if (Alignment <= StackAlign)
    Alignment = Align(1);

  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});

  auto &UI = cast<UnreachableInst>(U);

  if (&UI != &BB.front()) {
    if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
      if (Call->doesNotReturn())
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {

  if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
      FVT && FVT->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));

  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);

    Idx = getOrCreateVReg(*U.getOperand(2));

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {

  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));

  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);

    Idx = getOrCreateVReg(*U.getOperand(1));
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {

  if (U.getOperand(0)->getType()->isScalableTy()) {
    Value *Op0 = U.getOperand(0);
                               getOrCreateVReg(*Op0), 0);

  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
    Mask = cast<ConstantExpr>(U).getShuffleMask();

      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);

  const PHINode &PI = cast<PHINode>(U);

  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});

  PendingPHIs.emplace_back(&PI, std::move(Insts));
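// The G_PHIs created above have no operands yet; the incoming values are
// filled in by finishPendingPhis once every predecessor block has been
// translated and its values have vregs.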
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {

  auto Res = getOrCreateVRegs(I);

  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

      OldValRes, SuccessRes, Addr, Cmp, NewVal,
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {

  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;

      Opcode, Res, Addr, Val,
          Flags, MRI->getType(Val), getMemOpAlign(I),
          I.getAAMetadata(), nullptr, I.getSyncScopeID(),
bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);

bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {

         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
void IRTranslator::finishPendingPhis() {

  for (auto &Phi : PendingPHIs) {

    for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
      for (unsigned j = 0; j < ValRegs.size(); ++j) {
void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,

         "Expected inlined-at fields to agree");

  if (!V || HasArgList) {

  if (const auto *CI = dyn_cast<Constant>(V)) {

  if (auto *AI = dyn_cast<AllocaInst>(V);

    auto ExprOperands = Expression->getElements();
    auto *ExprDerefRemoved =

  if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,

  for (Register Reg : getOrCreateVRegs(*V)) {

void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,

    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");

         "Expected inlined-at fields to agree");
  auto AI = dyn_cast<AllocaInst>(Address);

                                    getOrCreateFrameIndex(*AI), DL);

  if (translateIfEntryValueArgument(true, Address, Variable,

void IRTranslator::translateDbgInfo(const Instruction &Inst,

      assert(DLR->getLabel() && "Missing label");
      assert(DLR->getLabel()->isValidLocationForIntrinsic(
             "Expected inlined-at fields to agree");
bool IRTranslator::translate(const Instruction &Inst) {

  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
  CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));

#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"

  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
    Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
    EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
  } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!isa<FixedVectorType>(CAZ->getType()))
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant &Elt = *CAZ->getElementValue(I);
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
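// finalizeBasicBlock flushes the per-block switch lowering state (bit-test
// blocks, jump tables, pending CaseBlocks) and, if needed, emits the stack
// protector check and failure blocks that were split off from this block.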
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
      emitBitTestHeader(BTB, BTB.Parent);

    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        NextMBB = BTB.Default;
        NextMBB = BTB.Cases[j + 1].ThisBB;

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
        BTB.Cases.pop_back();

    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);

  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
  bool FunctionBasedInstrumentation =
  SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);

    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,

    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))

    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))

  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());

  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);

      ->buildLoad(PtrMemTy, StackSlotPtr,

    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");

    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))

        {GuardVal, FnTy->getParamType(0), {Flags}});

    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();

    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");

  getStackGuard(Guard, *CurBuilder);

    Register GuardPtr = getOrCreateVReg(*IRGuard);

        ->buildLoad(PtrMemTy, GuardPtr,

  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");

  if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
void IRTranslator::finalizeFunction() {

  PendingPHIs.clear();
  FrameIndices.clear();
  MachinePreds.clear();
  EntryBuilder.reset();

  const auto *CI = dyn_cast<CallInst>(&I);

      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();

  TPC = &getAnalysis<TargetPassConfig>();

    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);

    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();

  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);

  DL = &F.getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);

  TM.resetTargetOptions(F);

    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

    FuncInfo.BPI = nullptr;

  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(*TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";

  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

    auto *&MBB = BBToMBB[&BB];

    if (!HasMustTailInVarArgFn)

  EntryBB->addSuccessor(&getMBB(F.front()));

                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());

    if (DL->getTypeStoreSize(Arg.getType()).isZero())

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");

                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());

  if (EnableCSE && CSEInfo)

    CurBuilder->setMBB(MBB);
    HasTailCall = false;

      translateDbgInfo(Inst, *CurBuilder.get());

      if (translate(Inst))

      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

      if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
        std::string InstStrStorage;
        R << ": '" << InstStr.str() << "'";

    if (!finalizeBasicBlock(*BB, MBB)) {
                                 BB->getTerminator()->getDebugLoc(), BB);
      R << "unable to translate basic block";

  finishPendingPhis();

  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");

         "LLVM-IR entry block has a predecessor!?");

  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),

  EntryBB->removeSuccessor(&NewEntryBB);

         "New entry wasn't next in the list of basic block!");
bool isInlineAsm() const
Check if this call is an inline asm statement.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
virtual bool enableBigEndian() const
For targets which want to use big-endian can enable it with enableBigEndian() hook.
virtual bool supportSwiftError() const
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
virtual bool fallBackToDAGISel(const MachineFunction &MF) const
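A minimal sketch (not taken from any in-tree target) of how a backend's CallLowering subclass plugs into the hooks above. The class name and trivial body are placeholders; a real target would copy the values in VRegs into its ABI-mandated physical registers and then build its target-specific return instruction.
  #include "llvm/CodeGen/GlobalISel/CallLowering.h"

  class MyTargetCallLowering : public llvm::CallLowering {
  public:
    using CallLowering::CallLowering; // inherit CallLowering(const TargetLowering *)

    bool lowerReturn(llvm::MachineIRBuilder &MIRBuilder, const llvm::Value *Val,
                     llvm::ArrayRef<llvm::Register> VRegs,
                     llvm::FunctionLoweringInfo &FLI,
                     llvm::Register SwiftErrorVReg) const override {
      // Placeholder: a real target marshals VRegs into return registers and
      // emits its return instruction here; returning true signals success.
      return true;
    }
  };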
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
bool isFPPredicate() const
bool isIntPredicate() const
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
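A small sketch of the DataLayout queries above; the helper function and its arguments are hypothetical.
  #include "llvm/IR/DataLayout.h"

  void querySizes(const llvm::DataLayout &DL, llvm::Type *Ty) {
    llvm::TypeSize Bits = DL.getTypeSizeInBits(Ty);       // bit size, feeds LLT creation
    llvm::TypeSize AllocBytes = DL.getTypeAllocSize(Ty);  // byte size incl. alignment padding
    unsigned PtrBits = DL.getPointerSizeInBits(/*AS=*/0); // pointer width in addrspace 0
    (void)Bits; (void)AllocBytes; (void)PtrBits;
  }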
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
BranchProbabilityInfo * BPI
void clear()
clear - Clear out all the function-specific state.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
The actual analysis pass wrapper.
Simple wrapper that does the following.
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isTailCall(const MachineInstr &MI) const override
This instruction compares its operands according to the predicate given to the constructor.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Indirect Branch Instruction.
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
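A short sketch of the LLT factory functions and queries listed above; the header path is an assumption for recent LLVM releases.
  #include "llvm/CodeGenTypes/LowLevelType.h"
  #include <cassert>

  void lltSamples() {
    llvm::LLT S32   = llvm::LLT::scalar(32);           // plain 32-bit value
    llvm::LLT P0    = llvm::LLT::pointer(0, 64);        // 64-bit addrspace-0 pointer
    llvm::LLT V4S32 = llvm::LLT::fixed_vector(4, 32);   // <4 x s32>
    assert(P0.isPointer() && !S32.isVector());
    assert(V4S32.isFixedVector() && V4S32.getNumElements() == 4);
  }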
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
static LocationSize precise(uint64_t Value)
Context object for machine code objects.
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
void setAddressTakenIRBlock(BasicBlock *BB)
Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
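A minimal sketch (hypothetical helper) combining addSuccessor and normalizeSuccProbs above with the BranchProbabilityInfo::getEdgeProbability query listed earlier, the way machine CFG edges typically mirror IR edges.
  #include "llvm/Analysis/BranchProbabilityInfo.h"
  #include "llvm/CodeGen/MachineBasicBlock.h"

  void mirrorEdge(llvm::BranchProbabilityInfo *BPI, const llvm::BasicBlock *Src,
                  unsigned SuccIdx, llvm::MachineBasicBlock &From,
                  llvm::MachineBasicBlock &To) {
    // Without branch probability info, fall back to a zero probability.
    llvm::BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(Src, SuccIdx)
            : llvm::BranchProbability::getZero();
    From.addSuccessor(&To, Prob);
    From.normalizeSuccProbs();
  }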
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
void setHasMustTailInVarArgFunc(bool B)
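A minimal sketch (hypothetical helper) of the two frame-index entry points above: a fixed-size alloca becomes a static stack object, anything else a variable-sized object.
  #include "llvm/CodeGen/MachineFrameInfo.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Instructions.h"

  int makeFrameIndex(llvm::MachineFunction &MF, const llvm::AllocaInst &AI,
                     uint64_t Size, llvm::Align Alignment) {
    llvm::MachineFrameInfo &MFI = MF.getFrameInfo();
    if (AI.isStaticAlloca())
      return MFI.CreateStackObject(Size, Alignment, /*isSpillSlot=*/false, &AI);
    return MFI.CreateVariableSizedObject(Alignment, &AI);
  }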
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
void remove(iterator MBBI)
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable in a stack slot.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildResetFPMode()
Build and insert G_RESET_FPMODE.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildSetFPMode(const SrcOp &Src)
Build and insert G_SET_FPMODE Src.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildResetFPEnv()
Build and insert G_RESET_FPENV.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildTrap(bool Debug=false)
Build and insert G_TRAP or G_DEBUGTRAP.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
const DataLayout & getDataLayout() const
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)
Build and insert G_SET_FPENV Src.
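A short sketch of chaining the builder methods above; a DstOp built from an LLT makes the builder create a fresh generic virtual register for the result. The helper and its arguments are hypothetical.
  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

  void emitCompareAndBranch(llvm::MachineIRBuilder &MIRBuilder,
                            llvm::MachineBasicBlock &Dest) {
    llvm::LLT S32 = llvm::LLT::scalar(32);
    llvm::LLT S1  = llvm::LLT::scalar(1);
    auto LHS = MIRBuilder.buildConstant(S32, 1);    // %0:_(s32) = G_CONSTANT i32 1
    auto RHS = MIRBuilder.buildConstant(S32, 2);    // %1:_(s32) = G_CONSTANT i32 2
    auto Sum = MIRBuilder.buildAdd(S32, LHS, RHS);  // %2:_(s32) = G_ADD %0, %1
    auto Cmp = MIRBuilder.buildICmp(llvm::CmpInst::ICMP_SLT, S1, Sum, RHS);
    MIRBuilder.buildBrCond(Cmp, Dest);              // G_BRCOND %3(s1), %bb.dest
  }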
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
static uint32_t copyFlagsFromInstruction(const Instruction &I)
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
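A minimal sketch (hypothetical helper) showing how the MOLoad flag above combines with MachineFunction::getMachineMemOperand and MachineIRBuilder::buildLoad to emit a G_LOAD from a frame index; the 64-bit addrspace-0 pointer width is an assumption.
  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
  #include "llvm/CodeGen/MachineMemOperand.h"

  llvm::Register loadI32FromFrameIndex(llvm::MachineIRBuilder &MIRBuilder, int FI) {
    llvm::MachineFunction &MF = MIRBuilder.getMF();
    llvm::LLT S32 = llvm::LLT::scalar(32);
    llvm::LLT P0  = llvm::LLT::pointer(0, 64);      // assumed pointer width
    auto Addr = MIRBuilder.buildFrameIndex(P0, FI); // %addr:_(p0) = G_FRAME_INDEX
    llvm::MachineMemOperand *MMO = MF.getMachineMemOperand(
        llvm::MachinePointerInfo::getFixedStack(MF, FI),
        llvm::MachineMemOperand::MOLoad, S32, llvm::Align(4));
    return MIRBuilder.buildLoad(S32, Addr, *MMO).getReg(0);
  }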
const MCContext & getContext() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
void addPhysRegsUsedFromRegMask(const uint32_t *RegMask)
addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
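A short sketch of the generic-vreg API above: create a typed virtual register and read its low-level type back.
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  #include <cassert>

  llvm::Register makeGenericVReg(llvm::MachineFunction &MF) {
    llvm::MachineRegisterInfo &MRI = MF.getRegInfo();
    llvm::Register R = MRI.createGenericVirtualRegister(llvm::LLT::scalar(64));
    assert(MRI.getType(R) == llvm::LLT::scalar(64) && "type is attached to the vreg");
    return R;
  }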
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
A simple RAII based Delegate installer.
A simple RAII based Observer installer.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, bool FunctionBasedInstrumentation)
Initialize the stack protector descriptor structure for a new basic block.
MachineBasicBlock * getSuccessMBB()
void resetPerBBState()
Reset state that changes when we handle different basic blocks.
void resetPerFunctionState()
Reset state that only changes when we switch functions.
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitStackProtector() const
Returns true if all fields of the stack protector descriptor are initialized implying that we should/...
bool shouldEmitFunctionBasedCheckStackProtector() const
bool shouldEmitSDCheck(const BasicBlock &BB) const
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
bool createEntriesInEntryBlock(DebugLoc DbgLoc)
Create initial definitions of swifterror values in the entry block of the current function.
void setFunction(MachineFunction &MF)
Initialize data structures for specified new function.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
void propagateVRegs()
Propagate assigned swifterror vregs through a function, synthesizing PHI nodes when needed to maintai...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual unsigned getVaListSizeInBits(const DataLayout &DL) const
Returns the size of the platform's va_list object.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool fallBackToDAGISel(const Instruction &Inst) const
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Primary interface to the complete machine description for the target machine.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
const Triple & getTargetTriple() const
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
Target-Independent Code Generator Pass Configuration Options.
virtual std::unique_ptr< CSEConfigBase > getCSEConfig() const
Returns the CSEConfig object to use for the current optimization level.
virtual bool isGISelCSEEnabled() const
Check whether continuous CSE should be enabled in GISel passes.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const InlineAsmLowering * getInlineAsmLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const CallLowering * getCallLowering() const
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSWindows() const
Tests whether the OS is Windows.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
TypeID
Definitions of all of the base types for the Type system.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
constexpr bool isZero() const
const ParentTy * getParent() const
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
initializer< Ty > init(const Ty &Val)
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebIgnore
This corresponds to "fpexcept.ignore".
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
int popcount(T Value) noexcept
Count the number of set bits in a value.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
gep_type_iterator gep_type_end(const User *GEP)
MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
LLT getLLTForMVT(MVT Ty)
Get a rough equivalent of an LLT for a given MVT.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
CodeGenOptLevel
Code generation optimization level.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
OutputIt copy(R &&Range, OutputIt Out)
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
unsigned succ_size(const MachineBasicBlock *BB)
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
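A minimal sketch contrasting getLLTForType above with computeValueLLTs listed earlier: the former maps one sized IR type to a single LLT, the latter splits an aggregate into per-element LLTs plus byte offsets. Header paths are assumptions for recent LLVM releases.
  #include "llvm/CodeGen/Analysis.h"            // computeValueLLTs
  #include "llvm/CodeGen/LowLevelTypeUtils.h"   // getLLTForType

  void splitType(llvm::Type &Ty, const llvm::DataLayout &DL) {
    llvm::LLT Whole = llvm::getLLTForType(Ty, DL);    // single LLT for a sized type
    llvm::SmallVector<llvm::LLT, 4> Parts;
    llvm::SmallVector<uint64_t, 4> Offsets;
    llvm::computeValueLLTs(DL, Ty, Parts, &Offsets);  // one entry per leaf value
    (void)Whole;
  }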
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
MachineBasicBlock * Parent
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
MachineBasicBlock * ThisBB
struct PredInfoPair PredInfo
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB