#define DEBUG_TYPE "irtranslator"

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())

  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

    assert(getCurrentInst() && "Inserted instruction without a current MI");
    // ...
                      << " was copied to " << MI);
    // ...
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc())) &&
           "Line info was not transferred to all instructions");
ArrayRef<Register> IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  // ...
                  Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)

  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  // ...
    return *VMap.getVRegs(Val);
  // ...
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  // ...
         "Don't know how to create an empty vreg");
  // ...
                  Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
  // ...
  auto &C = cast<Constant>(Val);
  // ...
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
  // ...
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    // ...
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    // ...
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
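// Editor's note (illustrative): a single IR Value can be split into several
// virtual registers, one per component LLT, which is why the maps above store
// ranges rather than single registers. Typical use from a translate* helper:
//
//   ArrayRef<Register> Regs = getOrCreateVRegs(*U.getOperand(0));
//   for (Register R : Regs) { /* emit one generic instruction per piece */ }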
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;
  // ...
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
  // ...
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];

  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  // ...
    R << "unable to translate memop: " << ore::NV("Opcode", &I);

  assert(MBB && "BasicBlock was not encountered before");

  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // ...
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  // ...
  if (isa<Instruction>(U)) {

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  // ...
  if (isa<Instruction>(U)) {
  // ...
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
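// Editor's note: `fneg` has been a genuine unary IR instruction since LLVM 8,
// so it maps 1:1 onto G_FNEG here rather than being expanded to
// `fsub -0.0, x`. The equivalent convenience builder call would be:
//
//   MIRBuilder.buildFNeg(Res, Op0, Flags); // sugar for buildInstr(G_FNEG,...)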
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  // ...
                             cast<ConstantExpr>(U).getPredicate());
  // ...
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  // ...
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

    VRegs = getOrCreateVRegs(*Ret);
  // ...
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
void IRTranslator::emitBranchForMergedCondition(
  // ...
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // ...
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    // ...
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    // ...
                 BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                 CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
  // ...
               nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb,
               FProb);
  SL->SwitchCases.push_back(CB);

    return I->getParent() == BB;

void IRTranslator::findMergedConditions(
  // ...
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // ...
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
  // ...
  const Value *BOpOp0, *BOpOp1;
  // ...
    if (BOpc == Instruction::And)
      BOpc = Instruction::Or;
    else if (BOpc == Instruction::Or)
      BOpc = Instruction::And;
  // ...
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  // ...
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,

  if (Opc == Instruction::Or) {
    // ...
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // ...
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);
    // ...
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // ...
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // ...
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);
    // ...
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // ...
  if (Cases.size() != 2)
  // ...
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
  // ...
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    // ...
        Cases[0].TrueBB == Cases[1].ThisBB)
    // ...
        Cases[0].FalseBB == Cases[1].ThisBB)
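// Editor's note (worked example): the null-compare special case above covers
// patterns such as `(X != null) | (Y != null)`, which can be folded into a
// single `(X | Y) != 0` test. Returning false tells translateBr not to emit
// two separate compare-and-branch blocks, since the folded form needs only
// one.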
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  // ...
      !CurMBB.isLayoutSuccessor(Succ0MBB))
  // ...
    CurMBB.addSuccessor(&getMBB(*Succ));
  // ...
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    // ...
    const Value *BOp0, *BOp1;
    // ...
      Opcode = Instruction::And;
    // ...
      Opcode = Instruction::Or;
    // ...
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
      // ...
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
      // ...
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // ...
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        // ...
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);
      // ...
      SL->SwitchCases.clear();
  // ...
               nullptr, Succ0MBB, Succ1MBB, &CurMBB,
               CurBuilder->getDebugLoc());
  // ...
  emitSwitchCase(CB, &CurMBB, *CurBuilder);

    Src->addSuccessorWithoutProb(Dst);
  // ...
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);

  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  // ...
  auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
  using namespace SwitchCG;
  // ...
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    // ...
    assert(Succ && "Could not find successor mbb in mapping");
    // ...
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  // ...
  if (Clusters.empty()) {
  // ...
  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        // ...
      if (C.Kind == CC_BitTests)
        // ...
      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        // ...
        C.High->getValue().print(dbgs(), true);
  // ...
  assert(!Clusters.empty());
  // ...
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
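// Editor's note: switch lowering proceeds from a worklist of cluster ranges.
// For example, dense cases {0..9} typically form one CC_JumpTable cluster, a
// handful of nearby cases sharing destinations may become a CC_BitTests
// cluster, and sparse cases like {0, 100, 10000} remain CC_Range clusters
// that lowerSwitchWorkItem turns into a balanced tree of compares.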
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))

  assert(JT.Reg != -1U && "Should lower JT Header first!");
  // ...
  Register SwitchOpReg = getOrCreateVReg(SValue);
  // ...
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
  // ...
  JT.Reg = Sub.getReg(0);
  // ...
    auto Cst = getOrCreateVReg(

  const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
  // ...
           "Can only handle SLE ranges");
  // ...
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
    // ...
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  // ...
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  // ...
  CurMF->insert(BBI, JumpMBB);
  // ...
  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;
  // ...
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
    // ...
    addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
  // ...
  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  // ...
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough;
  // ...
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
    bool FallthroughUnreachable,
    // ...
  using namespace SwitchCG;
  // ...
  if (I->Low == I->High) {
  // ...
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
  // ...
  emitSwitchCase(CB, SwitchMBB, MIB);

  Register SwitchOpReg = getOrCreateVReg(*B.SValue);
  // ...
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
  // ...
  LLT MaskTy = SwitchOpTy;
  // ...
    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
  // ...
  if (SwitchOpTy != MaskTy)
  // ...
  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  // ...
  if (!B.FallthroughUnreachable) {
    // ...
                             RangeSub, RangeCst);

  if (PopCount == 1) {
    // ...
    auto MaskTrailingZeros =
    // ...
  } else if (PopCount == BB.Range) {
    // ...
    auto MaskTrailingOnes =
    // ...
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
    // ...
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
  // ...
    addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // ...
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
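// Editor's note (worked sketch): for cases {0, 2, 5} sharing one destination,
// the bit-test header subtracts the low bound and range-checks the result;
// this block then emits `1 << (X - Low)` and ANDs it with the precomputed
// mask 0b100101 -- a nonzero result means X is one of the tested values. The
// PopCount == 1 and PopCount == BB.Range special cases above replace the
// shift-and-mask sequence with a single integer compare.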
bool IRTranslator::lowerBitTestWorkItem(
    // ...
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  // ...
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // ...
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);
  // ...
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;
  // ...
  BTB->DefaultProb = UnhandledProbs;
  // ...
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  // ...
  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;
  // ...
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;

  using namespace SwitchCG;
  // ...
  if (++BBI != FuncInfo.MF->end())
  // ...
                 [](const CaseCluster &a, const CaseCluster &b) {
                   return a.Prob != b.Prob
                              ? a.Prob > b.Prob
                              : a.Low->getValue().slt(b.Low->getValue());
  // ...
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      // ...
      if (I->Prob > W.LastCluster->Prob)
        // ...
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
  // ...
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;
  // ...
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    // ...
    if (I == W.LastCluster) {
      // ...
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
      // ...
      CurMF->insert(BBI, Fallthrough);
    // ...
    UnhandledProbs -= I->Prob;
    // ...
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
      // ...
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
      // ...
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
    // ...
    CurMBB = Fallthrough;
bool IRTranslator::translateIndirectBr(const User &U,
  // ...
    if (!AddedSuccessors.insert(Succ).second)

  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))

  const LoadInst &LI = cast<LoadInst>(U);
  // ...
    assert(Regs.size() == 1 && "swifterror should be single pointer");
  // ...
      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  // ...
  for (unsigned i = 0; i < Regs.size(); ++i) {
    // ...
    Align BaseAlign = getMemOpAlign(LI);

    assert(Vals.size() == 1 && "swifterror should be single pointer");
    // ...
                                    SI.getPointerOperand());
  // ...
  for (unsigned i = 0; i < Vals.size(); ++i) {
    // ...
    Align BaseAlign = getMemOpAlign(SI);
    // ...
                              SI.getSyncScopeID(), SI.getOrdering());
  const Value *Src = U.getOperand(0);
  // ...
    for (auto Idx : EVI->indices())
      // ...
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      // ...
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
  // ...
      DL.getIndexedOffsetInType(Src->getType(), Indices));

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  // ...
  auto &DstRegs = allocateVRegs(U);
  // ...
  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  // ...
  auto &DstRegs = allocateVRegs(U);
  // ...
  auto *InsertedIt = InsertedRegs.begin();
  // ...
  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  // ...
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    // ...
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  // ...
    auto &Regs = *VMap.getVRegs(U);
    // ...
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);

bool IRTranslator::translateBitCast(const User &U,
  // ...
    if (isa<ConstantInt>(U.getOperand(0)))
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
                           // ...
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  // ...
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  // ...
  unsigned VectorWidth = 0;
  // ...
  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    // ...
    WantSplatVector = VectorWidth > 1;
  // ...
  if (WantSplatVector && !PtrTy.isVector()) {
  // ...
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
    // ...
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
      // ...
        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
        // ...
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
      // ...
      if (ElementSize != 1) {
      // ...
        GepOffsetReg = IdxReg;
  // ...
    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    // ...
  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
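// Editor's note (worked example): for
//   %p = getelementptr i32, ptr %base, i64 %i
// the element size is 4, so the loop above emits roughly a G_MUL of %i by 4
// followed by a G_PTR_ADD onto %base, while constant indices are folded into
// the accumulated Offset and materialized with a single G_CONSTANT +
// G_PTR_ADD at the end. For vector GEPs, scalar operands are first splat to
// VectorWidth (the WantSplatVector path above).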
bool IRTranslator::translateMemFunc(const CallInst &CI,
  // ...
  if (isa<UndefValue>(SrcPtr))
  // ...
  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    // ...
    MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
  // ...
  if (MRI->getType(SizeOpReg) != SizeTy)
  // ...
  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  // ...
  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
  // ...
  if (AA && CopySize &&
  // ...
  ICall.addMemOperand(
      // ...
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)

void IRTranslator::getStackGuard(Register DstReg,
  // ...
  MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
  // ...
  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  // ...
  MIB.setMemRefs({MemRef});
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
  // ...
      Op, {ResRegs[0], ResRegs[1]},

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
  // ...
  Register Dst = getOrCreateVReg(CI);
  // ...
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  // ...
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
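// Editor's note: "simple" intrinsics are those whose IR operands map 1:1
// onto a single generic opcode. For example,
//   %r = call float @llvm.sqrt.f32(float %x)
// becomes `%r:_(s32) = G_FSQRT %x` via translateSimpleIntrinsic below;
// anything not in this table falls through to the dedicated handlers in
// translateKnownIntrinsic.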
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
  // ...
  unsigned Op = getSimpleIntrinsicOpcode(ID);
  // ...
  for (const auto &Arg : CI.args())
  // ...
  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,

  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
bool IRTranslator::translateConstrainedFPIntrinsic(
  // ...

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;
  // ...
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;

bool IRTranslator::translateIfEntryValueArgument(const DbgValueInst &DebugInst,
  // ...
  auto *Arg = dyn_cast<Argument>(DebugInst.getValue());
  // ...
  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
  // ...
    LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                         "couldn't find a physical register\n"
                      << DebugInst << "\n");

bool IRTranslator::translateIfEntryValueArgument(
  // ...
  auto *Arg = dyn_cast<Argument>(DebugInst.getAddress());
  // ...
  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
  // ...
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // ...
    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;
    // ...
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
  // ...
  case Intrinsic::dbg_declare: {
    // ...
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    // ...
    if (translateIfEntryValueArgument(DI))
  // ...
  case Intrinsic::dbg_label: {
    // ...
           "Expected inlined-at fields to agree");
  // ...
  case Intrinsic::vaend:
  // ...
  case Intrinsic::vastart: {
    // ...
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
    // ...
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        // ...
                                                ListSize, Align(1)));
  // ...
  case Intrinsic::dbg_value: {
    // ...
           "Expected inlined-at fields to agree");
    // ...
    if (const auto *CI = dyn_cast<Constant>(V)) {
    // ...
    if (auto *AI = dyn_cast<AllocaInst>(V);
    // ...
      auto *ExprDerefRemoved =
    // ...
    if (translateIfEntryValueArgument(DI, MIRBuilder))
    // ...
    for (Register Reg : getOrCreateVRegs(*V)) {
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    // ...
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    // ...
    Register Dst = getOrCreateVReg(CI);
    // ...
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
    // ...
  case Intrinsic::convert_from_fp16:
    // ...
  case Intrinsic::convert_to_fp16:
    // ...
  case Intrinsic::frexp: {
    // ...
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    // ...
  case Intrinsic::objectsize:
    // ...
  case Intrinsic::is_constant:
    // ...
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    // ...
  case Intrinsic::stackprotector: {
    // ...
    getStackGuard(GuardVal, MIRBuilder);
    // ...
    int FI = getOrCreateFrameIndex(*Slot);
    // ...
        GuardVal, getOrCreateVReg(*Slot),
    // ...
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
    // ...
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
    // ...
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    // ...
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
  case Intrinsic::invariant_start: {
    // ...
  case Intrinsic::invariant_end:
    // ...
  case Intrinsic::expect:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // ...
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
    // ...
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // ...
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    // ...
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    // ...
  case Intrinsic::write_register: {
    // ...
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
    // ...
  case Intrinsic::localescape: {
    // ...
      if (isa<ConstantPointerNull>(Arg))
      // ...
      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
    // ...
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // ...
    Register Dst = getOrCreateVReg(CI);
    // ...
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
    // ...
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    // ...
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
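// Editor's note: this split mirrors the LangRef semantics of
// llvm.vector.reduce.fadd/fmul: without `reassoc` the reduction is strictly
// sequential (G_VECREDUCE_SEQ_FADD/FMUL take the start value as an operand);
// with `reassoc` the vector may be reduced in any order and the start value
// folded in afterwards with the scalar G_FADD/G_FMUL emitted just above.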
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
  case Intrinsic::ubsantrap: {
    // ...
    if (TrapFuncName.empty())
    // ...
    if (ID == Intrinsic::ubsantrap) {
    // ...
    return CLI->lowerCall(MIRBuilder, Info);
  // ...
  case Intrinsic::fptrunc_round: {
    // ...
    std::optional<RoundingMode> RoundMode =
    // ...
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
                    // ...
        .addImm((int)*RoundMode);
    // ...
  case Intrinsic::is_fpclass: {
    // ...
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
    // ...
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),

bool IRTranslator::translateInlineAsm(const CallBase &CB,
  // ...
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
  // ...
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
bool IRTranslator::translateCallBase(const CallBase &CB,
  // ...
  for (const auto &Arg : CB.args()) {
    // ...
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      // ...
          &CB, &MIRBuilder.getMBB(), Arg));
    // ...
    Args.push_back(getOrCreateVRegs(*Arg));

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
  // ...
      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
  // ...
    assert(!HasTailCall && "Can't tail call return twice from block?");

  const CallInst &CI = cast<CallInst>(U);
  // ...
  if (F && (F->hasDLLImportStorageClass() ||
            // ...
             F->hasExternalWeakLinkage())))
  // ...
  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
  // ...
    return translateInlineAsm(CI, MIRBuilder);
  // ...
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
  // ...
    return translateCallBase(CI, MIRBuilder);
  // ...
  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
  // ...
    ResultRegs = getOrCreateVRegs(CI);
  // ...
  if (isa<FPMathOperator>(CI))
  // ...
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
      // ...
      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      MIB.addImm(CI->getSExtValue());
      // ...
      MIB.addFPImm(cast<ConstantFP>(Arg.value()));
    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MD = MDVal->getMetadata();
      auto *MDN = dyn_cast<MDNode>(MD);
      // ...
        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
      // ...
      if (VRegs.size() > 1)
  // ...
    LLT MemTy = Info.memVT.isSimple()
                    // ...
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());
    // ...
    else if (Info.fallbackAddressSpace)
bool IRTranslator::findUnwindDestinations(
  // ...
    if (isa<LandingPadInst>(Pad)) {
      // ...
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      // ...
    if (isa<CleanupPadInst>(Pad)) {
      // ...
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      // ...
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // ...
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        // ...
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        // ...
          UnwindDests.back().first->setIsEHScopeEntry();
      // ...
      NewEHPadBB = CatchSwitch->getUnwindDest();
    // ...
    if (BPI && NewEHPadBB)
      // ...
    EHPadBB = NewEHPadBB;
bool IRTranslator::translateInvoke(const User &U,
  // ...
  const Function *Fn = I.getCalledFunction();
  // ...
  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;
  // ...
    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();
  // ...
  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
      // ...
  } else if (!translateCallBase(I, MIRBuilder))
  // ...
    EndSymbol = Context.createTempSymbol();
  // ...
  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
  // ...
      &ReturnMBB = getMBB(*ReturnBB);
  // ...
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  // ...
    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  // ...
  MIRBuilder.buildBr(ReturnMBB);

bool IRTranslator::translateCallBr(const User &U,
  // ...

bool IRTranslator::translateLandingPad(const User &U,
  // ...
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
  // ...
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
  // ...
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
  // ...
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
  // ...
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
  // ...
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
bool IRTranslator::translateAlloca(const User &U,
  // ...
  auto &AI = cast<AllocaInst>(U);
  // ...
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
  // ...
  if (MRI->getType(NumElts) != IntPtrTy) {
  // ...
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
  // ...
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
  // ...
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
  // ...
  if (Alignment <= StackAlign)
    Alignment = Align(1);

  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),

  auto &UI = cast<UnreachableInst>(U);
  // ...
  if (&UI != &BB.front()) {
    // ...
    if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
      if (Call->doesNotReturn())
bool IRTranslator::translateInsertElement(const User &U,
  // ...
  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);
  // ...
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));

bool IRTranslator::translateExtractElement(const User &U,
  // ...
  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  // ...
  Register Val = getOrCreateVReg(*U.getOperand(0));
  // ...
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      // ...
      Idx = getOrCreateVReg(*NewIdxCI);
  // ...
    Idx = getOrCreateVReg(*U.getOperand(1));

bool IRTranslator::translateShuffleVector(const User &U,
  // ...
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  // ...
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);

  const PHINode &PI = cast<PHINode>(U);
  // ...
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    // ...
  PendingPHIs.emplace_back(&PI, std::move(Insts));
bool IRTranslator::translateAtomicCmpXchg(const User &U,
  // ...
  auto Res = getOrCreateVRegs(I);
  // ...
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());
  // ...
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      // ...
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));

bool IRTranslator::translateAtomicRMW(const User &U,
  // ...
  Register Val = getOrCreateVReg(*I.getValOperand());
  // ...
  unsigned Opcode = 0;
  switch (I.getOperation()) {
  // ... (case labels reconstructed from the surviving assignments)
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  case AtomicRMWInst::FMax:
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    break;
  case AtomicRMWInst::FMin:
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    break;
  case AtomicRMWInst::UIncWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    break;
  case AtomicRMWInst::UDecWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    break;
  }
  // ...
      Opcode, Res, Addr, Val,
      // ...
          Flags, MRI->getType(Val), getMemOpAlign(I),
          I.getAAMetadata(), nullptr, I.getSyncScopeID(),
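// Editor's note: each AtomicRMWInst operation maps directly onto one
// G_ATOMICRMW_* opcode, so IR such as
//   %old = atomicrmw add ptr %p, i32 1 seq_cst
// becomes a single G_ATOMICRMW_ADD whose ordering, sync scope, and alignment
// travel in the MachineMemOperand attached in the buildAtomicRMW call above.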
bool IRTranslator::translateFence(const User &U,
  // ...
  const FenceInst &Fence = cast<FenceInst>(U);

bool IRTranslator::translateFreeze(const User &U,
  // ...
         "Freeze with different source and destination type?");
  // ...
  for (unsigned I = 0; I < DstRegs.size(); ++I) {

void IRTranslator::finishPendingPhis() {
  // ...
  for (auto &Phi : PendingPHIs) {
    // ...
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        // ...
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
bool IRTranslator::translate(const Instruction &Inst) {
  // ...
  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
  // ...
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"

  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!isa<FixedVectorType>(CAZ->getType()))
      // ...
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
    // ...
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
    // ...
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant &Elt = *CAZ->getElementValue(I);
      // ...
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    // ...
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      // ...
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    // ...
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    // ...
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
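// Editor's note (worked example): constants are materialized once in the
// entry block via EntryBuilder, so they dominate all uses and can be CSE'd.
// A `<4 x i32> zeroinitializer` (ConstantAggregateZero over a fixed vector)
// becomes four `G_CONSTANT i32 0` plus one G_BUILD_VECTOR, while non-fixed
// (scalable) vector cases take the early bail-out above.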
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
  // ...
  for (auto &BTB : SL->BitTestCases) {
    // ...
      emitBitTestHeader(BTB, BTB.Parent);
    // ...
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // ...
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // ...
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // ...
        NextMBB = BTB.Default;
      } else {
        // ...
        NextMBB = BTB.Cases[j + 1].ThisBB;
      // ...
      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
      // ...
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // ...
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
        // ...
        BTB.Cases.pop_back();
    // ...
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
  // ...
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // ...
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
    // ...
    emitJumpTable(JTCase.second, JTCase.second.MBB);
  // ...
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
  // ...
  bool FunctionBasedInstrumentation =
      // ...
  SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  // ...
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
    // ...
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
    // ...
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
  // ...
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
  // ...
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  // ...
          ->buildLoad(PtrMemTy, StackSlotPtr,
  // ...
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
  // ...
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
    // ...
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
    // ...
        {GuardVal, FnTy->getParamType(0), {Flags}});
    // ...
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    // ...
    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
  // ...
    getStackGuard(Guard, *CurBuilder);
  // ...
    Register GuardPtr = getOrCreateVReg(*IRGuard);
    // ...
            ->buildLoad(PtrMemTy, GuardPtr,
  // ...
  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
  // ...
  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
  // ...
  if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
void IRTranslator::finalizeFunction() {
  // ...
  PendingPHIs.clear();
  // ...
  FrameIndices.clear();
  MachinePreds.clear();
  // ...
  EntryBuilder.reset();

  const auto *CI = dyn_cast<CallInst>(&I);

      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // ...
  TPC = &getAnalysis<TargetPassConfig>();
  // ...
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    // ...
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  // ...
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
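// Editor's note (illustrative): when CSE is enabled -- see the
// EnableCSEInIRTranslator flag at the top of this file and the
// TargetPassConfig::isGISelCSEEnabled() hook -- both builders are
// CSEMIRBuilders sharing one GISelCSEInfo, so repeated requests for the same
// constant return the existing vreg instead of a duplicate. Sketch, assuming
// CSE is active:
//
//   auto C1 = EntryBuilder->buildConstant(LLT::scalar(32), 42);
//   auto C2 = EntryBuilder->buildConstant(LLT::scalar(32), 42);
//   // C2.getReg(0) == C1.getReg(0)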
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  // ...
  TM.resetTargetOptions(F);
  // ...
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  // ...
    FuncInfo.BPI = nullptr;
  // ...
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
  // ...
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  // ...
  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);
  // ...
  assert(PendingPHIs.empty() && "stale PHIs");
  // ...
        F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
  // ...
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
  // ...
  EntryBuilder->setMBB(*EntryBB);
  // ...
  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  // ...
  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;
  // ...
    auto *&MBB = BBToMBB[&BB];
    // ...
    if (!HasMustTailInVarArgFn)
  // ...
  EntryBB->addSuccessor(&getMBB(F.front()));
  // ...
        F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
  // ...
    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
  // ...
        F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
  // ...
  if (EnableCSE && CSEInfo)
  // ...
    CurBuilder->setMBB(MBB);
    HasTailCall = false;
    // ...
      if (translate(Inst))
      // ...
      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
      if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
        std::string InstStrStorage;
        // ...
        R << ": '" << InstStr.str() << "'";
    // ...
    if (!finalizeBasicBlock(*BB, MBB)) {
      // ...
          BB->getTerminator()->getDebugLoc(), BB);
      R << "unable to translate basic block";
  // ...
  finishPendingPhis();
  // ...
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // ...
         "LLVM-IR entry block has a predecessor!?");
  // ...
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
  // ...
  EntryBB->removeSuccessor(&NewEntryBB);
  // ...
         "New entry wasn't next in the list of basic block!");
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This contains common code to allow clients to notify changes to machine instr.
const HexagonInstrInfo * TII
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
Legalize the Machine IR a function s Machine IR
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
verify safepoint Safepoint IR Verifier
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
const Value * getArraySize() const
Get the number of elements allocated.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An immutable pass that tracks lazily created AssumptionCache objects.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
Attribute getFnAttr(Attribute::AttrKind Kind) const
Return the attribute object that exists for the function.
StringRef getValueAsString() const
Return the attribute's value as a string.
LLVM Basic Block Representation.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
InstListType::const_iterator const_iterator
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction & back() const
Legacy analysis pass which computes BlockFrequencyInfo.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Legacy analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
virtual bool enableBigEndian() const
For targets which want to use big-endian can enable it with enableBigEndian() hook.
virtual bool supportSwiftError() const
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
virtual bool fallBackToDAGISel(const MachineFunction &MF) const
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
bool isFPPredicate() const
bool isIntPredicate() const
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
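For illustration, a minimal round trip through these accessors (Ctx assumed to be a live LLVMContext):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Build an i32 constant and read it back zero-extended to 64 bits.
static uint64_t constantIntRoundTrip(LLVMContext &Ctx) {
  Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
  auto *CI = cast<ConstantInt>(C); // scalar type, so no splat was made
  return CI->isZero() ? 0 : CI->getZExtValue(); // 42
}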
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
bool startsWithDeref() const
Return whether the first element is a DW_OP_deref.
ArrayRef< uint64_t > getElements() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string, and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits. FIXME: The defaults need to be removed once all of the backends/clients are updated.
bool isLittleEndian() const
Layout endianness...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of its fields.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
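A sketch of the typical query mix when lowering a value of IR type Ty (DL and Ty assumed to be in scope):

#include "llvm/IR/DataLayout.h"
using namespace llvm;

// Size and alignment facts a translator needs about a type.
static void queryLayout(const DataLayout &DL, Type *Ty) {
  unsigned PtrBits = DL.getPointerSizeInBits(/*AS=*/0); // pointer width
  TypeSize Bits = DL.getTypeSizeInBits(Ty);   // logical size, in bits
  TypeSize Alloc = DL.getTypeAllocSize(Ty);   // bytes, incl. padding
  Align ABIAlign = DL.getABITypeAlign(Ty);    // minimum ABI alignment
  (void)PtrBits; (void)Bits; (void)Alloc; (void)ABIAlign;
}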
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
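The three accessors above decompose a dbg.value; a hedged sketch of their use, mirroring the usual validity assertion:

#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Split a dbg.value into the tracked Value, the source variable, and the
// DIExpression relating the two, checking that inlined-at fields agree.
static void unpackDbgValue(const DbgValueInst &DI) {
  Value *V = DI.getValue();
  DILocalVariable *Var = DI.getVariable();
  DIExpression *Expr = DI.getExpression();
  assert(Var->isValidLocationForIntrinsic(DI.getDebugLoc()) &&
         "Expected inlined-at fields to agree");
  (void)V; (void)Expr;
}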
This instruction compares its operands according to the predicate given to the constructor.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
BranchProbabilityInfo * BPI
void clear()
clear - Clear out all the function-specific state.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
DISubprogram * getSubprogram() const
Get the attached subprogram.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
The actual analysis pass wrapper.
Simple wrapper that does the following.
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isTailCall(const MachineInstr &MI) const override
This instruction compares its operands according to the predicate given to the constructor.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformation or analysis.
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
Indirect Branch Instruction.
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction. GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
const BasicBlock * getParent() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
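A brief sketch combining these per-instruction queries (I is any IR instruction):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

// The flag query is guarded: fast-math flags only exist on FP operations.
static void inspectInstruction(const Instruction &I) {
  unsigned Opc = I.getOpcode();           // e.g. Instruction::Add
  MDNode *Range = I.getMetadata(LLVMContext::MD_range);
  AAMDNodes AA = I.getAAMetadata();       // TBAA, alias scopes, etc.
  bool Reassoc = isa<FPMathOperator>(&I) && I.hasAllowReassoc();
  (void)Opc; (void)Range; (void)AA; (void)Reassoc;
}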
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
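A sketch of constructing the low-level types above (note the LLT header path varies across LLVM versions):

#include "llvm/CodeGen/LowLevelType.h"
using namespace llvm;

// Scalars, pointers, and fixed vectors are the shapes GlobalISel tracks.
static void makeTypes() {
  LLT S32 = LLT::scalar(32);             // s32
  LLT P0 = LLT::pointer(0, 64);          // p0, 64-bit pointers
  LLT V4S32 = LLT::fixed_vector(4, 32);  // <4 x s32>
  LLT V4S16 = V4S32.changeElementType(LLT::scalar(16)); // <4 x s16>
  assert(S32.getSizeInBits() == 32 && V4S32.isVector() && P0.isPointer());
  (void)V4S16;
}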
The landingpad instruction holds all of the information necessary to generate correct exception handling.
An instruction for reading from memory.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
static LocationSize precise(uint64_t Value)
Context object for machine code objects.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created and uniqued by the MCContext class.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
void setAddressTakenIRBlock(BasicBlock *BB)
Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
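For orientation, a sketch of the successor bookkeeping these calls provide (all blocks assumed already created):

#include "llvm/CodeGen/MachineBasicBlock.h"
using namespace llvm;

// Record a two-way branch in the machine CFG and renormalize the edge
// probabilities so they sum to one.
static void wireBranch(MachineBasicBlock &MBB, MachineBasicBlock &T,
                       MachineBasicBlock &F, BranchProbability ProbT) {
  MBB.addSuccessor(&T, ProbT);
  MBB.addSuccessor(&F); // defaults to an unknown probability
  MBB.normalizeSuccProbs();
  assert(T.isPredecessor(&MBB) && "edge should be registered both ways");
}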
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack frame for this function contains any variable sized objects.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
void setHasMustTailInVarArgFunc(bool B)
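A sketch of the two frame-object paths above: a fixed-size slot for static allocas, a variable-sized record otherwise. The size and alignment are caller-supplied assumptions here:

#include "llvm/CodeGen/MachineFrameInfo.h"
using namespace llvm;

// Static allocas get a statically sized stack object; dynamic allocas are
// only recorded so the frame is known to contain variable-sized objects.
static int createFrameObject(MachineFrameInfo &MFI, const AllocaInst *AI,
                             uint64_t Size, Align A, bool StaticAlloca) {
  if (StaticAlloca)
    return MFI.CreateStackObject(Size, A, /*isSpillSlot=*/false, AI);
  return MFI.CreateVariableSizedObject(A, AI);
}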
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of passes that operate on the MachineFunction representation.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
void remove(iterator MBBI)
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable in a stack slot.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad block.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in memory at Reg (suitably modified by Expr).
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified by Expr).
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
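To see how the build* helpers compose, here is a hedged sketch that emits a tiny compare-and-branch sequence; the builder and blocks are assumed to come from the surrounding pass:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Emit: %c = G_CONSTANT i32 1; %s = G_ADD %c, %c;
//       %t = G_ICMP ule %s, %c; G_BRCOND %t, TrueBB; G_BR FalseBB
static void emitCompareAndBranch(MachineIRBuilder &B, MachineBasicBlock &BB,
                                 MachineBasicBlock &TrueBB,
                                 MachineBasicBlock &FalseBB) {
  B.setMBB(BB); // insert at the end of BB
  LLT S32 = LLT::scalar(32);
  auto One = B.buildConstant(S32, 1);
  auto Sum = B.buildAdd(S32, One, One);
  auto Cond = B.buildICmp(CmpInst::ICMP_ULE, LLT::scalar(1), Sum, One);
  B.buildBrCond(Cond, TrueBB);
  B.buildBr(FalseBB);
}

Each build* call returns a MachineInstrBuilder, which the SrcOp parameters accept directly, so results feed straight into later instructions.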
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
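Since every add* helper returns the builder, operands chain in one expression. A sketch; the opcode and operand list here are placeholders, not a real opcode's expected operands:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Assemble an instruction operand-by-operand; each add* call returns the
// same builder, so the calls chain.
static void chainOperands(MachineIRBuilder &B, unsigned Opcode, Register Use,
                          MachineBasicBlock *Dest, MachineMemOperand *MMO) {
  B.buildInstr(Opcode)
      .addUse(Use)
      .addImm(0)
      .addMBB(Dest)
      .addMemOperand(MMO);
}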
Representation of each machine instruction.
void copyIRFlags(const Instruction &I)
Copy all IR flags to the MachineInstr's MIFlags.
static uint32_t copyFlagsFromInstruction(const Instruction &I)
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MCContext & getContext() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is found.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
void addPhysRegsUsedFromRegMask(const uint32_t *RegMask)
addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
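A sketch of the generic-vreg lifecycle these calls describe (MRI assumed to belong to the current MachineFunction):

#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Generic virtual registers carry an LLT rather than a register class
// until instruction selection assigns one.
static Register makeGenericVReg(MachineRegisterInfo &MRI) {
  Register VReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  assert(MRI.getType(VReg) == LLT::scalar(64));
  assert(MRI.getVRegDef(VReg) == nullptr && "nothing defines it yet");
  return VReg;
}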
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
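A minimal sketch of walking a PHI with these accessors:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Visit each (incoming value, predecessor block) pair of a PHI node.
static void visitPhiEdges(const PHINode &PN) {
  for (unsigned Idx = 0, E = PN.getNumIncomingValues(); Idx != E; ++Idx) {
    Value *V = PN.getIncomingValue(Idx);
    BasicBlock *Pred = PN.getIncomingBlock(Idx);
    (void)V; (void)Pred;
  }
}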
A simple RAII based Delegate installer.
A simple RAII based Observer installer.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
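A small usage sketch of the SmallSet interface above:

#include "llvm/ADT/SmallSet.h"
using namespace llvm;

// insert() reports whether the element was new; count() is 0 or 1.
static bool dedupeDemo() {
  SmallSet<int, 4> Seen;
  bool First = Seen.insert(7).second; // true: 7 was not present
  bool Again = Seen.insert(7).second; // false: already there
  return First && !Again && Seen.count(7) == 1;
}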
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, bool FunctionBasedInstrumentation)
Initialize the stack protector descriptor structure for a new basic block.
MachineBasicBlock * getSuccessMBB()
void resetPerBBState()
Reset state that changes when we handle different basic blocks.
void resetPerFunctionState()
Reset state that only changes when we switch functions.
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitStackProtector() const
Returns true if all fields of the stack protector descriptor are initialized implying that we should/...
bool shouldEmitFunctionBasedCheckStackProtector() const
bool shouldEmitSDCheck(const BasicBlock &BB) const
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr bool empty() const
empty - Check if the string is empty.
const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
bool createEntriesInEntryBlock(DebugLoc DbgLoc)
Create initial definitions of swifterror values in the entry block of the current function.
void setFunction(MachineFunction &MF)
Initialize data structures for specified new function.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swifterror args.
void propagateVRegs()
Propagate assigned swifterror vregs through a function, synthesizing PHI nodes when needed to maintai...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nullptr.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handling, return the function.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory).
virtual bool fallBackToDAGISel(const Instruction &Inst) const
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an EH pad.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
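As a sketch of the libcall-lookup half of this interface (TLI assumed to be the target's lowering info):

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Resolve the runtime routine name and calling convention for a libcall.
static const char *libcallInfo(const TargetLowering &TLI) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(RTLIB::MEMCPY);
  (void)CC;
  return TLI.getLibcallName(RTLIB::MEMCPY); // e.g. "memcpy", or null
}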
Primary interface to the complete machine description for the target machine.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
const Triple & getTargetTriple() const
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls, even if TrapUnreachable is true.
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
Target-Independent Code Generator Pass Configuration Options.
virtual std::unique_ptr< CSEConfigBase > getCSEConfig() const
Returns the CSEConfig object to use for the current optimization level.
virtual bool isGISelCSEEnabled() const
Check whether continuous CSE should be enabled in GISel passes.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const InlineAsmLowering * getInlineAsmLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const CallLowering * getCallLowering() const
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSWindows() const
Tests whether the OS is Windows.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
TypeID
Definitions of all of the base types for the Type system.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static IntegerType * getInt32Ty(LLVMContext &C)
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
constexpr bool isZero() const
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
bool match(Val *V, const Pattern &P)