#define DEBUG_TYPE "irtranslator"

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc())) &&
           "Line info was not transferred to all instructions");
  }
};
ArrayRef<Register> IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // Aggregates are handled specially.
    unsigned Idx = 0;
    auto &C = cast<Constant>(Val);
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
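// Example: a value of IR type {i64, i32} is split by computeValueLLTs into
// two leaf LLTs (s64 and s32), so getOrCreateVRegs returns two virtual
// registers, and the offsets vector records the bit position of each piece
// within the aggregate ({0, 64} here).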
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                             I.getDebugLoc(), I.getParent());
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge,
                                     MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U))
    Flags = MachineInstr::copyFlagsFromInstruction(cast<Instruction>(U));

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U))
    Flags = MachineInstr::copyFlagsFromInstruction(cast<Instruction>(U));
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate()
         : static_cast<CmpInst::Predicate>(
               cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    uint32_t Flags = 0;
    if (CI)
      Flags = MachineInstr::copyFlagsFromInstruction(*CI);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);
  // ... swifterror propagation and the call to CLI->lowerReturn are elided in
  // this listing.
  return true;
}
void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into the
  // caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over not part of the tree and remember to invert op and operands at
  // next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      !isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or X, Y)), Z  --> and (and (not X, not Y)), Z
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // If the original probabilities are A (true) and B (false), one workable
    // split is A/2 and A/2+B for CurBB, with the remainder normalized onto
    // TmpBB.
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // The dual case: the false edge of CurBB short-circuits to FBB, so FProb
    // is split between CurBB and TmpBB.
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}
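// For the Or case above, "br (or X, Y), TBB, FBB" is emitted as two
// conditional branches:
//   CurBB:  jmp_if_X TBB ; jmp TmpBB
//   TmpBB:  jmp_if_Y TBB ; jmp FBB
// The And case is symmetric, with the false edges short-circuiting to FBB.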
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
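// When this returns false, translateBr erases the extra blocks created by
// findMergedConditions and falls back to a single compare-and-branch on the
// original boolean condition, since the two compares are expected to fold
// into one anyway.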
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode) { // (a vector extractelement guard is elided in this listing)
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}
void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}
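// Note: without BranchProbabilityInfo, every edge out of a block with N
// successors is treated as equally likely, i.e. probability 1/N.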
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table.
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  // ... G_JUMP_TABLE / G_BRJT emission from JT.Reg is elided in this listing.
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  // ... builder setup elided.
  // Subtract the lowest switch case value from the value being switched on.
  const Value &SValue = *JTH.SValue;
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  JT.Reg = Sub.getReg(0);

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  // ... the comparison against Cst and the conditional branch are elided.
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  // ...
  // Range checks: a case range [Low, High] is tested by subtracting Low and
  // doing a single unsigned comparison against High - Low.
  const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
  // ...
  assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
         "Can only handle SLE ranges");
  if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
    // The low bound is the minimum signed value, so only the upper bound
    // needs checking.
    // ...
  } else {
    auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
    // ... compare Sub against (High - Low) with ICMP_ULE (elided).
  }
  // ...
}
bool IRTranslator::lowerJumpTableWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
    MachineBasicBlock *Fallthrough, bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block; do the range
  // check there and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough;

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(
    SwitchCG::CaseClusterIt I, Value *Cond, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable, BranchProbability UnhandledProbs,
    MachineBasicBlock *CurMBB, MachineIRBuilder &MIB,
    MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB,
               Fallthrough, CurMBB, MIB.getDebugLoc(), I->Prob,
               UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}
void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);
  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  LLT MaskTy = SwitchOpTy;
  // Ensure that the type will fit the mask value.
  for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
    if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
      // Switch table case range are encoded into series of masks.
      // Just use pointer type, it's guaranteed to fit.
      MaskTy = LLT::scalar(64);
      break;
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conservatively emit a range check; whether it could be omitted was
    // decided when the bit-test clusters were formed.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}
void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp = MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg,
                        MaskTrailingZeros)
              .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  SwitchBB->normalizeSuccProbs();

  MIB.buildBrCond(Cmp, *B.TargetBB);
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}
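// Why the PopCount == 1 shortcut works: when the case mask has exactly one
// set bit at position k, "(1 << Reg) & Mask" is nonzero iff Reg == k, so the
// shift-and-mask sequence can be replaced by a single equality compare
// against countr_zero(Mask).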
bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two successors
  // of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}
bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != CurMF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Order cases by probability so the most likely case is checked first.
    // Ties are broken by the case's low value so the ordering stays
    // deterministic (clusters never overlap).
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if
    // possible without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }
    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors. indirectbr may list duplicate destination blocks, which
  // MIR does not allow, so skip anything that's already a successor.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  // ...
  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  // Swifterror values are carried in a dedicated vreg rather than memory.
  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(
        &LI, &MIRBuilder.getMBB(), LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags =
      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  // ...
  for (unsigned i = 0; i < Regs.size(); ++i) {
    // One G_LOAD per split register, offset from the base pointer.
    Align BaseAlign = getMemOpAlign(LI);
    // ... MachineMemOperand construction and G_LOAD emission are elided in
    // this listing.
  }

  return true;
}

bool IRTranslator::translateStore(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegDefAt(
        &SI, &MIRBuilder.getMBB(), SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Align BaseAlign = getMemOpAlign(SI);
    // The memory operand carries the atomic info, ending with
    // SI.getSyncScopeID() and SI.getOrdering(); the G_STORE construction is
    // elided in this listing.
  }
  return true;
}
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
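// Example: for "extractvalue {i8, {i32, i64}} %agg, 1, 1" the synthesized
// index list is [0, 1, 1] and the result is the bit offset of the inner i64
// within the outer struct; the helper multiplies the byte offset by 8 so it
// can be matched against the bit offsets stored in the VMap.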
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint32_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL))
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  // Normalize vector GEPs: all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;

  // True if we should use a splat vector; using VectorWidth alone is not
  // sufficient.
  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    // We don't produce 1 x N vectors; those are treated as scalars.
    WantSplatVector = VectorWidth > 1;
  }

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (WantSplatVector && !PtrTy.isVector()) {
    BaseReg =
        MIRBuilder
            .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
            .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIntPtrType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a constant index, fold it into the accumulated offset.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
                      .getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
          IdxReg = MIRBuilder
                       .buildSplatVector(OffsetTy.changeElementType(IdxTy),
                                         IdxReg)
                       .getReg(0);
        }
        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
      }

      // N = N + Idx * ElementSize; avoid the multiply for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(OffsetTy, ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg =
          MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB = MIRBuilder.buildConstant(OffsetTy, Offset);
    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned Opcode) {
  const Value *SrcPtr = CI.getArgOperand(1);
  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(SrcPtr))
    return true;

  SmallVector<Register, 3> SrcRegs;

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE;
       ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isPointer())
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
    SrcRegs.push_back(SrcReg);
  }

  LLT SizeTy = LLT::scalar(MinPtrSize);

  // The size operand should be the minimum of the pointer sizes.
  Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
  if (MRI->getType(SizeOpReg) != SizeTy)
    SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);

  auto ICall = MIRBuilder.buildInstr(Opcode);
  for (Register SrcReg : SrcRegs)
    ICall.addUse(SrcReg);

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();

  ConstantInt *CopySize = nullptr;

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
    // We need to propagate the tail call flag from the IR inst as an argument.
    // Otherwise, we have to pessimize and assume this might clobber the stack.
    ICall.addImm(CI.isTailCall() ? 1 : 0);
  }

  // Create mem operands to store the alignment and volatile info.
  MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
  MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
  if (IsVol) {
    LoadFlags |= MachineMemOperand::MOVolatile;
    StoreFlags |= MachineMemOperand::MOVolatile;
  }

  AAMDNodes AAInfo = CI.getAAMetadata();
  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(
          SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
    LoadFlags |= MachineMemOperand::MOInvariant;
    LoadFlags |= MachineMemOperand::MODereferenceable;
  }

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));

  return true;
}

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
  return true;
}

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);
  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  return true;
}
/// If the given intrinsic maps 1:1 to a generic opcode, return that opcode,
/// otherwise return Intrinsic::not_intrinsic.
static unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  }
  return Intrinsic::not_intrinsic;
}
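// Usage: translateSimpleIntrinsic below consults this table, so e.g. a call
// to llvm.sqrt.f32(float %x) becomes "%r:_(s32) = G_FSQRT %x", with the
// call's fast-math flags copied onto the generic instruction.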
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (const auto &Arg : CI.args())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  default:
    return 0;
  }
}

bool IRTranslator::translateConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
  if (!FPI.isUnaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
  if (FPI.isTernaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
                                           Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(MI);
      }
    }
  }

  // If this is a simple intrinsic (that is, we just need to add a def of a
  // vreg, and uses for each arg operand), translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start
                      ? TargetOpcode::LIFETIME_START
                      : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    getUnderlyingObjects(CI.getArgOperand(1), Allocas);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");
    const Value *Address = DI.getAddress();
    // ...
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked via frame index rather than a vreg;
      // the FI-based DBG_VALUE emission is elided in this listing.
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end.
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment.
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Align(1)));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // An undef DBG_VALUE terminates any earlier location for the variable.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(),
                                       DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0;
        // the per-register DBG_VALUE emission is elided in this listing.
      }
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
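  // Note on the fixed-point cases above: the third operand of llvm.smul.fix
  // and friends is the scale, i.e. the number of fractional bits, so e.g.
  // G_SMULFIX computes (A * B) >> Scale in signed fixed-point arithmetic.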
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(*MF,
                                       TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::convert_from_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    return translateCast(TargetOpcode::G_FPEXT, CI, MIRBuilder);
  case Intrinsic::convert_to_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    return translateCast(TargetOpcode::G_FPTRUNC, CI, MIRBuilder);
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal;
    if (TLI.useLoadStackGuardNode()) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    } else
      GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(CI);
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(1)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::expect:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // Drop the intrinsic, but forward the value.
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment
    // emission is the same on all targets.
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
      Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.

      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
      MCSymbol *FrameAllocSym =
          MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
                                                                Idx);

      // This should be inserted at the start of the entry block.
      auto LocalEscape =
          MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
              .addSym(FrameAllocSym)
              .addFrameIndex(FI);

      EntryMBB.insert(EntryMBB.begin(), LocalEscape);
    }

    return true;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Need to check for the reassoc flag to decide whether we want a
    // sequential reduction opcode or not.
    Register Dst = getOrCreateVReg(CI);
    Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
    Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Opc = 0;
    if (!CI.hasAllowReassoc()) {
      // The sequential ordering case.
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
                            MachineInstr::copyFlagsFromInstruction(CI));
      return true;
    }
    // We split the operation into a separate G_FADD/G_FMUL + the reduce,
    // since the associativity doesn't matter.
    unsigned ScalarOpc;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    }
    LLT DstTy = MRI->getType(Dst);
    auto Rdx = MIRBuilder.buildInstr(
        Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
                          MachineInstr::copyFlagsFromInstruction(CI));

    return true;
  }
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
  case Intrinsic::ubsantrap: {
    StringRef TrapFuncName =
        CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
    if (TrapFuncName.empty())
      break; // Use the default handling.

    // If there is a custom trap function, emit a call to it.
    CallLowering::CallLoweringInfo Info;
    if (ID == Intrinsic::ubsantrap) {
      Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
                               CI.getArgOperand(0)->getType(), 0});
    }
    Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
    Info.CB = &CI;
    Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
    return CLI->lowerCall(MIRBuilder, Info);
  }
  case Intrinsic::fptrunc_round: {
    uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);

    // Convert the metadata argument to a constant integer.
    Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
    std::optional<RoundingMode> RoundMode =
        convertStrToRoundingMode(cast<MDString>(MD)->getString());

    // Add the rounding mode as an integer.
    MIRBuilder
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
        .addImm((int)*RoundMode);

    return true;
  }
  case Intrinsic::is_fpclass: {
    Value *FpValue = CI.getOperand(0);
    ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));

    MIRBuilder
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
        .addImm(TestMaskValue->getZExtValue());

    return true;
  }
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);
  }
  return false;
}
bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}

bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (const auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(ArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(CI);
      }
    }
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls.
  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg,
      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls and calls through
  // weak symbols.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support statepoints and related.
  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic())
    ID = F->getIntrinsicID();

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallBase(CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<Register> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  // Ignore the callsite attributes. Backend code is most likely not expecting
  // an intrinsic to sometimes have side effects and sometimes not.
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !CI.doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (const auto &Arg : enumerate(CI.args())) {
    // If this is required to be an immediate, don't materialize it in a
    // register.
    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
        // imm arguments are more convenient than cimm (and realistically
        // probably sufficient), so use them.
        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());
      } else {
        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
      }
    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MD = MDVal->getMetadata();
      auto *MDN = dyn_cast<MDNode>(MD);
      if (!MDN) {
        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
          MDN = MDNode::get(MF->getFunction().getContext(),
                            ValueAsMetadata::getConstant(ConstMD->getValue()));
        else // This was probably an MDString.
          return false;
      }
      MIB.addMetadata(MDN);
    } else {
      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
      if (VRegs.size() > 1)
        return false;
      MIB.addUse(VRegs[0]);
    }
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    Align Alignment = Info.align.value_or(
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    ? getLLTForMVT(Info.memVT.getSimpleVT())
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());

    // TODO: We currently just fallback to address space 0 if
    // getTgtMemIntrinsic didn't yield anything useful.
    MachinePointerInfo MPI;
    if (Info.ptrVal)
      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
    else if (Info.fallbackAddressSpace)
      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
    MIB.addMemOperand(MF->getMachineMemOperand(MPI, Info.flags, MemTy,
                                               Alignment, CI.getAAMetadata()));
  }

  return true;
}
bool IRTranslator::findUnwindDestinations(
    const BasicBlock *EHPadBB, BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality = classifyEHPersonality(
      MF->getFunction().getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      break;
    }
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all
      // known personalities.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    }
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
  return true;
}
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Function *Fn = I.getCalledFunction();

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
    return false;

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = nullptr;
  if (NeedEHLabel) {
    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();
    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
  }

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
      return false;
  } else if (!translateCallBase(I, MIRBuilder))
    return false;

  MCSymbol *EndSymbol = nullptr;
  if (NeedEHLabel) {
    EndSymbol = Context.createTempSymbol();
    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1>
      UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
    return false;

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  // Update successor info.
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  if (NeedEHLabel) {
    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  }

  MIRBuilder.buildBr(ReturnMBB);
  return true;
}

bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create values for
  // its exception pointer and selector.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  // If the unwinder does not preserve all registers, ensure that the
  // function marks the clobbered registers as used.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Register NumElts = getOrCreateVReg(*AI.getArraySize());
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  Type *Ty = AI.getAllocatedType();

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  Register TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
  return true;
}

bool IRTranslator::translateUnreachable(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  if (!MF->getTarget().Options.TrapUnreachable)
    return true;

  auto &UI = cast<UnreachableInst>(U);
  // We may be able to ignore unreachable behind a noreturn call.
  if (MF->getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *UI.getParent();
    if (&UI != &BB.front()) {
      BasicBlock::const_iterator PredI =
          std::prev(BasicBlock::const_iterator(UI));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return true;
      }
    }
  }

  MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(),
                            /*HasSideEffects=*/true);
  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  Register Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  return true;
}
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
  return true;
}
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  case AtomicRMWInst::FMax:
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    break;
  case AtomicRMWInst::FMin:
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    break;
  case AtomicRMWInst::UIncWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    break;
  case AtomicRMWInst::UDecWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
                                I.getOrdering()));
  return true;
}
bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}

bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
  const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

  assert(DstRegs.size() == SrcRegs.size() &&
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
    MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
  }
  return true;
}
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.contains(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));

  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
bool IRTranslator::translate(const Constant &C, Register Reg) {
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour, remove the debug line.
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!isa<FixedVectorType>(CAZ->getType()))
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
    if (NumElts == 1)
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant &Elt = *CAZ->getElementValue(I);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}
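// All constants are materialized by EntryBuilder into the function's entry
// block, where they dominate every use; their debug locations are
// intentionally dropped (see the top of this function) so that stepping
// through the program doesn't jump back to a constant's first use.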
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    // Emit header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code
      // into.
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
      // If all cases cover a contiguous range, it is not necessary to jump to
      // the default block after the last bit test fails. This is because the
      // range check during bit test header creation has guaranteed that every
      // case here doesn't go outside the range. In this case, there is no
      // need to perform the last bit test, as it will always be true.
      // Instead, make the second-to-last bit-test fall through to the target
      // of the last bit test, and delete the last bit test.

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // Second-to-last bit-test with contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // We need to record the replacement phi edge here that normally
        // happens in emitBitTestCase before we delete the case, otherwise the
        // phi edge will be lost.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        // Since we're not going to use the final bit test, remove it.
        BTB.Cases.pop_back();
        break;
      }
    }
    // This is "default" BB. We have two jumps to it: from the "header" BB and
    // from the last "case" BB, unless the latter was skipped.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
  // Check if we need to generate stack-protector guard checks.
  StackProtector &SP = getAnalysis<StackProtector>();
  if (SP.shouldEmitSDCheck(BB)) {
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    bool FunctionBasedInstrumentation =
        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  // Handle stack protector.
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    // The target provides a guard check function; no block splitting or
    // error-handling code is needed here (elided in this listing).
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

    // Find the split point to split the parent mbb (register-copy details
    // elided in this listing).
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());

    // Splice the terminator of ParentMBB into SuccessMBB.
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    // Add compare/jump on neq/jump to the parent BB.
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    // CodeGen Failure MBB if we have not codegened it yet.
    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }

    // Clear the Per-BB State.
    SPDescriptor.resetPerBBState();
  }
  return true;
}

bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                          MachineBasicBlock *ParentBB) {
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());

  // First create the loads to the guard/stack slot for the comparison.
  // (Type and builder setup is elided in this listing.)
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  const Module &M = *MF->getFunction().getParent();
  int FI = MF->getFrameInfo().getStackProtectorIndex();

  Register Guard;
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);

  // Generate code to load the content of the guard slot.
  Register GuardVal =
      CurBuilder
          ->buildLoad(PtrMemTy, StackSlotPtr,
                      MachinePointerInfo::getFixedStack(*MF, FI), Align,
                      MachineMemOperand::MOLoad |
                          MachineMemOperand::MOVolatile)
          .getReg(0);

  if (TLI.useStackGuardXorFP()) {
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
    return false;
  }

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
    // The target-specific guard check function takes the loaded guard value
    // as its only argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
    Info.OrigRet = {Register(), FnTy->getReturnType()};
    if (!CLI->lowerCall(*CurBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
    return true;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD;
  // otherwise emit a volatile load to retrieve the stack guard value.
  if (TLI.useLoadStackGuardNode()) {
    Guard =
        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
    getStackGuard(Guard, *CurBuilder);
  } else {
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    Register GuardPtr = getOrCreateVReg(*IRGuard);

    Guard = CurBuilder
                ->buildLoad(PtrMemTy, GuardPtr,
                            MachinePointerInfo::getFixedStack(*MF, FI), Align,
                            MachineMemOperand::MOLoad |
                                MachineMemOperand::MOVolatile)
                .getReg(0);
  }

  // The comparison of Guard vs GuardVal and the conditional branch to the
  // failure block are elided in this listing.
  return true;
}

bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *FailureBB) {
  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

  // Lower the call to __stack_chk_fail (argument setup elided).
  CallLowering::CallLoweringInfo Info;
  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  // On PS4/PS5, the "return address" must still be within the calling
  // function, so emit an explicit TRAP. WebAssembly needs an unreachable
  // instruction after a non-returning call, because the function return type
  // can differ from __stack_chk_fail's return type (void).
  const TargetMachine &TM = MF->getTarget();
  if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
    return false;
  }
  return true;
}
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory.
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
  SPDescriptor.resetPerFunctionState();
}

/// Returns true if a BasicBlock BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts) {
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  } else {
    AA = nullptr;
    FuncInfo.BPI = nullptr;
  }

  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      MF->getFunction());
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian().
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fallthrough to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(),
                                VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to the end
      // of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, stop translating the
        // block: everything after the call is either a return or something
        // handled by the call itself.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to be
  // maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the new entry
  // block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
SmallVector< MachineOperand, 4 > Cond
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This contains common code to allow clients to notify changes to machine instr.
const HexagonInstrInfo * TII
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
Statically lint checks LLVM IR
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
typename CallsiteContextGraph< DerivedCCG, FuncTy, CallTy >::FuncInfo FuncInfo
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
verify safepoint Safepoint IR Verifier
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
const Value * getArraySize() const
Get the number of elements allocated.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An immutable pass that tracks lazily created AssumptionCache objects.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
Attribute getFnAttr(Attribute::AttrKind Kind) const
Return the attribute object that exists for the function.
StringRef getValueAsString() const
Return the attribute's value as a string.
LLVM Basic Block Representation.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
InstListType::const_iterator const_iterator
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction & back() const
Legacy analysis pass which computes BlockFrequencyInfo.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Legacy analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
virtual bool enableBigEndian() const
For targets which want to use big-endian can enable it with enableBigEndian() hook.
virtual bool supportSwiftError() const
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
virtual bool fallBackToDAGISel(const MachineFunction &MF) const
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
bool isFPPredicate() const
bool isIntPredicate() const
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
bool isLittleEndian() const
Layout endianness...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
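A sketch of the DataLayout queries a translator typically needs, assuming DL, a sized Type *Ty and an address space AS are in scope:

  unsigned PtrBits  = DL.getPointerSizeInBits(AS); // e.g. 64 on most targets
  TypeSize Alloc    = DL.getTypeAllocSize(Ty);     // size incl. tail padding
  TypeSize Store    = DL.getTypeStoreSize(Ty);     // bytes a store may touch
  Align    ABIAlign = DL.getABITypeAlign(Ty);      // minimum required align
  bool     LE       = DL.isLittleEndian();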
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
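A sketch of how these accessors feed the DBG_VALUE builders listed further down; DI is a DbgValueInst, and getOrCreateVReg stands in for the translator's value map:

  const DILocalVariable *Var = DI.getVariable();
  const DIExpression *Expr = DI.getExpression();
  const Value *V = DI.getValue();
  if (const auto *C = dyn_cast<Constant>(V))
    MIRBuilder.buildConstDbgValue(*C, Var, Expr); // constant location
  else
    MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*V), Var, Expr);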
This instruction compares its operands according to the predicate given to the constructor.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
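These two accessors are all a fence needs; a sketch matching the buildFence signature listed below, which is essentially what translating a fence boils down to:

  bool translateFence(const FenceInst &Fence, MachineIRBuilder &MIRBuilder) {
    MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                          Fence.getSyncScopeID());
    return true;
  }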
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
DISubprogram * getSubprogram() const
Get the attached subprogram.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
The actual analysis pass wrapper.
Simple wrapper that does the following.
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
Module * getParent()
Get the module that this global value is contained inside of...
bool isTailCall(const MachineInstr &MI) const override
This instruction compares its operands according to the predicate given to the constructor.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
IRTranslator(CodeGenOpt::Level OptLevel=CodeGenOpt::None)
Indirect Branch Instruction.
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field or array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
const BasicBlock * getParent() const
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
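Concrete uses of the LLT factories above:

  LLT S32   = LLT::scalar(32);           // 32-bit "bag of bits"
  LLT P0    = LLT::pointer(0, 64);       // 64-bit pointer in address space 0
  LLT V4S32 = LLT::fixed_vector(4, 32);  // <4 x s32>, 128 bits total
  LLT V4S16 = V4S32.changeElementType(LLT::scalar(16)); // <4 x s16>
  assert(V4S32.isVector() && P0.isPointer() && !S32.isVector());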
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
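Combined with getMachineMemOperand and buildLoad further down, these accessors are enough to sketch a simplified load translation; MF, DL, MIRBuilder, Res and getOrCreateVReg are assumed in scope, and real code additionally handles atomic ordering, ranges and AA metadata:

  Register Ptr = getOrCreateVReg(*LI.getPointerOperand());
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(LI.getPointerOperand()), // the memory location
      MachineMemOperand::MOLoad,                  // this access reads
      DL.getTypeStoreSize(LI.getType()),          // bytes touched
      LI.getAlign());
  MIRBuilder.buildLoad(Res, Ptr, *MMO);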
static LocationSize precise(uint64_t Value)
Context object for machine code objects.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
void setAddressTakenIRBlock(BasicBlock *BB)
Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
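A sketch of wiring CFG edges with these methods; CurMBB, TrueMBB and FalseMBB are assumed, and real probabilities would come from branch probability analysis:

  CurMBB->addSuccessor(TrueMBB, BranchProbability::getUnknown());
  CurMBB->addSuccessor(FalseMBB);
  CurMBB->normalizeSuccProbs(); // successor probabilities now sum to one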
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
void setHasMustTailInVarArgFunc(bool B)
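A sketch of the static/dynamic split these two creation methods support; AI is an AllocaInst, Size and Alignment are assumed computed, and isStaticAlloca is an AllocaInst query not listed above:

  MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = AI.isStaticAlloca()
               ? MFI.CreateStackObject(Size, Alignment,
                                       /*isSpillSlot=*/false, &AI)
               : MFI.CreateVariableSizedObject(Alignment, &AI);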
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
void remove(iterator MBBI)
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
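For example, creating one MachineBasicBlock per IR block up front, in function order:

  for (const BasicBlock &BB : MF.getFunction()) {
    MachineBasicBlock *MBB = MF.CreateMachineBasicBlock(&BB);
    MF.push_back(MBB);
  }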
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying the given Label.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
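A sketch tying several of the builders above into a compare-and-branch; CurMBB, TrueMBB, FalseMBB, LHS, RHS, MRI and the IR instruction I are assumed in scope:

  MIRBuilder.setMBB(*CurMBB);              // insert at the end of CurMBB
  MIRBuilder.setDebugLoc(I.getDebugLoc()); // propagate line info
  Register Cond = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_UGT, Cond, LHS, RHS);
  MIRBuilder.buildBrCond(Cond, *TrueMBB);  // conditional edge
  MIRBuilder.buildBr(*FalseMBB);           // explicit fall-through edge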
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
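A sketch of the fluent operand interface, here assembling a G_STORE by hand via buildInstr; ValReg, AddrReg and MMO are assumed:

  auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_STORE)
                 .addUse(ValReg)      // value to store
                 .addUse(AddrReg)     // address
                 .addMemOperand(MMO); // attach the memory operand
  MachineInstr *MI = MIB.getInstr();  // escape hatch to the raw instruction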
Representation of each machine instruction.
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
static uint16_t copyFlagsFromInstruction(const Instruction &I)
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MCContext & getContext() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
void addPhysRegsUsedFromRegMask(const uint32_t *RegMask)
addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
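For example, walking a PHI's incoming edges, as a translator must when seeding G_PHI operands:

  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    const BasicBlock *Pred = PN.getIncomingBlock(i);
    const Value *V = PN.getIncomingValue(i);
    // record one (vreg, predecessor MBB) pair per incoming edge
  }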
A simple RAII based Delegate installer.
A simple RAII based Observer installer.
Wrapper class representing virtual and physical registers.
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, bool FunctionBasedInstrumentation)
Initialize the stack protector descriptor structure for a new basic block.
MachineBasicBlock * getSuccessMBB()
void resetPerBBState()
Reset state that changes when we handle different basic blocks.
void resetPerFunctionState()
Reset state that only changes when we switch functions.
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitStackProtector() const
Returns true if all fields of the stack protector descriptor are initialized implying that we should/...
bool shouldEmitFunctionBasedCheckStackProtector() const
bool shouldEmitSDCheck(const BasicBlock &BB) const
void copyToMachineFrameInfo(MachineFrameInfo &MFI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
uint64_t getElementOffset(unsigned Idx) const
Class to represent struct types.
bool createEntriesInEntryBlock(DebugLoc DbgLoc)
Create initial definitions of swifterror values in the entry block of the current function.
void setFunction(MachineFunction &MF)
Initialize data structures for specified new function.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
void propagateVRegs()
Propagate assigned swifterror vregs through a function, synthesizing PHI nodes when needed to maintai...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool fallBackToDAGISel(const Instruction &Inst) const
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Primary interface to the complete machine description for the target machine.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
const Triple & getTargetTriple() const
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
Target-Independent Code Generator Pass Configuration Options.
virtual std::unique_ptr< CSEConfigBase > getCSEConfig() const
Returns the CSEConfig object to use for the current optimization level.
virtual bool isGISelCSEEnabled() const
Check whether continuous CSE should be enabled in GISel passes.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const InlineAsmLowering * getInlineAsmLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const CallLowering * getCallLowering() const
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
bool isOSWindows() const
Tests whether the OS is Windows.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
TypeID
Definitions of all of the base types for the Type system.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static IntegerType * getInt32Ty(LLVMContext &C)
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
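A few of the dispatch checks above in context, assuming Ty is a Type*:

  if (Ty->isVoidTy()) {
    // void produces no values, so no vregs are needed
  } else if (Ty->isAggregateType()) {
    // struct/array values may be split across several vregs with offsets
  }
  bool CanQuerySize = Ty->isSized(); // precondition for DataLayout size queries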
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
constexpr bool isZero() const
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Level
Code generation optimization level.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Undef
Value of the register doesn't matter.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebIgnore
This corresponds to "fpexcept.ignore".
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
gep_type_iterator gep_type_end(const User *GEP)
MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
LLT getLLTForMVT(MVT Ty)
Get a rough equivalent of an LLT for a given MVT.
int countr_zero(T Val)
Count the number of 0s from the least significant bit upward, stopping at the first 1.
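Concrete values for the bit utilities above:

  uint64_t V = 0b1011000;          // 88
  int TZ   = llvm::countr_zero(V); // 3: bits 0..2 are clear
  int Ones = llvm::countr_one(V);  // 0: the lowest bit is already clear
  int PC   = llvm::popcount(V);    // 3: three bits set
  bool Fit = llvm::isUIntN(7, V);  // true: 88 < 2^7 = 128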