64#include "llvm/IR/IntrinsicsAMDGPU.h"
93#define DEBUG_TYPE "irtranslator"
99 cl::desc(
"Should enable CSE in irtranslator"),
116 MF.getProperties().setFailedISel();
117 bool IsGlobalISelAbortEnabled =
122 if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
123 R << (
" (in function: " + MF.getName() +
")").str();
125 if (IsGlobalISelAbortEnabled)
142 DILocationVerifier() =
default;
143 ~DILocationVerifier()
override =
default;
145 const Instruction *getCurrentInst()
const {
return CurrInst; }
146 void setCurrentInst(
const Instruction *Inst) { CurrInst = Inst; }
148 void erasingInstr(MachineInstr &
MI)
override {}
149 void changingInstr(MachineInstr &
MI)
override {}
150 void changedInstr(MachineInstr &
MI)
override {}
152 void createdInstr(MachineInstr &
MI)
override {
153 assert(getCurrentInst() &&
"Inserted instruction without a current MI");
158 <<
" was copied to " <<
MI);
164 (
MI.getParent()->isEntryBlock() && !
MI.getDebugLoc()) ||
165 (
MI.isDebugInstr())) &&
166 "Line info was not transferred to all instructions");
190IRTranslator::ValueToVRegInfo::VRegListT &
191IRTranslator::allocateVRegs(
const Value &Val) {
192 auto VRegsIt = VMap.findVRegs(Val);
193 if (VRegsIt != VMap.vregs_end())
194 return *VRegsIt->second;
195 auto *Regs = VMap.getVRegs(Val);
196 auto *Offsets = VMap.getOffsets(Val);
199 Offsets->empty() ? Offsets :
nullptr);
200 for (
unsigned i = 0; i < SplitTys.
size(); ++i)
206 auto VRegsIt = VMap.findVRegs(Val);
207 if (VRegsIt != VMap.vregs_end())
208 return *VRegsIt->second;
211 return *VMap.getVRegs(Val);
214 auto *VRegs = VMap.getVRegs(Val);
215 auto *Offsets = VMap.getOffsets(Val);
219 "Don't know how to create an empty vreg");
223 Offsets->empty() ? Offsets :
nullptr);
226 for (
auto Ty : SplitTys)
235 while (
auto Elt =
C.getAggregateElement(Idx++)) {
236 auto EltRegs = getOrCreateVRegs(*Elt);
240 assert(SplitTys.size() == 1 &&
"unexpectedly split LLT");
241 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
244 OptimizationRemarkMissed
R(
"gisel-irtranslator",
"GISelFailure",
245 MF->getFunction().getSubprogram(),
246 &MF->getFunction().getEntryBlock());
247 R <<
"unable to translate constant: " <<
ore::NV(
"Type", Val.
getType());
256int IRTranslator::getOrCreateFrameIndex(
const AllocaInst &AI) {
257 auto [MapEntry,
Inserted] = FrameIndices.try_emplace(&AI);
259 return MapEntry->second;
265 Size = std::max<uint64_t>(
Size, 1u);
267 int &FI = MapEntry->second;
268 FI = MF->getFrameInfo().CreateStackObject(
Size, AI.
getAlign(),
false, &AI);
275 MF->getSubtarget().getFrameLowering()->getStackIDForScalableVectors();
276 MF->getFrameInfo().setStackID(FI, StackID);
284 return SI->getAlign();
286 return LI->getAlign();
292 OptimizationRemarkMissed
R(
"gisel-irtranslator",
"", &
I);
293 R <<
"unable to translate memop: " <<
ore::NV(
"Opcode", &
I);
299 MachineBasicBlock *
MBB = FuncInfo.getMBB(&BB);
300 assert(
MBB &&
"BasicBlock was not encountered before");
305 assert(NewPred &&
"new predecessor must be a real MachineBasicBlock");
306 MachinePreds[
Edge].push_back(NewPred);
317 return U.getType()->getScalarType()->isBFloatTy() ||
319 return V->getType()->getScalarType()->isBFloatTy();
323bool IRTranslator::translateBinaryOp(
unsigned Opcode,
const User &U,
332 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
333 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
345bool IRTranslator::translateUnaryOp(
unsigned Opcode,
const User &U,
350 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
362 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
365bool IRTranslator::translateCompare(
const User &U,
371 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
372 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
377 MIRBuilder.
buildICmp(Pred, Res, Op0, Op1, Flags);
385 MIRBuilder.
buildFCmp(Pred, Res, Op0, Op1, Flags);
393 if (Ret && DL->getTypeStoreSize(Ret->
getType()).isZero())
398 VRegs = getOrCreateVRegs(*Ret);
401 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
402 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
403 &RI, &MIRBuilder.
getMBB(), SwiftError.getFunctionArg());
409 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
412void IRTranslator::emitBranchForMergedCondition(
421 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
424 Condition = InvertCond ?
FC->getInversePredicate() :
FC->getPredicate();
427 SwitchCG::CaseBlock CB(Condition,
false, BOp->getOperand(0),
428 BOp->getOperand(1),
nullptr,
TBB, FBB, CurBB,
429 CurBuilder->getDebugLoc(), TProb, FProb);
430 SL->SwitchCases.push_back(CB);
436 SwitchCG::CaseBlock CB(
438 nullptr,
TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
439 SL->SwitchCases.push_back(CB);
444 return I->getParent() == BB;
448void IRTranslator::findMergedConditions(
453 using namespace PatternMatch;
454 assert((
Opc == Instruction::And ||
Opc == Instruction::Or) &&
455 "Expected Opc to be AND/OR");
461 findMergedConditions(NotCond,
TBB, FBB, CurBB, SwitchBB,
Opc, TProb, FProb,
467 const Value *BOpOp0, *BOpOp1;
481 if (BOpc == Instruction::And)
482 BOpc = Instruction::Or;
483 else if (BOpc == Instruction::Or)
484 BOpc = Instruction::And;
490 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
494 emitBranchForMergedCondition(
Cond,
TBB, FBB, CurBB, SwitchBB, TProb, FProb,
501 MachineBasicBlock *TmpBB =
505 if (
Opc == Instruction::Or) {
526 auto NewTrueProb = TProb / 2;
527 auto NewFalseProb = TProb / 2 + FProb;
529 findMergedConditions(BOpOp0,
TBB, TmpBB, CurBB, SwitchBB,
Opc, NewTrueProb,
530 NewFalseProb, InvertCond);
536 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB,
Opc, Probs[0],
537 Probs[1], InvertCond);
539 assert(
Opc == Instruction::And &&
"Unknown merge op!");
559 auto NewTrueProb = TProb + FProb / 2;
560 auto NewFalseProb = FProb / 2;
562 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB,
Opc, NewTrueProb,
563 NewFalseProb, InvertCond);
569 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB,
Opc, Probs[0],
570 Probs[1], InvertCond);
574bool IRTranslator::shouldEmitAsBranches(
575 const std::vector<SwitchCG::CaseBlock> &Cases) {
577 if (Cases.size() != 2)
582 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
583 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
584 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
585 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
591 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
592 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
596 Cases[0].TrueBB == Cases[1].ThisBB)
599 Cases[0].FalseBB == Cases[1].ThisBB)
606bool IRTranslator::translateUncondBr(
const User &U,
609 auto &CurMBB = MIRBuilder.
getMBB();
617 for (
const BasicBlock *Succ :
successors(&BrInst))
618 CurMBB.addSuccessor(&getMBB(*Succ));
622bool IRTranslator::translateCondBr(
const User &U,
625 auto &CurMBB = MIRBuilder.
getMBB();
631 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.
getSuccessor(1));
650 using namespace PatternMatch;
652 if (!TLI->isJumpExpensive() && CondI && CondI->
hasOneUse() &&
653 !BrInst.
hasMetadata(LLVMContext::MD_unpredictable)) {
656 const Value *BOp0, *BOp1;
658 Opcode = Instruction::And;
660 Opcode = Instruction::Or;
664 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
665 getEdgeProbability(&CurMBB, Succ0MBB),
666 getEdgeProbability(&CurMBB, Succ1MBB),
668 assert(SL->SwitchCases[0].ThisBB == &CurMBB &&
"Unexpected lowering!");
671 if (shouldEmitAsBranches(SL->SwitchCases)) {
673 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
674 SL->SwitchCases.erase(SL->SwitchCases.begin());
680 for (
unsigned I = 1,
E = SL->SwitchCases.size();
I !=
E; ++
I)
681 MF->erase(SL->SwitchCases[
I].ThisBB);
683 SL->SwitchCases.clear();
690 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
691 CurBuilder->getDebugLoc());
695 emitSwitchCase(CB, &CurMBB, *CurBuilder);
703 Src->addSuccessorWithoutProb(Dst);
707 Prob = getEdgeProbability(Src, Dst);
708 Src->addSuccessor(Dst, Prob);
714 const BasicBlock *SrcBB = Src->getBasicBlock();
715 const BasicBlock *DstBB = Dst->getBasicBlock();
719 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
720 return BranchProbability(1, SuccSize);
722 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
726 using namespace SwitchCG;
729 BranchProbabilityInfo *BPI = FuncInfo.BPI;
731 Clusters.reserve(
SI.getNumCases());
732 for (
const auto &
I :
SI.cases()) {
733 MachineBasicBlock *Succ = &getMBB(*
I.getCaseSuccessor());
734 assert(Succ &&
"Could not find successor mbb in mapping");
735 const ConstantInt *CaseVal =
I.getCaseValue();
736 BranchProbability Prob =
738 : BranchProbability(1,
SI.getNumCases() + 1);
739 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
742 MachineBasicBlock *DefaultMBB = &getMBB(*
SI.getDefaultDest());
749 MachineBasicBlock *SwitchMBB = &getMBB(*
SI.getParent());
752 if (Clusters.empty()) {
759 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB,
nullptr,
nullptr);
760 SL->findBitTestClusters(Clusters, &SI);
763 dbgs() <<
"Case clusters: ";
764 for (
const CaseCluster &
C : Clusters) {
765 if (
C.Kind == CC_JumpTable)
767 if (
C.Kind == CC_BitTests)
770 C.Low->getValue().print(
dbgs(),
true);
771 if (
C.Low !=
C.High) {
773 C.High->getValue().print(
dbgs(),
true);
780 assert(!Clusters.empty());
784 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
785 WorkList.push_back({SwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
787 while (!WorkList.empty()) {
788 SwitchWorkListItem
W = WorkList.pop_back_val();
790 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
792 if (NumClusters > 3 &&
795 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB, MIB);
799 if (!lowerSwitchWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
809 using namespace SwitchCG;
810 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
811 "Clusters not sorted?");
812 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
814 auto [LastLeft, FirstRight, LeftProb, RightProb] =
815 SL->computeSplitWorkItemInfo(W);
820 assert(PivotCluster >
W.FirstCluster);
821 assert(PivotCluster <=
W.LastCluster);
826 const ConstantInt *Pivot = PivotCluster->Low;
835 MachineBasicBlock *LeftMBB;
836 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
837 FirstLeft->Low ==
W.GE &&
838 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
839 LeftMBB = FirstLeft->MBB;
841 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
842 FuncInfo.MF->
insert(BBI, LeftMBB);
844 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
850 MachineBasicBlock *RightMBB;
851 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
W.LT &&
852 (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
853 RightMBB = FirstRight->MBB;
855 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
856 FuncInfo.MF->
insert(BBI, RightMBB);
858 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
866 if (
W.MBB == SwitchMBB)
867 emitSwitchCase(CB, SwitchMBB, MIB);
869 SL->SwitchCases.push_back(CB);
875 assert(JT.
Reg &&
"Should lower JT Header first!");
890 MachineIRBuilder MIB(*HeaderBB->
getParent());
897 Register SwitchOpReg = getOrCreateVReg(SValue);
899 auto Sub = MIB.
buildSub({SwitchTy}, SwitchOpReg, FirstCst);
904 const LLT PtrScalarTy =
LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
918 auto Cst = getOrCreateVReg(
959 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
973 "Can only handle SLE ranges");
984 const LLT CmpTy = MRI->getType(CmpOpReg);
985 auto Sub = MIB.
buildSub({CmpTy}, CmpOpReg, CondLHS);
1020 bool FallthroughUnreachable) {
1021 using namespace SwitchCG;
1022 MachineFunction *CurMF = SwitchMBB->
getParent();
1024 JumpTableHeader *JTH = &SL->JTCases[
I->JTCasesIndex].first;
1025 SwitchCG::JumpTable *JT = &SL->JTCases[
I->JTCasesIndex].second;
1026 BranchProbability DefaultProb =
W.DefaultProb;
1029 MachineBasicBlock *JumpMBB = JT->
MBB;
1030 CurMF->
insert(BBI, JumpMBB);
1040 auto JumpProb =
I->Prob;
1041 auto FallthroughProb = UnhandledProbs;
1049 if (*SI == DefaultMBB) {
1050 JumpProb += DefaultProb / 2;
1051 FallthroughProb -= DefaultProb / 2;
1056 addMachineCFGPred({SwitchMBB->
getBasicBlock(), (*SI)->getBasicBlock()},
1061 if (FallthroughUnreachable)
1062 JTH->FallthroughUnreachable =
true;
1064 if (!JTH->FallthroughUnreachable)
1065 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1066 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1071 JTH->HeaderBB = CurMBB;
1075 if (CurMBB == SwitchMBB) {
1076 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1078 JTH->Emitted =
true;
1085 bool FallthroughUnreachable,
1090 using namespace SwitchCG;
1093 if (
I->Low ==
I->High) {
1109 CaseBlock CB(Pred, FallthroughUnreachable,
LHS,
RHS, MHS,
I->MBB, Fallthrough,
1112 emitSwitchCase(CB, SwitchMBB, MIB);
1118 MachineIRBuilder &MIB = *CurBuilder;
1122 Register SwitchOpReg = getOrCreateVReg(*
B.SValue);
1124 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1126 auto RangeSub = MIB.
buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1131 LLT MaskTy = SwitchOpTy;
1137 for (
const SwitchCG::BitTestCase &Case :
B.Cases) {
1146 Register SubReg = RangeSub.getReg(0);
1147 if (SwitchOpTy != MaskTy)
1153 MachineBasicBlock *
MBB =
B.Cases[0].ThisBB;
1155 if (!
B.FallthroughUnreachable)
1156 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
1157 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
1161 if (!
B.FallthroughUnreachable) {
1165 RangeSub, RangeCst);
1179 MachineIRBuilder &MIB = *CurBuilder;
1185 if (PopCount == 1) {
1188 auto MaskTrailingZeros =
1193 }
else if (PopCount == BB.
Range) {
1195 auto MaskTrailingOnes =
1202 auto SwitchVal = MIB.
buildShl(SwitchTy, CstOne,
Reg);
1206 auto AndOp = MIB.
buildAnd(SwitchTy, SwitchVal, CstMask);
1213 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
1215 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1233bool IRTranslator::lowerBitTestWorkItem(
1239 bool FallthroughUnreachable) {
1240 using namespace SwitchCG;
1241 MachineFunction *CurMF = SwitchMBB->
getParent();
1243 BitTestBlock *BTB = &SL->BitTestCases[
I->BTCasesIndex];
1245 for (BitTestCase &BTC : BTB->Cases)
1246 CurMF->
insert(BBI, BTC.ThisBB);
1249 BTB->Parent = CurMBB;
1250 BTB->Default = Fallthrough;
1252 BTB->DefaultProb = UnhandledProbs;
1256 if (!BTB->ContiguousRange) {
1257 BTB->Prob += DefaultProb / 2;
1258 BTB->DefaultProb -= DefaultProb / 2;
1261 if (FallthroughUnreachable)
1262 BTB->FallthroughUnreachable =
true;
1265 if (CurMBB == SwitchMBB) {
1266 emitBitTestHeader(*BTB, SwitchMBB);
1267 BTB->Emitted =
true;
1277 using namespace SwitchCG;
1278 MachineFunction *CurMF = FuncInfo.MF;
1279 MachineBasicBlock *NextMBB =
nullptr;
1281 if (++BBI != FuncInfo.MF->end())
1290 [](
const CaseCluster &a,
const CaseCluster &b) {
1291 return a.Prob != b.Prob
1293 : a.Low->getValue().slt(b.Low->getValue());
1298 for (CaseClusterIt
I =
W.LastCluster;
I >
W.FirstCluster;) {
1300 if (
I->Prob >
W.LastCluster->Prob)
1302 if (
I->Kind == CC_Range &&
I->MBB == NextMBB) {
1310 BranchProbability DefaultProb =
W.DefaultProb;
1311 BranchProbability UnhandledProbs = DefaultProb;
1312 for (CaseClusterIt
I =
W.FirstCluster;
I <=
W.LastCluster; ++
I)
1313 UnhandledProbs +=
I->Prob;
1315 MachineBasicBlock *CurMBB =
W.MBB;
1316 for (CaseClusterIt
I =
W.FirstCluster,
E =
W.LastCluster;
I <=
E; ++
I) {
1317 bool FallthroughUnreachable =
false;
1318 MachineBasicBlock *Fallthrough;
1319 if (
I ==
W.LastCluster) {
1321 Fallthrough = DefaultMBB;
1326 CurMF->
insert(BBI, Fallthrough);
1328 UnhandledProbs -=
I->Prob;
1332 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1333 DefaultProb, UnhandledProbs,
I, Fallthrough,
1334 FallthroughUnreachable)) {
1342 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1343 UnhandledProbs,
I, Fallthrough,
1344 FallthroughUnreachable)) {
1351 if (!lowerSwitchRangeWorkItem(
I,
Cond, Fallthrough,
1352 FallthroughUnreachable, UnhandledProbs,
1353 CurMBB, MIB, SwitchMBB)) {
1360 CurMBB = Fallthrough;
1366bool IRTranslator::translateIndirectBr(
const User &U,
1374 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1375 MachineBasicBlock &CurBB = MIRBuilder.
getMBB();
1376 for (
const BasicBlock *Succ :
successors(&BrInst)) {
1380 if (!AddedSuccessors.
insert(Succ).second)
1390 return Arg->hasSwiftErrorAttr();
1398 TypeSize StoreSize = DL->getTypeStoreSize(LI.
getType());
1403 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(LI);
1408 Type *OffsetIRTy = DL->getIndexType(Ptr->
getType());
1412 assert(Regs.
size() == 1 &&
"swifterror should be single pointer");
1414 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.
getMBB(), Ptr);
1420 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1422 if (AA->pointsToConstantMemory(
1430 for (
unsigned i = 0; i < Regs.
size(); ++i) {
1435 Align BaseAlign = getMemOpAlign(LI);
1437 MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
1440 MIRBuilder.
buildLoad(Regs[i], Addr, *MMO);
1448 if (DL->getTypeStoreSize(
SI.getValueOperand()->getType()).isZero())
1452 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(*
SI.getValueOperand());
1455 Type *OffsetIRTy = DL->getIndexType(
SI.getPointerOperandType());
1458 if (CLI->supportSwiftError() &&
isSwiftError(
SI.getPointerOperand())) {
1459 assert(Vals.
size() == 1 &&
"swifterror should be single pointer");
1461 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.
getMBB(),
1462 SI.getPointerOperand());
1469 for (
unsigned i = 0; i < Vals.
size(); ++i) {
1473 MachinePointerInfo Ptr(
SI.getPointerOperand(), Offsets[i]);
1474 Align BaseAlign = getMemOpAlign(SI);
1475 auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
1477 SI.getAAMetadata(),
nullptr,
1478 SI.getSyncScopeID(),
SI.getOrdering());
1485 const Value *Src = U.getOperand(0);
1494 for (
auto Idx : EVI->indices())
1497 for (
auto Idx : IVI->indices())
1504 DL.getIndexedOffsetInType(Src->getType(), Indices));
1507bool IRTranslator::translateExtractValue(
const User &U,
1509 const Value *Src =
U.getOperand(0);
1512 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(*Src);
1514 auto &DstRegs = allocateVRegs(U);
1516 for (
unsigned i = 0; i < DstRegs.size(); ++i)
1517 DstRegs[i] = SrcRegs[Idx++];
1522bool IRTranslator::translateInsertValue(
const User &U,
1524 const Value *Src =
U.getOperand(0);
1526 auto &DstRegs = allocateVRegs(U);
1527 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1530 auto *InsertedIt = InsertedRegs.
begin();
1532 for (
unsigned i = 0; i < DstRegs.size(); ++i) {
1533 if (DstOffsets[i] >=
Offset && InsertedIt != InsertedRegs.
end())
1534 DstRegs[i] = *InsertedIt++;
1536 DstRegs[i] = SrcRegs[i];
1542bool IRTranslator::translateSelect(
const User &U,
1544 Register Tst = getOrCreateVReg(*
U.getOperand(0));
1553 for (
unsigned i = 0; i < ResRegs.
size(); ++i) {
1554 MIRBuilder.
buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1560bool IRTranslator::translateCopy(
const User &U,
const Value &V,
1563 auto &Regs = *VMap.getVRegs(U);
1565 Regs.push_back(Src);
1566 VMap.getOffsets(U)->push_back(0);
1575bool IRTranslator::translateBitCast(
const User &U,
1583 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1585 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
1588 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1591bool IRTranslator::translateCast(
unsigned Opcode,
const User &U,
1606bool IRTranslator::translateGetElementPtr(
const User &U,
1608 Value &Op0 = *
U.getOperand(0);
1612 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1615 uint32_t PtrAddFlags = 0;
1621 auto PtrAddFlagsWithConst = [&](int64_t
Offset) {
1631 unsigned VectorWidth = 0;
1635 bool WantSplatVector =
false;
1639 WantSplatVector = VectorWidth > 1;
1644 if (WantSplatVector && !PtrTy.
isVector()) {
1651 OffsetIRTy = DL->getIndexType(PtrIRTy);
1658 const Value *Idx = GTI.getOperand();
1659 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1661 Offset += DL->getStructLayout(StTy)->getElementOffset(
Field);
1664 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1669 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1670 Offset += ElementSize * *Val;
1679 PtrAddFlagsWithConst(
Offset))
1684 Register IdxReg = getOrCreateVReg(*Idx);
1685 LLT IdxTy = MRI->getType(IdxReg);
1686 if (IdxTy != OffsetTy) {
1687 if (!IdxTy.
isVector() && WantSplatVector) {
1700 if (ElementSize != 1) {
1711 MIRBuilder.
buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
1714 GepOffsetReg = IdxReg;
1718 MIRBuilder.
buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
1727 MIRBuilder.
buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1728 PtrAddFlagsWithConst(
Offset));
1732 MIRBuilder.
buildCopy(getOrCreateVReg(U), BaseReg);
1736bool IRTranslator::translateMemFunc(
const CallInst &CI,
1746 unsigned MinPtrSize = UINT_MAX;
1747 for (
auto AI = CI.
arg_begin(), AE = CI.
arg_end(); std::next(AI) != AE; ++AI) {
1748 Register SrcReg = getOrCreateVReg(**AI);
1749 LLT SrcTy = MRI->getType(SrcReg);
1751 MinPtrSize = std::min<unsigned>(SrcTy.
getSizeInBits(), MinPtrSize);
1759 if (MRI->getType(SizeOpReg) != SizeTy)
1771 ConstantInt *CopySize =
nullptr;
1774 DstAlign = MCI->getDestAlign().valueOrOne();
1775 SrcAlign = MCI->getSourceAlign().valueOrOne();
1778 DstAlign = MMI->getDestAlign().valueOrOne();
1779 SrcAlign = MMI->getSourceAlign().valueOrOne();
1783 DstAlign = MSI->getDestAlign().valueOrOne();
1786 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1802 if (AA && CopySize &&
1803 AA->pointsToConstantMemory(MemoryLocation(
1813 ICall.addMemOperand(
1814 MF->getMachineMemOperand(MachinePointerInfo(CI.
getArgOperand(0)),
1815 StoreFlags, 1, DstAlign, AAInfo));
1816 if (Opcode != TargetOpcode::G_MEMSET)
1817 ICall.addMemOperand(MF->getMachineMemOperand(
1818 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1823bool IRTranslator::translateTrap(
const CallInst &CI,
1826 StringRef TrapFuncName =
1827 CI.
getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
1828 if (TrapFuncName.
empty()) {
1829 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1838 CallLowering::CallLoweringInfo
Info;
1839 if (Opcode == TargetOpcode::G_UBSANTRAP)
1846 return CLI->lowerCall(MIRBuilder, Info);
1849bool IRTranslator::translateVectorInterleave2Intrinsic(
1852 "This function can only be called on the interleave2 intrinsic!");
1856 Register Res = getOrCreateVReg(CI);
1858 LLT OpTy = MRI->getType(Op0);
1865bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1868 "This function can only be called on the deinterleave2 intrinsic!");
1875 LLT ResTy = MRI->getType(Res[0]);
1884void IRTranslator::getStackGuard(
Register DstReg,
1887 TLI->getSDagStackGuard(*MF->getFunction().getParent(), *Libcalls);
1890 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
1895 const TargetRegisterInfo *
TRI = MF->getSubtarget().getRegisterInfo();
1896 MRI->setRegClass(DstReg,
TRI->getPointerRegClass());
1898 MIRBuilder.
buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1900 unsigned AddrSpace =
Global->getType()->getPointerAddressSpace();
1901 LLT PtrTy =
LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1903 MachinePointerInfo MPInfo(
Global);
1906 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1907 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1908 MIB.setMemRefs({MemRef});
1911bool IRTranslator::translateOverflowIntrinsic(
const CallInst &CI,
unsigned Op,
1915 Op, {ResRegs[0], ResRegs[1]},
1921bool IRTranslator::translateFixedPointIntrinsic(
unsigned Op,
const CallInst &CI,
1923 Register Dst = getOrCreateVReg(CI);
1927 MIRBuilder.
buildInstr(
Op, {Dst}, { Src0, Src1, Scale });
1935 case Intrinsic::acos:
1936 return TargetOpcode::G_FACOS;
1937 case Intrinsic::asin:
1938 return TargetOpcode::G_FASIN;
1939 case Intrinsic::atan:
1940 return TargetOpcode::G_FATAN;
1941 case Intrinsic::atan2:
1942 return TargetOpcode::G_FATAN2;
1943 case Intrinsic::bswap:
1944 return TargetOpcode::G_BSWAP;
1945 case Intrinsic::bitreverse:
1946 return TargetOpcode::G_BITREVERSE;
1947 case Intrinsic::fshl:
1948 return TargetOpcode::G_FSHL;
1949 case Intrinsic::fshr:
1950 return TargetOpcode::G_FSHR;
1951 case Intrinsic::ceil:
1952 return TargetOpcode::G_FCEIL;
1953 case Intrinsic::cos:
1954 return TargetOpcode::G_FCOS;
1955 case Intrinsic::cosh:
1956 return TargetOpcode::G_FCOSH;
1957 case Intrinsic::ctpop:
1958 return TargetOpcode::G_CTPOP;
1959 case Intrinsic::exp:
1960 return TargetOpcode::G_FEXP;
1961 case Intrinsic::exp2:
1962 return TargetOpcode::G_FEXP2;
1963 case Intrinsic::exp10:
1964 return TargetOpcode::G_FEXP10;
1965 case Intrinsic::fabs:
1966 return TargetOpcode::G_FABS;
1967 case Intrinsic::copysign:
1968 return TargetOpcode::G_FCOPYSIGN;
1969 case Intrinsic::minnum:
1970 return TargetOpcode::G_FMINNUM;
1971 case Intrinsic::maxnum:
1972 return TargetOpcode::G_FMAXNUM;
1973 case Intrinsic::minimum:
1974 return TargetOpcode::G_FMINIMUM;
1975 case Intrinsic::maximum:
1976 return TargetOpcode::G_FMAXIMUM;
1977 case Intrinsic::minimumnum:
1978 return TargetOpcode::G_FMINIMUMNUM;
1979 case Intrinsic::maximumnum:
1980 return TargetOpcode::G_FMAXIMUMNUM;
1981 case Intrinsic::canonicalize:
1982 return TargetOpcode::G_FCANONICALIZE;
1983 case Intrinsic::floor:
1984 return TargetOpcode::G_FFLOOR;
1985 case Intrinsic::fma:
1986 return TargetOpcode::G_FMA;
1987 case Intrinsic::log:
1988 return TargetOpcode::G_FLOG;
1989 case Intrinsic::log2:
1990 return TargetOpcode::G_FLOG2;
1991 case Intrinsic::log10:
1992 return TargetOpcode::G_FLOG10;
1993 case Intrinsic::ldexp:
1994 return TargetOpcode::G_FLDEXP;
1995 case Intrinsic::nearbyint:
1996 return TargetOpcode::G_FNEARBYINT;
1997 case Intrinsic::pow:
1998 return TargetOpcode::G_FPOW;
1999 case Intrinsic::powi:
2000 return TargetOpcode::G_FPOWI;
2001 case Intrinsic::rint:
2002 return TargetOpcode::G_FRINT;
2003 case Intrinsic::round:
2004 return TargetOpcode::G_INTRINSIC_ROUND;
2005 case Intrinsic::roundeven:
2006 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
2007 case Intrinsic::sin:
2008 return TargetOpcode::G_FSIN;
2009 case Intrinsic::sinh:
2010 return TargetOpcode::G_FSINH;
2011 case Intrinsic::sqrt:
2012 return TargetOpcode::G_FSQRT;
2013 case Intrinsic::tan:
2014 return TargetOpcode::G_FTAN;
2015 case Intrinsic::tanh:
2016 return TargetOpcode::G_FTANH;
2017 case Intrinsic::trunc:
2018 return TargetOpcode::G_INTRINSIC_TRUNC;
2019 case Intrinsic::readcyclecounter:
2020 return TargetOpcode::G_READCYCLECOUNTER;
2021 case Intrinsic::readsteadycounter:
2022 return TargetOpcode::G_READSTEADYCOUNTER;
2023 case Intrinsic::ptrmask:
2024 return TargetOpcode::G_PTRMASK;
2025 case Intrinsic::lrint:
2026 return TargetOpcode::G_INTRINSIC_LRINT;
2027 case Intrinsic::llrint:
2028 return TargetOpcode::G_INTRINSIC_LLRINT;
2030 case Intrinsic::vector_reduce_fmin:
2031 return TargetOpcode::G_VECREDUCE_FMIN;
2032 case Intrinsic::vector_reduce_fmax:
2033 return TargetOpcode::G_VECREDUCE_FMAX;
2034 case Intrinsic::vector_reduce_fminimum:
2035 return TargetOpcode::G_VECREDUCE_FMINIMUM;
2036 case Intrinsic::vector_reduce_fmaximum:
2037 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
2038 case Intrinsic::vector_reduce_add:
2039 return TargetOpcode::G_VECREDUCE_ADD;
2040 case Intrinsic::vector_reduce_mul:
2041 return TargetOpcode::G_VECREDUCE_MUL;
2042 case Intrinsic::vector_reduce_and:
2043 return TargetOpcode::G_VECREDUCE_AND;
2044 case Intrinsic::vector_reduce_or:
2045 return TargetOpcode::G_VECREDUCE_OR;
2046 case Intrinsic::vector_reduce_xor:
2047 return TargetOpcode::G_VECREDUCE_XOR;
2048 case Intrinsic::vector_reduce_smax:
2049 return TargetOpcode::G_VECREDUCE_SMAX;
2050 case Intrinsic::vector_reduce_smin:
2051 return TargetOpcode::G_VECREDUCE_SMIN;
2052 case Intrinsic::vector_reduce_umax:
2053 return TargetOpcode::G_VECREDUCE_UMAX;
2054 case Intrinsic::vector_reduce_umin:
2055 return TargetOpcode::G_VECREDUCE_UMIN;
2056 case Intrinsic::experimental_vector_compress:
2057 return TargetOpcode::G_VECTOR_COMPRESS;
2058 case Intrinsic::lround:
2059 return TargetOpcode::G_LROUND;
2060 case Intrinsic::llround:
2061 return TargetOpcode::G_LLROUND;
2062 case Intrinsic::get_fpenv:
2063 return TargetOpcode::G_GET_FPENV;
2064 case Intrinsic::get_fpmode:
2065 return TargetOpcode::G_GET_FPMODE;
2070bool IRTranslator::translateSimpleIntrinsic(
const CallInst &CI,
2074 unsigned Op = getSimpleIntrinsicOpcode(
ID);
2082 for (
const auto &Arg : CI.
args())
2085 MIRBuilder.
buildInstr(
Op, {getOrCreateVReg(CI)}, VRegs,
2093 case Intrinsic::experimental_constrained_fadd:
2094 return TargetOpcode::G_STRICT_FADD;
2095 case Intrinsic::experimental_constrained_fsub:
2096 return TargetOpcode::G_STRICT_FSUB;
2097 case Intrinsic::experimental_constrained_fmul:
2098 return TargetOpcode::G_STRICT_FMUL;
2099 case Intrinsic::experimental_constrained_fdiv:
2100 return TargetOpcode::G_STRICT_FDIV;
2101 case Intrinsic::experimental_constrained_frem:
2102 return TargetOpcode::G_STRICT_FREM;
2103 case Intrinsic::experimental_constrained_fma:
2104 return TargetOpcode::G_STRICT_FMA;
2105 case Intrinsic::experimental_constrained_sqrt:
2106 return TargetOpcode::G_STRICT_FSQRT;
2107 case Intrinsic::experimental_constrained_ldexp:
2108 return TargetOpcode::G_STRICT_FLDEXP;
2114bool IRTranslator::translateConstrainedFPIntrinsic(
2134std::optional<MCRegister> IRTranslator::getArgPhysReg(
Argument &Arg) {
2135 auto VRegs = getOrCreateVRegs(Arg);
2136 if (VRegs.
size() != 1)
2137 return std::nullopt;
2140 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2141 if (!VRegDef || !VRegDef->isCopy())
2142 return std::nullopt;
2143 return VRegDef->getOperand(1).getReg().asMCReg();
2146bool IRTranslator::translateIfEntryValueArgument(
bool isDeclare,
Value *Val,
2158 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2160 LLVM_DEBUG(
dbgs() <<
"Dropping dbg." << (isDeclare ?
"declare" :
"value")
2161 <<
": expression is entry_value but "
2162 <<
"couldn't find a physical register\n");
2170 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2182 case Intrinsic::experimental_convergence_anchor:
2183 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2184 case Intrinsic::experimental_convergence_entry:
2185 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2186 case Intrinsic::experimental_convergence_loop:
2187 return TargetOpcode::CONVERGENCECTRL_LOOP;
2191bool IRTranslator::translateConvergenceControlIntrinsic(
2194 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2197 if (
ID == Intrinsic::experimental_convergence_loop) {
2199 assert(Bundle &&
"Expected a convergence control token.");
2201 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2211 if (ORE->enabled()) {
2213 MemoryOpRemark
R(*ORE,
"gisel-irtranslator-memsize", *DL, *LibInfo);
2221 if (translateSimpleIntrinsic(CI,
ID, MIRBuilder))
2227 case Intrinsic::lifetime_start:
2228 case Intrinsic::lifetime_end: {
2231 MF->getFunction().hasOptNone())
2234 unsigned Op =
ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2235 : TargetOpcode::LIFETIME_END;
2244 case Intrinsic::fake_use: {
2246 for (
const auto &Arg : CI.
args())
2248 MIRBuilder.
buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
2249 MF->setHasFakeUses(
true);
2252 case Intrinsic::dbg_declare: {
2259 case Intrinsic::dbg_label: {
2265 "Expected inlined-at fields to agree");
2270 case Intrinsic::vaend:
2274 case Intrinsic::vastart: {
2276 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2279 MIRBuilder.
buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2280 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2282 ListSize, Alignment));
2285 case Intrinsic::dbg_assign:
2292 case Intrinsic::dbg_value: {
2299 case Intrinsic::uadd_with_overflow:
2300 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2301 case Intrinsic::sadd_with_overflow:
2302 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2303 case Intrinsic::usub_with_overflow:
2304 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2305 case Intrinsic::ssub_with_overflow:
2306 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2307 case Intrinsic::umul_with_overflow:
2308 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2309 case Intrinsic::smul_with_overflow:
2310 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2311 case Intrinsic::uadd_sat:
2312 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2313 case Intrinsic::sadd_sat:
2314 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2315 case Intrinsic::usub_sat:
2316 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2317 case Intrinsic::ssub_sat:
2318 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2319 case Intrinsic::ushl_sat:
2320 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2321 case Intrinsic::sshl_sat:
2322 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2323 case Intrinsic::umin:
2324 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2325 case Intrinsic::umax:
2326 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2327 case Intrinsic::smin:
2328 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2329 case Intrinsic::smax:
2330 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2331 case Intrinsic::abs:
2333 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2334 case Intrinsic::smul_fix:
2335 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2336 case Intrinsic::umul_fix:
2337 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2338 case Intrinsic::smul_fix_sat:
2339 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2340 case Intrinsic::umul_fix_sat:
2341 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2342 case Intrinsic::sdiv_fix:
2343 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2344 case Intrinsic::udiv_fix:
2345 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2346 case Intrinsic::sdiv_fix_sat:
2347 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2348 case Intrinsic::udiv_fix_sat:
2349 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2350 case Intrinsic::fmuladd: {
2351 const TargetMachine &TM = MF->getTarget();
2352 Register Dst = getOrCreateVReg(CI);
2357 TLI->isFMAFasterThanFMulAndFAdd(*MF,
2358 TLI->getValueType(*DL, CI.
getType()))) {
2361 MIRBuilder.
buildFMA(Dst, Op0, Op1, Op2,
2372 case Intrinsic::frexp: {
2379 case Intrinsic::modf: {
2381 MIRBuilder.
buildModf(VRegs[0], VRegs[1],
2386 case Intrinsic::sincos: {
2393 case Intrinsic::fptosi_sat:
2397 case Intrinsic::fptoui_sat:
2401 case Intrinsic::memcpy_inline:
2402 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2403 case Intrinsic::memcpy:
2404 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2405 case Intrinsic::memmove:
2406 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2407 case Intrinsic::memset:
2408 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2409 case Intrinsic::eh_typeid_for: {
2412 unsigned TypeID = MF->getTypeIDFor(GV);
2416 case Intrinsic::objectsize:
2419 case Intrinsic::is_constant:
2422 case Intrinsic::stackguard:
2423 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2425 case Intrinsic::stackprotector: {
2428 if (TLI->useLoadStackGuardNode(*CI.
getModule())) {
2429 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2430 getStackGuard(GuardVal, MIRBuilder);
2435 int FI = getOrCreateFrameIndex(*Slot);
2436 MF->getFrameInfo().setStackProtectorIndex(FI);
2439 GuardVal, getOrCreateVReg(*Slot),
2446 case Intrinsic::stacksave: {
2447 MIRBuilder.
buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2450 case Intrinsic::stackrestore: {
2451 MIRBuilder.
buildInstr(TargetOpcode::G_STACKRESTORE, {},
2455 case Intrinsic::cttz:
2456 case Intrinsic::ctlz: {
2458 bool isTrailing =
ID == Intrinsic::cttz;
2459 unsigned Opcode = isTrailing
2460 ? Cst->
isZero() ? TargetOpcode::G_CTTZ
2461 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2462 : Cst->
isZero() ? TargetOpcode::G_CTLZ
2463 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2464 MIRBuilder.
buildInstr(Opcode, {getOrCreateVReg(CI)},
2468 case Intrinsic::invariant_start: {
2472 case Intrinsic::invariant_end:
2474 case Intrinsic::expect:
2475 case Intrinsic::expect_with_probability:
2476 case Intrinsic::annotation:
2477 case Intrinsic::ptr_annotation:
2478 case Intrinsic::launder_invariant_group:
2479 case Intrinsic::strip_invariant_group: {
2481 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2485 case Intrinsic::assume:
2486 case Intrinsic::experimental_noalias_scope_decl:
2487 case Intrinsic::var_annotation:
2488 case Intrinsic::sideeffect:
2491 case Intrinsic::read_volatile_register:
2492 case Intrinsic::read_register: {
2495 .
buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2499 case Intrinsic::write_register: {
2501 MIRBuilder.
buildInstr(TargetOpcode::G_WRITE_REGISTER)
2506 case Intrinsic::localescape: {
2507 MachineBasicBlock &EntryMBB = MF->front();
2512 for (
unsigned Idx = 0,
E = CI.
arg_size(); Idx <
E; ++Idx) {
2519 MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2532 case Intrinsic::vector_reduce_fadd:
2533 case Intrinsic::vector_reduce_fmul: {
2536 Register Dst = getOrCreateVReg(CI);
2542 Opc =
ID == Intrinsic::vector_reduce_fadd
2543 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2544 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2545 if (!MRI->getType(VecSrc).isVector())
2546 Opc =
ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
2547 : TargetOpcode::G_FMUL;
2555 if (
ID == Intrinsic::vector_reduce_fadd) {
2556 Opc = TargetOpcode::G_VECREDUCE_FADD;
2557 ScalarOpc = TargetOpcode::G_FADD;
2559 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2560 ScalarOpc = TargetOpcode::G_FMUL;
2562 LLT DstTy = MRI->getType(Dst);
2565 MIRBuilder.
buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2570 case Intrinsic::trap:
2571 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2572 case Intrinsic::debugtrap:
2573 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2574 case Intrinsic::ubsantrap:
2575 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2576 case Intrinsic::allow_runtime_check:
2577 case Intrinsic::allow_ubsan_check:
2578 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2581 case Intrinsic::amdgcn_cs_chain:
2582 case Intrinsic::amdgcn_call_whole_wave:
2583 return translateCallBase(CI, MIRBuilder);
2584 case Intrinsic::fptrunc_round: {
2589 std::optional<RoundingMode> RoundMode =
2594 .
buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2595 {getOrCreateVReg(CI)},
2597 .addImm((
int)*RoundMode);
2601 case Intrinsic::is_fpclass: {
2606 .
buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2607 {getOrCreateVReg(*FpValue)})
2612 case Intrinsic::set_fpenv: {
2617 case Intrinsic::reset_fpenv:
2620 case Intrinsic::set_fpmode: {
2625 case Intrinsic::reset_fpmode:
2628 case Intrinsic::get_rounding:
2631 case Intrinsic::set_rounding:
2634 case Intrinsic::vscale: {
2638 case Intrinsic::scmp:
2639 MIRBuilder.
buildSCmp(getOrCreateVReg(CI),
2643 case Intrinsic::ucmp:
2644 MIRBuilder.
buildUCmp(getOrCreateVReg(CI),
2648 case Intrinsic::vector_extract:
2649 return translateExtractVector(CI, MIRBuilder);
2650 case Intrinsic::vector_insert:
2651 return translateInsertVector(CI, MIRBuilder);
2652 case Intrinsic::stepvector: {
2656 case Intrinsic::prefetch: {
2663 auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2666 MIRBuilder.
buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2672 case Intrinsic::vector_interleave2:
2673 case Intrinsic::vector_deinterleave2: {
2681 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2683 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2686#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2687 case Intrinsic::INTRINSIC:
2688#include "llvm/IR/ConstrainedOps.def"
2691 case Intrinsic::experimental_convergence_anchor:
2692 case Intrinsic::experimental_convergence_entry:
2693 case Intrinsic::experimental_convergence_loop:
2694 return translateConvergenceControlIntrinsic(CI,
ID, MIRBuilder);
2695 case Intrinsic::reloc_none: {
2698 MIRBuilder.
buildInstr(TargetOpcode::RELOC_NONE)
2706bool IRTranslator::translateInlineAsm(
const CallBase &CB,
2711 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2715 dbgs() <<
"Inline asm lowering is not supported for this target yet\n");
2720 MIRBuilder, CB, [&](
const Value &Val) {
return getOrCreateVRegs(Val); });
2723bool IRTranslator::translateCallBase(
const CallBase &CB,
2730 for (
const auto &Arg : CB.
args()) {
2732 assert(SwiftInVReg == 0 &&
"Expected only one swift error argument");
2734 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2735 MIRBuilder.
buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2736 &CB, &MIRBuilder.
getMBB(), Arg));
2739 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.
getMBB(), Arg);
2742 Args.push_back(getOrCreateVRegs(*Arg));
2746 if (ORE->enabled()) {
2748 MemoryOpRemark
R(*ORE,
"gisel-irtranslator-memsize", *DL, *LibInfo);
2754 std::optional<CallLowering::PtrAuthInfo> PAI;
2759 const Value *
Key = Bundle->Inputs[0];
2766 if (!CalleeCPA || !
isa<Function>(CalleeCPA->getPointer()) ||
2767 !CalleeCPA->isKnownCompatibleWith(
Key, Discriminator, *DL)) {
2769 Register DiscReg = getOrCreateVReg(*Discriminator);
2777 const auto &Token = *Bundle->Inputs[0].get();
2778 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2784 bool Success = CLI->lowerCall(
2785 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2790 assert(!HasTailCall &&
"Can't tail call return twice from block?");
2791 const TargetInstrInfo *
TII = MF->getSubtarget().getInstrInfo();
2807 if (
F && (
F->hasDLLImportStorageClass() ||
2808 (MF->getTarget().getTargetTriple().isOSWindows() &&
2809 F->hasExternalWeakLinkage())))
2821 return translateInlineAsm(CI, MIRBuilder);
2825 if (translateCallBase(CI, MIRBuilder)) {
2834 if (translateKnownIntrinsic(CI,
ID, MIRBuilder))
2838 TLI->getTgtMemIntrinsic(Infos, CI, *MF,
ID);
2840 return translateIntrinsic(CI,
ID, MIRBuilder, Infos);
2844bool IRTranslator::translateIntrinsic(
2849 ResultRegs = getOrCreateVRegs(CB);
2864 assert(CI->getBitWidth() <= 64 &&
2865 "large intrinsic immediates not handled");
2866 MIB.
addImm(CI->getSExtValue());
2871 auto *MD = MDVal->getMetadata();
2875 MDN =
MDNode::get(MF->getFunction().getContext(), ConstMD);
2882 if (VRegs.
size() > 1)
2889 for (
const auto &Info : TgtMemIntrinsicInfos) {
2892 LLT MemTy =
Info.memVT.isSimple()
2894 : LLT::scalar(
Info.memVT.getStoreSizeInBits());
2898 MachinePointerInfo MPI;
2900 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2901 }
else if (
Info.fallbackAddressSpace) {
2902 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2911 auto *Token = Bundle->Inputs[0].get();
2912 Register TokenReg = getOrCreateVReg(*Token);
2923bool IRTranslator::findUnwindDestinations(
2945 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2951 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2952 UnwindDests.back().first->setIsEHScopeEntry();
2953 UnwindDests.back().first->setIsEHFuncletEntry();
2958 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2959 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2961 if (IsMSVCCXX || IsCoreCLR)
2962 UnwindDests.back().first->setIsEHFuncletEntry();
2964 UnwindDests.back().first->setIsEHScopeEntry();
2966 NewEHPadBB = CatchSwitch->getUnwindDest();
2971 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2972 if (BPI && NewEHPadBB)
2974 EHPadBB = NewEHPadBB;
2979bool IRTranslator::translateInvoke(
const User &U,
2982 MCContext &
Context = MF->getContext();
2987 const Function *Fn =
I.getCalledFunction();
2994 if (
I.hasDeoptState())
3008 (MF->getTarget().getTargetTriple().isOSWindows() &&
3012 bool LowerInlineAsm =
I.isInlineAsm();
3013 bool NeedEHLabel =
true;
3019 MIRBuilder.
buildInstr(TargetOpcode::G_INVOKE_REGION_START);
3020 BeginSymbol =
Context.createTempSymbol();
3024 if (LowerInlineAsm) {
3025 if (!translateInlineAsm(
I, MIRBuilder))
3027 }
else if (!translateCallBase(
I, MIRBuilder))
3032 EndSymbol =
Context.createTempSymbol();
3037 BranchProbabilityInfo *BPI = FuncInfo.BPI;
3038 MachineBasicBlock *InvokeMBB = &MIRBuilder.
getMBB();
3039 BranchProbability EHPadBBProb =
3043 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
3046 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
3047 &ReturnMBB = getMBB(*ReturnBB);
3049 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
3050 for (
auto &UnwindDest : UnwindDests) {
3051 UnwindDest.first->setIsEHPad();
3052 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3057 assert(BeginSymbol &&
"Expected a begin symbol!");
3058 assert(EndSymbol &&
"Expected an end symbol!");
3059 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
3062 MIRBuilder.
buildBr(ReturnMBB);
3068bool IRTranslator::translateCallBr(
const User &U,
3074 MachineBasicBlock *CallBrMBB = &MIRBuilder.
getMBB();
3077 if (
I.isInlineAsm()) {
3083 if (!translateIntrinsic(
I, IID, MIRBuilder))
3087 SmallPtrSet<BasicBlock *, 8> Dests = {
I.getDefaultDest()};
3088 MachineBasicBlock *
Return = &getMBB(*
I.getDefaultDest());
3097 for (BasicBlock *Dest :
I.getIndirectDests()) {
3098 MachineBasicBlock &
Target = getMBB(*Dest);
3099 Target.setIsInlineAsmBrIndirectTarget();
3100 Target.setLabelMustBeEmitted();
3102 if (Dests.
insert(Dest).second)
3114bool IRTranslator::translateLandingPad(
const User &U,
3118 MachineBasicBlock &
MBB = MIRBuilder.
getMBB();
3124 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
3125 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
3126 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
3138 MIRBuilder.
buildInstr(TargetOpcode::EH_LABEL)
3143 const TargetRegisterInfo &
TRI = *MF->getSubtarget().getRegisterInfo();
3144 if (
auto *RegMask =
TRI.getCustomEHPadPreservedMask(*MF))
3145 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3154 assert(Tys.
size() == 2 &&
"Only two-valued landingpads are supported");
3157 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3163 MIRBuilder.
buildCopy(ResRegs[0], ExceptionReg);
3165 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3170 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3171 MIRBuilder.
buildCopy(PtrVReg, SelectorReg);
3172 MIRBuilder.
buildCast(ResRegs[1], PtrVReg);
3177bool IRTranslator::translateAlloca(
const User &U,
3185 Register Res = getOrCreateVReg(AI);
3186 int FI = getOrCreateFrameIndex(AI);
3192 if (MF->getTarget().getTargetTriple().isOSWindows())
3197 Type *IntPtrIRTy = DL->getIntPtrType(AI.
getType());
3199 if (MRI->getType(NumElts) != IntPtrTy) {
3200 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3206 TypeSize TySize = DL->getTypeAllocSize(Ty);
3208 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3212 TySizeReg = MRI->createGenericVirtualRegister(IntPtrTy);
3217 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, TySize.
getFixedValue()));
3219 MIRBuilder.
buildMul(AllocSize, NumElts, TySizeReg);
3224 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3226 auto AllocAdd = MIRBuilder.
buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3230 auto AlignedAlloc = MIRBuilder.
buildAnd(IntPtrTy, AllocAdd, AlignCst);
3233 if (Alignment <= StackAlign)
3234 Alignment =
Align(1);
3237 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3238 assert(MF->getFrameInfo().hasVarSizedObjects());
3247 MIRBuilder.
buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3248 {getOrCreateVReg(*
U.getOperand(0)),
3249 DL->getABITypeAlign(
U.getType()).value()});
3253bool IRTranslator::translateUnreachable(
const User &U,
3256 if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
3257 MF->getTarget().Options.NoTrapAfterNoreturn))
3264bool IRTranslator::translateInsertElement(
const User &U,
3269 FVT && FVT->getNumElements() == 1)
3270 return translateCopy(U, *
U.getOperand(1), MIRBuilder);
3273 Register Val = getOrCreateVReg(*
U.getOperand(0));
3274 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3275 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3278 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3279 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3280 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3281 Idx = getOrCreateVReg(*NewIdxCI);
3285 Idx = getOrCreateVReg(*
U.getOperand(2));
3286 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3287 const LLT VecIdxTy =
LLT::scalar(PreferredVecIdxWidth);
3294bool IRTranslator::translateInsertVector(
const User &U,
3297 Register Vec = getOrCreateVReg(*
U.getOperand(0));
3298 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3301 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3306 CI = ConstantInt::get(CI->
getContext(), NewIdx);
3311 ResultType && ResultType->getNumElements() == 1) {
3313 InputType && InputType->getNumElements() == 1) {
3317 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3323 Register Idx = getOrCreateVReg(*CI);
3331 Register Idx = getOrCreateVReg(*CI);
3332 auto ScaledIndex = MIRBuilder.
buildMul(
3333 VecIdxTy, MIRBuilder.
buildVScale(VecIdxTy, 1), Idx);
3340 getOrCreateVReg(U), getOrCreateVReg(*
U.getOperand(0)),
3345bool IRTranslator::translateExtractElement(
const User &U,
3349 if (
const FixedVectorType *FVT =
3351 if (FVT->getNumElements() == 1)
3352 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3355 Register Val = getOrCreateVReg(*
U.getOperand(0));
3356 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3361 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3362 Idx = getOrCreateVReg(*NewIdxCI);
3366 Idx = getOrCreateVReg(*
U.getOperand(1));
3367 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3368 const LLT VecIdxTy =
LLT::scalar(PreferredVecIdxWidth);
3375bool IRTranslator::translateExtractVector(
const User &U,
3378 Register Vec = getOrCreateVReg(*
U.getOperand(0));
3380 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3385 CI = ConstantInt::get(CI->
getContext(), NewIdx);
3390 ResultType && ResultType->getNumElements() == 1) {
3392 InputType && InputType->getNumElements() == 1) {
3395 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3401 Register Idx = getOrCreateVReg(*CI);
3409 Register Idx = getOrCreateVReg(*CI);
3410 auto ScaledIndex = MIRBuilder.
buildMul(
3411 VecIdxTy, MIRBuilder.
buildVScale(VecIdxTy, 1), Idx);
3418 getOrCreateVReg(*
U.getOperand(0)),
3423bool IRTranslator::translateShuffleVector(
const User &U,
3429 if (
U.getOperand(0)->getType()->isScalableTy()) {
3430 Register Val = getOrCreateVReg(*
U.getOperand(0));
3432 MRI->getType(Val).getElementType(), Val, 0);
3439 Mask = SVI->getShuffleMask();
3450 unsigned M =
Mask[0];
3452 if (M == 0 || M == 1)
3453 return translateCopy(U, *
U.getOperand(M), MIRBuilder);
3459 Dst, getOrCreateVReg(*
U.getOperand(0)), M);
3460 }
else if (M < SrcElts * 2) {
3462 Dst, getOrCreateVReg(*
U.getOperand(1)), M - SrcElts);
3474 for (
int M : Mask) {
3476 if (M == 0 || M == 1) {
3477 Ops.push_back(getOrCreateVReg(*
U.getOperand(M)));
3479 if (!
Undef.isValid()) {
3480 Undef = MRI->createGenericVirtualRegister(SrcTy);
3490 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3492 .
buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3493 {getOrCreateVReg(*
U.getOperand(0)),
3494 getOrCreateVReg(*
U.getOperand(1))})
3495 .addShuffleMask(MaskAlloc);
3502 SmallVector<MachineInstr *, 4> Insts;
3503 for (
auto Reg : getOrCreateVRegs(PI)) {
3504 auto MIB = MIRBuilder.
buildInstr(TargetOpcode::G_PHI, {
Reg}, {});
3508 PendingPHIs.emplace_back(&PI, std::move(Insts));
3512bool IRTranslator::translateAtomicCmpXchg(
const User &U,
3516 auto Flags = TLI->getAtomicMemOperandFlags(
I, *DL);
3518 auto Res = getOrCreateVRegs(
I);
3521 Register Addr = getOrCreateVReg(*
I.getPointerOperand());
3522 Register Cmp = getOrCreateVReg(*
I.getCompareOperand());
3523 Register NewVal = getOrCreateVReg(*
I.getNewValOperand());
3526 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3527 *MF->getMachineMemOperand(
3528 MachinePointerInfo(
I.getPointerOperand()), Flags, MRI->getType(Cmp),
3529 getMemOpAlign(
I),
I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3530 I.getSuccessOrdering(),
I.getFailureOrdering()));
3534bool IRTranslator::translateAtomicRMW(
const User &U,
3540 auto Flags = TLI->getAtomicMemOperandFlags(
I, *DL);
3543 Register Addr = getOrCreateVReg(*
I.getPointerOperand());
3544 Register Val = getOrCreateVReg(*
I.getValOperand());
3546 unsigned Opcode = 0;
3547 switch (
I.getOperation()) {
3551 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3554 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3557 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3560 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3563 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3566 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3569 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3572 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3575 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3578 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3581 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3584 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3587 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3590 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3593 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3596 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
3599 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
3602 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUMNUM;
3605 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUMNUM;
3608 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3611 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3614 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3617 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3622 Opcode, Res, Addr, Val,
3623 *MF->getMachineMemOperand(MachinePointerInfo(
I.getPointerOperand()),
3624 Flags, MRI->getType(Val), getMemOpAlign(
I),
3625 I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3630bool IRTranslator::translateFence(
const User &U,
3638bool IRTranslator::translateFreeze(
const User &U,
3644 "Freeze with different source and destination type?");
3646 for (
unsigned I = 0;
I < DstRegs.
size(); ++
I) {
3653void IRTranslator::finishPendingPhis() {
3656 GISelObserverWrapper WrapperObserver(&
Verifier);
3657 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3659 for (
auto &Phi : PendingPHIs) {
3660 const PHINode *PI =
Phi.first;
3664 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3670 SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
3674 for (
auto *Pred : getMachinePredBBs({IRPred, PI->
getParent()})) {
3678 for (
unsigned j = 0;
j < ValRegs.
size(); ++
j) {
3679 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3688void IRTranslator::translateDbgValueRecord(
Value *V,
bool HasArgList,
3694 "Expected inlined-at fields to agree");
3698 if (!V || HasArgList) {
3716 auto *ExprDerefRemoved =
3722 if (translateIfEntryValueArgument(
false, V, Variable, Expression, DL,
3734void IRTranslator::translateDbgDeclareRecord(
Value *
Address,
bool HasArgList,
3740 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << *Variable <<
"\n");
3745 "Expected inlined-at fields to agree");
3750 MF->setVariableDbgInfo(Variable, Expression,
3751 getOrCreateFrameIndex(*AI), DL);
3755 if (translateIfEntryValueArgument(
true,
Address, Variable,
3767void IRTranslator::translateDbgInfo(
const Instruction &Inst,
3772 assert(DLR->getLabel() &&
"Missing label");
3773 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3775 "Expected inlined-at fields to agree");
3784 translateDbgDeclareRecord(V, DVR.
hasArgList(), Variable, Expression,
3787 translateDbgValueRecord(V, DVR.
hasArgList(), Variable, Expression,
3792bool IRTranslator::translate(
const Instruction &Inst) {
3794 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3795 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3797 if (TLI->fallBackToDAGISel(Inst))
3801#define HANDLE_INST(NUM, OPCODE, CLASS) \
3802 case Instruction::OPCODE: \
3803 return translate##OPCODE(Inst, *CurBuilder.get());
3804#include "llvm/IR/Instruction.def"
3813 if (
auto CurrInstDL = CurBuilder->getDL())
3814 EntryBuilder->setDebugLoc(
DebugLoc());
3820 EntryBuilder->buildConstant(
Reg, *CI);
3824 CF = ConstantFP::get(CF->getContext(), CF->getValue());
3825 EntryBuilder->buildFConstant(
Reg, *CF);
3827 EntryBuilder->buildUndef(
Reg);
3829 EntryBuilder->buildConstant(
Reg, 0);
3831 EntryBuilder->buildGlobalValue(
Reg, GV);
3833 Register Addr = getOrCreateVReg(*CPA->getPointer());
3834 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3835 EntryBuilder->buildConstantPtrAuth(
Reg, CPA, Addr, AddrDisc);
3837 Constant &Elt = *CAZ->getElementValue(0u);
3839 EntryBuilder->buildSplatVector(
Reg, getOrCreateVReg(Elt));
3843 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3845 return translateCopy(
C, Elt, *EntryBuilder);
3847 EntryBuilder->buildSplatBuildVector(
Reg, getOrCreateVReg(Elt));
3850 if (CV->getNumElements() == 1)
3851 return translateCopy(
C, *CV->getElementAsConstant(0), *EntryBuilder);
3853 for (
unsigned i = 0; i < CV->getNumElements(); ++i) {
3854 Constant &Elt = *CV->getElementAsConstant(i);
3855 Ops.push_back(getOrCreateVReg(Elt));
3857 EntryBuilder->buildBuildVector(
Reg,
Ops);
3859 switch(
CE->getOpcode()) {
3860#define HANDLE_INST(NUM, OPCODE, CLASS) \
3861 case Instruction::OPCODE: \
3862 return translate##OPCODE(*CE, *EntryBuilder.get());
3863#include "llvm/IR/Instruction.def"
3868 if (CV->getNumOperands() == 1)
3869 return translateCopy(
C, *CV->getOperand(0), *EntryBuilder);
3871 for (
unsigned i = 0; i < CV->getNumOperands(); ++i) {
3872 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3874 EntryBuilder->buildBuildVector(
Reg,
Ops);
3876 EntryBuilder->buildBlockAddress(
Reg, BA);
3883bool IRTranslator::finalizeBasicBlock(
const BasicBlock &BB,
3885 for (
auto &BTB : SL->BitTestCases) {
3888 emitBitTestHeader(BTB, BTB.Parent);
3890 BranchProbability UnhandledProb = BTB.Prob;
3891 for (
unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3892 UnhandledProb -= BTB.Cases[
j].ExtraProb;
3894 MachineBasicBlock *
MBB = BTB.Cases[
j].ThisBB;
3903 MachineBasicBlock *NextMBB;
3904 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3907 NextMBB = BTB.Cases[
j + 1].TargetBB;
3908 }
else if (j + 1 == ej) {
3910 NextMBB = BTB.Default;
3913 NextMBB = BTB.Cases[
j + 1].ThisBB;
3916 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
MBB);
3918 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3922 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3923 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3926 BTB.Cases.pop_back();
3932 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3933 BTB.Default->getBasicBlock()};
3934 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3935 if (!BTB.ContiguousRange) {
3936 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3939 SL->BitTestCases.clear();
3941 for (
auto &JTCase : SL->JTCases) {
3943 if (!JTCase.first.Emitted)
3944 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3946 emitJumpTable(JTCase.second, JTCase.second.MBB);
3948 SL->JTCases.clear();
3950 for (
auto &SwCase : SL->SwitchCases)
3951 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3952 SL->SwitchCases.clear();
3956 if (
SP.shouldEmitSDCheck(BB)) {
3957 bool FunctionBasedInstrumentation =
3958 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent(), *Libcalls);
3959 SPDescriptor.initialize(&BB, &
MBB, FunctionBasedInstrumentation);
3962 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3965 }
else if (SPDescriptor.shouldEmitStackProtector()) {
3966 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3967 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3976 ParentMBB, *MF->getSubtarget().getInstrInfo());
3979 SuccessMBB->
splice(SuccessMBB->
end(), ParentMBB, SplitPoint,
3983 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3987 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3988 if (FailureMBB->
empty()) {
3989 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3994 SPDescriptor.resetPerBBState();
4001 CurBuilder->setInsertPt(*ParentBB, ParentBB->
end());
4005 LLT PtrMemTy =
getLLTForMVT(TLI->getPointerMemTy(*DL));
4011 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
4018 ->buildLoad(PtrMemTy, StackSlotPtr,
4023 if (TLI->useStackGuardXorFP()) {
4024 LLVM_DEBUG(
dbgs() <<
"Stack protector xor'ing with FP not yet implemented");
4029 if (
const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M, *Libcalls)) {
4041 FunctionType *FnTy = GuardCheckFn->getFunctionType();
4042 assert(FnTy->getNumParams() == 1 &&
"Invalid function signature");
4043 ISD::ArgFlagsTy
Flags;
4044 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
4046 CallLowering::ArgInfo GuardArgInfo(
4047 {GuardVal, FnTy->getParamType(0), {
Flags}});
4049 CallLowering::CallLoweringInfo
Info;
4050 Info.OrigArgs.push_back(GuardArgInfo);
4051 Info.CallConv = GuardCheckFn->getCallingConv();
4054 if (!CLI->lowerCall(MIRBuilder, Info)) {
4055 LLVM_DEBUG(
dbgs() <<
"Failed to lower call to stack protector check\n");
4067 getStackGuard(Guard, *CurBuilder);
4070 const Value *IRGuard = TLI->getSDagStackGuard(M, *Libcalls);
4071 Register GuardPtr = getOrCreateVReg(*IRGuard);
4074 ->buildLoad(PtrMemTy, GuardPtr,
4093 const RTLIB::LibcallImpl LibcallImpl =
4094 Libcalls->getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
4095 if (LibcallImpl == RTLIB::Unsupported)
4098 CurBuilder->setInsertPt(*FailureBB, FailureBB->
end());
4100 CallLowering::CallLoweringInfo
Info;
4101 Info.CallConv = Libcalls->getLibcallImplCallingConv(LibcallImpl);
4103 StringRef LibcallName =
4108 if (!CLI->lowerCall(*CurBuilder, Info)) {
4109 LLVM_DEBUG(
dbgs() <<
"Failed to lower call to stack protector fail\n");
4114 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4116 CurBuilder->buildInstr(TargetOpcode::G_TRAP);
4121void IRTranslator::finalizeFunction() {
4124 PendingPHIs.clear();
4126 FrameIndices.clear();
4127 MachinePreds.clear();
4131 EntryBuilder.reset();
4134 SPDescriptor.resetPerFunctionState();
4147 return CI && CI->isMustTailCall();
4154 ORE = std::make_unique<OptimizationRemarkEmitter>(&
F);
4155 CLI = MF->getSubtarget().getCallLowering();
4157 if (CLI->fallBackToDAGISel(*MF)) {
4159 F.getSubprogram(), &
F.getEntryBlock());
4160 R <<
"unable to lower function: "
4161 <<
ore::NV(
"Prototype",
F.getFunctionType());
4175 : TPC->isGISelCSEEnabled();
4181 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4182 CSEInfo = &
Wrapper.get(TPC->getCSEConfig());
4183 EntryBuilder->setCSEInfo(CSEInfo);
4184 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4185 CurBuilder->setCSEInfo(CSEInfo);
4187 EntryBuilder = std::make_unique<MachineIRBuilder>();
4188 CurBuilder = std::make_unique<MachineIRBuilder>();
4191 CurBuilder->setMF(*MF);
4192 EntryBuilder->setMF(*MF);
4193 MRI = &MF->getRegInfo();
4194 DL = &
F.getDataLayout();
4204 FuncInfo.BPI =
nullptr;
4211 *
F.getParent(), Subtarget);
4213 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4215 SL = std::make_unique<GISelSwitchLowering>(
this, FuncInfo);
4216 SL->init(*TLI, TM, *DL);
4218 assert(PendingPHIs.empty() &&
"stale PHIs");
4222 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4225 F.getSubprogram(), &
F.getEntryBlock());
4226 R <<
"unable to translate in big endian mode";
4237 EntryBuilder->setMBB(*EntryBB);
4239 DebugLoc DbgLoc =
F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4240 SwiftError.setFunction(CurMF);
4241 SwiftError.createEntriesInEntryBlock(DbgLoc);
4243 bool IsVarArg =
F.isVarArg();
4244 bool HasMustTailInVarArgFn =
false;
4247 FuncInfo.MBBMap.resize(
F.getMaxBlockNumber());
4251 MBB = MF->CreateMachineBasicBlock(&BB);
4259 if (!BA->hasZeroLiveUses())
4263 if (!HasMustTailInVarArgFn)
4267 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4270 EntryBB->addSuccessor(&getMBB(
F.front()));
4275 if (DL->getTypeStoreSize(Arg.
getType()).isZero())
4280 if (Arg.hasSwiftErrorAttr()) {
4281 assert(VRegs.
size() == 1 &&
"Too many vregs for Swift error");
4282 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4286 if (!CLI->lowerFormalArguments(*EntryBuilder,
F, VRegArgs, FuncInfo)) {
4288 F.getSubprogram(), &
F.getEntryBlock());
4289 R <<
"unable to lower arguments: "
4290 <<
ore::NV(
"Prototype",
F.getFunctionType());
4297 if (EnableCSE && CSEInfo)
4302 DILocationVerifier Verifier;
4310 CurBuilder->setMBB(
MBB);
4311 HasTailCall =
false;
4321 Verifier.setCurrentInst(&Inst);
4325 translateDbgInfo(Inst, *CurBuilder);
4327 if (translate(Inst))
4332 R <<
"unable to translate instruction: " <<
ore::NV(
"Opcode", &Inst);
4334 if (ORE->allowExtraAnalysis(
"gisel-irtranslator")) {
4335 std::string InstStrStorage;
4339 R <<
": '" << InstStrStorage <<
"'";
4346 if (!finalizeBasicBlock(*BB,
MBB)) {
4348 BB->getTerminator()->getDebugLoc(), BB);
4349 R <<
"unable to translate basic block";
4359 finishPendingPhis();
4361 SwiftError.propagateVRegs();
4366 assert(EntryBB->succ_size() == 1 &&
4367 "Custom BB used for lowering should have only one successor");
4371 "LLVM-IR entry block has a predecessor!?");
4374 NewEntryBB.
splice(NewEntryBB.
begin(), EntryBB, EntryBB->begin(),
4383 EntryBB->removeSuccessor(&NewEntryBB);
4384 MF->remove(EntryBB);
4385 MF->deleteMachineBasicBlock(EntryBB);
4387 assert(&MF->front() == &NewEntryBB &&
4388 "New entry wasn't next in the list of basic block!");
4392 SP.copyToMachineFrameInfo(MF->getFrameInfo());
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This contains common code to allow clients to notify changes to machine instr.
const HexagonInstrInfo * TII
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static bool targetSupportsBF16Type(const MachineFunction *MF)
static bool containsBF16Type(const User &U)
static unsigned getConvOpcode(Intrinsic::ID ID)
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
Machine Check Debug Module
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
OptimizedStructLayoutField Field
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
verify safepoint Safepoint IR Verifier
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
const Value * getArraySize() const
Get the number of elements allocated.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An immutable pass that tracks lazily created AssumptionCache objects.
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
LLVM Basic Block Representation.
unsigned getNumber() const
const Function * getParent() const
Return the enclosing method, or null if none.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
The address of a basic block.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Legacy analysis pass which computes BlockFrequencyInfo.
Legacy analysis pass which computes BranchProbabilityInfo.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getOne()
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
bool isFPPredicate() const
bool isIntPredicate() const
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
LLVM_ABI bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
ArrayRef< uint64_t > getElements() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
A parsed version of the target data layout string in and methods for querying it.
Value * getAddress() const
DILabel * getLabel() const
DebugLoc getDebugLoc() const
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
bool isDbgDeclare() const
Class representing an expression and its matching format.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
The actual analysis pass wrapper.
Simple wrapper that does the following.
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isTailCall(const MachineInstr &MI) const override
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
static LocationSize precise(uint64_t Value)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
void push_back(MachineInstr *MI)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
LLVM_ABI void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
int getStackProtectorIndex() const
Return the index for the stack protector object.
MachineFunctionPass(char &ID)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_FPTOUI_SAT Src0.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Int = G_FMODF Src.
LLVMContext & getContext() const
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildResetFPMode()
Build and insert G_RESET_FPMODE.
MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_FPTOSI_SAT Src0.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildGetRounding(const DstOp &Dst)
Build and insert Dst = G_GET_ROUNDING.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildSetFPMode(const SrcOp &Src)
Build and insert G_SET_FPMODE Src.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildSetRounding(const SrcOp &Src)
Build and insert G_SET_ROUNDING.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildResetFPEnv()
Build and insert G_RESET_FPENV.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = / G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildTrap(bool Debug=false)
Build and insert G_TRAP or G_DEBUGTRAP.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFSincos(const DstOp &Sin, const DstOp &Cos, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Sin, Cos = G_FSINCOS Src.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Idx0.
const DataLayout & getDataLayout() const
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP PredOp0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)
Build and insert G_SET_FPENV Src.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS)
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Class to install both of the above.
Wrapper class representing virtual and physical registers.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Primary interface to the complete machine description for the target machine.
const Triple & getTargetTriple() const
const Target & getTarget() const
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
Target-Independent Code Generator Pass Configuration Options.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const CallLowering * getCallLowering() const
virtual const TargetLowering * getTargetLowering() const
bool isSPIRV() const
Tests whether the target is SPIR-V (32/64-bit/Logical).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getZero()
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
BasicBlock * getSuccessor(unsigned i=0) const
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
const ParentTy * getParent() const
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebIgnore
This corresponds to "fpexcept.ignore".
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< PhiNode * > Phi
NodeAddr< CodeNode * > Code
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
FunctionAddr VTableAddr Value
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
gep_type_iterator gep_type_end(const User *GEP)
MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
LLVM_ABI LLT getLLTForMVT(MVT Ty)
Get a rough equivalent of an LLT for a given MVT.
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
auto succ_size(const MachineBasicBlock *BB)
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
@ Global
Append to llvm.global_dtors.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueLLTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
MachineBasicBlock * Parent
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
MachineBasicBlock * ThisBB
struct PredInfoPair PredInfo
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
Register Reg
The virtual register containing the index of the jump table entry to jump to.
MachineBasicBlock * Default
The MBB of the default bb, which is a successor of the range check MBB.
unsigned JTI
The JumpTableIndex for this jump table in the function.
MachineBasicBlock * MBB
The MBB into which to emit the code for the indirect jump.