64#include "llvm/IR/IntrinsicsAMDGPU.h"
93#define DEBUG_TYPE "irtranslator"
99 cl::desc(
"Should enable CSE in irtranslator"),
116 MF.getProperties().setFailedISel();
117 bool IsGlobalISelAbortEnabled =
122 if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
123 R << (
" (in function: " + MF.getName() +
")").str();
125 if (IsGlobalISelAbortEnabled)
142 DILocationVerifier() =
default;
143 ~DILocationVerifier()
override =
default;
145 const Instruction *getCurrentInst()
const {
return CurrInst; }
146 void setCurrentInst(
const Instruction *Inst) { CurrInst = Inst; }
148 void erasingInstr(MachineInstr &
MI)
override {}
149 void changingInstr(MachineInstr &
MI)
override {}
150 void changedInstr(MachineInstr &
MI)
override {}
152 void createdInstr(MachineInstr &
MI)
override {
153 assert(getCurrentInst() &&
"Inserted instruction without a current MI");
158 <<
" was copied to " <<
MI);
164 (
MI.getParent()->isEntryBlock() && !
MI.getDebugLoc()) ||
165 (
MI.isDebugInstr())) &&
166 "Line info was not transferred to all instructions");
// Allocates (but does not define) the virtual registers for `Val`, returning
// the cached list if `Val` was already seen in VMap.
// NOTE(review): fragmented extraction — the original lines 197-198 (which
// presumably declare SplitTys and call the value-splitting helper that
// populates it) are missing here, so `SplitTys` below has no visible
// definition. TODO: restore from the pristine IRTranslator.cpp; do not edit
// this fragment until then.
190IRTranslator::ValueToVRegInfo::VRegListT &
191IRTranslator::allocateVRegs(
    const Value &Val) {
192  auto VRegsIt = VMap.findVRegs(Val);
193  if (VRegsIt != VMap.vregs_end())
194    return *VRegsIt->second;
195  auto *Regs = VMap.getVRegs(Val);
196  auto *Offsets = VMap.getOffsets(Val);
199      Offsets->empty() ? Offsets :
nullptr);
200  for (
unsigned i = 0; i < SplitTys.
size(); ++i)
206 auto VRegsIt = VMap.findVRegs(Val);
207 if (VRegsIt != VMap.vregs_end())
208 return *VRegsIt->second;
211 return *VMap.getVRegs(Val);
214 auto *VRegs = VMap.getVRegs(Val);
215 auto *Offsets = VMap.getOffsets(Val);
219 "Don't know how to create an empty vreg");
223 Offsets->empty() ? Offsets :
nullptr);
226 for (
auto Ty : SplitTys)
235 while (
auto Elt =
C.getAggregateElement(Idx++)) {
236 auto EltRegs = getOrCreateVRegs(*Elt);
240 assert(SplitTys.size() == 1 &&
"unexpectedly split LLT");
241 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
244 OptimizationRemarkMissed
R(
"gisel-irtranslator",
"GISelFailure",
245 MF->getFunction().getSubprogram(),
246 &MF->getFunction().getEntryBlock());
247 R <<
"unable to translate constant: " <<
ore::NV(
"Type", Val.
getType());
// Returns the frame index for `AI`, creating a stack object on first use
// (size clamped to at least 1 byte, using the alloca's alignment) and
// tagging it with the target's scalable-vector stack ID.
// NOTE(review): fragmented extraction — original lines 258 (the early-return
// guard on the try_emplace result, presumably `if (!Inserted)`), 260-264
// (the computation of `Size`), and 269-274 are missing; `Size` and `StackID`
// below have no visible definitions. TODO: restore from the pristine file.
256int IRTranslator::getOrCreateFrameIndex(
const AllocaInst &AI) {
257  auto [MapEntry,
Inserted] = FrameIndices.try_emplace(&AI);
259    return MapEntry->second;
265  Size = std::max<uint64_t>(
Size, 1u);
267  int &FI = MapEntry->second;
268  FI = MF->getFrameInfo().CreateStackObject(
Size, AI.
getAlign(),
false, &AI);
275      MF->getSubtarget().getFrameLowering()->getStackIDForScalableVectors();
276  MF->getFrameInfo().setStackID(FI, StackID);
284 return SI->getAlign();
286 return LI->getAlign();
292 OptimizationRemarkMissed
R(
"gisel-irtranslator",
"", &
I);
293 R <<
"unable to translate memop: " <<
ore::NV(
"Opcode", &
I);
299 MachineBasicBlock *
MBB = FuncInfo.getMBB(&BB);
300 assert(
MBB &&
"BasicBlock was not encountered before");
305 assert(NewPred &&
"new predecessor must be a real MachineBasicBlock");
306 MachinePreds[
Edge].push_back(NewPred);
// Translates a two-operand IR instruction into the generic MI opcode
// `Opcode`, after checking the operand types are translatable.
// NOTE(review): fragmented extraction — original lines 310 (the trailing
// MachineIRBuilder parameter of the signature), 312-317, and 320 onward
// (flags handling and the buildInstr/return) are missing. TODO: restore
// from the pristine file before editing.
309bool IRTranslator::translateBinaryOp(
unsigned Opcode,
const User &U,
311  if (!mayTranslateUserTypes(U))
318  Register Op0 = getOrCreateVReg(*
U.getOperand(0));
319  Register Op1 = getOrCreateVReg(*
U.getOperand(1));
331bool IRTranslator::translateUnaryOp(
unsigned Opcode,
const User &U,
333 if (!mayTranslateUserTypes(U))
336 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
348 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
351bool IRTranslator::translateCompare(
const User &U,
353 if (!mayTranslateUserTypes(U))
357 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
358 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
363 MIRBuilder.
buildICmp(Pred, Res, Op0, Op1, Flags);
371 MIRBuilder.
buildFCmp(Pred, Res, Op0, Op1, Flags);
379 if (Ret && DL->getTypeStoreSize(Ret->
getType()).isZero())
384 VRegs = getOrCreateVRegs(*Ret);
387 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
388 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
389 &RI, &MIRBuilder.
getMBB(), SwiftError.getFunctionArg());
395 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
398void IRTranslator::emitBranchForMergedCondition(
407 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
410 Condition = InvertCond ?
FC->getInversePredicate() :
FC->getPredicate();
413 SwitchCG::CaseBlock CB(Condition,
false, BOp->getOperand(0),
414 BOp->getOperand(1),
nullptr,
TBB, FBB, CurBB,
415 CurBuilder->getDebugLoc(), TProb, FProb);
416 SL->SwitchCases.push_back(CB);
422 SwitchCG::CaseBlock CB(
424 nullptr,
TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
425 SL->SwitchCases.push_back(CB);
430 return I->getParent() == BB;
434void IRTranslator::findMergedConditions(
439 using namespace PatternMatch;
440 assert((
Opc == Instruction::And ||
Opc == Instruction::Or) &&
441 "Expected Opc to be AND/OR");
447 findMergedConditions(NotCond,
TBB, FBB, CurBB, SwitchBB,
Opc, TProb, FProb,
453 const Value *BOpOp0, *BOpOp1;
467 if (BOpc == Instruction::And)
468 BOpc = Instruction::Or;
469 else if (BOpc == Instruction::Or)
470 BOpc = Instruction::And;
476 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
480 emitBranchForMergedCondition(
Cond,
TBB, FBB, CurBB, SwitchBB, TProb, FProb,
487 MachineBasicBlock *TmpBB =
491 if (
Opc == Instruction::Or) {
512 auto NewTrueProb = TProb / 2;
513 auto NewFalseProb = TProb / 2 + FProb;
515 findMergedConditions(BOpOp0,
TBB, TmpBB, CurBB, SwitchBB,
Opc, NewTrueProb,
516 NewFalseProb, InvertCond);
522 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB,
Opc, Probs[0],
523 Probs[1], InvertCond);
525 assert(
Opc == Instruction::And &&
"Unknown merge op!");
545 auto NewTrueProb = TProb + FProb / 2;
546 auto NewFalseProb = FProb / 2;
548 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB,
Opc, NewTrueProb,
549 NewFalseProb, InvertCond);
555 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB,
Opc, Probs[0],
556 Probs[1], InvertCond);
// Heuristic for a merged condition lowered as exactly two CaseBlocks:
// decides whether to emit them as separate branches. The first test detects
// the two cases comparing the same pair of values (directly or swapped);
// the second detects a shared RHS/predicate with chained true/false blocks.
// NOTE(review): fragmented extraction — original lines 562, 564-567,
// 572-576, 579-581, 583-584 and the final return are missing, so the
// return values of each branch are not visible here. TODO: restore from
// the pristine file before editing.
560bool IRTranslator::shouldEmitAsBranches(
561    const std::vector<SwitchCG::CaseBlock> &Cases) {
563  if (Cases.size() != 2)
568  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
569       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
570      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
571       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
577  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
578      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
582      Cases[0].TrueBB == Cases[1].ThisBB)
585      Cases[0].FalseBB == Cases[1].ThisBB)
592bool IRTranslator::translateUncondBr(
const User &U,
595 auto &CurMBB = MIRBuilder.
getMBB();
603 for (
const BasicBlock *Succ :
successors(&BrInst))
604 CurMBB.addSuccessor(&getMBB(*Succ));
608bool IRTranslator::translateCondBr(
const User &U,
611 auto &CurMBB = MIRBuilder.
getMBB();
617 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.
getSuccessor(1));
636 using namespace PatternMatch;
638 if (!TLI->isJumpExpensive() && CondI && CondI->
hasOneUse() &&
639 !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
642 const Value *BOp0, *BOp1;
644 Opcode = Instruction::And;
646 Opcode = Instruction::Or;
650 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
651 getEdgeProbability(&CurMBB, Succ0MBB),
652 getEdgeProbability(&CurMBB, Succ1MBB),
654 assert(SL->SwitchCases[0].ThisBB == &CurMBB &&
"Unexpected lowering!");
657 if (shouldEmitAsBranches(SL->SwitchCases)) {
659 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
660 SL->SwitchCases.erase(SL->SwitchCases.begin());
666 for (
unsigned I = 1,
E = SL->SwitchCases.size();
I !=
E; ++
I)
667 MF->erase(SL->SwitchCases[
I].ThisBB);
669 SL->SwitchCases.clear();
676 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
677 CurBuilder->getDebugLoc());
681 emitSwitchCase(CB, &CurMBB, *CurBuilder);
689 Src->addSuccessorWithoutProb(Dst);
693 Prob = getEdgeProbability(Src, Dst);
694 Src->addSuccessor(Dst, Prob);
700 const BasicBlock *SrcBB = Src->getBasicBlock();
701 const BasicBlock *DstBB = Dst->getBasicBlock();
705 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
706 return BranchProbability(1, SuccSize);
708 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
712 using namespace SwitchCG;
715 BranchProbabilityInfo *BPI = FuncInfo.BPI;
717 Clusters.reserve(
SI.getNumCases());
718 for (
const auto &
I :
SI.cases()) {
719 MachineBasicBlock *Succ = &getMBB(*
I.getCaseSuccessor());
720 assert(Succ &&
"Could not find successor mbb in mapping");
721 const ConstantInt *CaseVal =
I.getCaseValue();
722 BranchProbability Prob =
724 : BranchProbability(1,
SI.getNumCases() + 1);
725 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
728 MachineBasicBlock *DefaultMBB = &getMBB(*
SI.getDefaultDest());
735 MachineBasicBlock *SwitchMBB = &getMBB(*
SI.getParent());
738 if (Clusters.empty()) {
745 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB,
nullptr,
nullptr);
746 SL->findBitTestClusters(Clusters, &SI);
749 dbgs() <<
"Case clusters: ";
750 for (
const CaseCluster &
C : Clusters) {
751 if (
C.Kind == CC_JumpTable)
753 if (
C.Kind == CC_BitTests)
756 C.Low->getValue().print(
dbgs(),
true);
757 if (
C.Low !=
C.High) {
759 C.High->getValue().print(
dbgs(),
true);
766 assert(!Clusters.empty());
770 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
771 WorkList.push_back({SwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
773 while (!WorkList.empty()) {
774 SwitchWorkListItem
W = WorkList.pop_back_val();
776 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
778 if (NumClusters > 3 &&
781 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB, MIB);
785 if (!lowerSwitchWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
795 using namespace SwitchCG;
796 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
797 "Clusters not sorted?");
798 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
800 auto [LastLeft, FirstRight, LeftProb, RightProb] =
801 SL->computeSplitWorkItemInfo(W);
806 assert(PivotCluster >
W.FirstCluster);
807 assert(PivotCluster <=
W.LastCluster);
812 const ConstantInt *Pivot = PivotCluster->Low;
821 MachineBasicBlock *LeftMBB;
822 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
823 FirstLeft->Low ==
W.GE &&
824 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
825 LeftMBB = FirstLeft->MBB;
827 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
828 FuncInfo.MF->
insert(BBI, LeftMBB);
830 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
836 MachineBasicBlock *RightMBB;
837 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
W.LT &&
838 (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
839 RightMBB = FirstRight->MBB;
841 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
842 FuncInfo.MF->
insert(BBI, RightMBB);
844 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
852 if (
W.MBB == SwitchMBB)
853 emitSwitchCase(CB, SwitchMBB, MIB);
855 SL->SwitchCases.push_back(CB);
861 assert(JT.
Reg &&
"Should lower JT Header first!");
876 MachineIRBuilder MIB(*HeaderBB->
getParent());
883 Register SwitchOpReg = getOrCreateVReg(SValue);
885 auto Sub = MIB.
buildSub({SwitchTy}, SwitchOpReg, FirstCst);
890 const LLT PtrScalarTy =
LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
904 auto Cst = getOrCreateVReg(
945 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
959 "Can only handle SLE ranges");
970 const LLT CmpTy = MRI->getType(CmpOpReg);
971 auto Sub = MIB.
buildSub({CmpTy}, CmpOpReg, CondLHS);
1006 bool FallthroughUnreachable) {
1007 using namespace SwitchCG;
1008 MachineFunction *CurMF = SwitchMBB->
getParent();
1010 JumpTableHeader *JTH = &SL->JTCases[
I->JTCasesIndex].first;
1011 SwitchCG::JumpTable *JT = &SL->JTCases[
I->JTCasesIndex].second;
1012 BranchProbability DefaultProb =
W.DefaultProb;
1015 MachineBasicBlock *JumpMBB = JT->
MBB;
1016 CurMF->
insert(BBI, JumpMBB);
1026 auto JumpProb =
I->Prob;
1027 auto FallthroughProb = UnhandledProbs;
1035 if (*SI == DefaultMBB) {
1036 JumpProb += DefaultProb / 2;
1037 FallthroughProb -= DefaultProb / 2;
1042 addMachineCFGPred({SwitchMBB->
getBasicBlock(), (*SI)->getBasicBlock()},
1047 if (FallthroughUnreachable)
1048 JTH->FallthroughUnreachable =
true;
1050 if (!JTH->FallthroughUnreachable)
1051 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1052 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1057 JTH->HeaderBB = CurMBB;
1061 if (CurMBB == SwitchMBB) {
1062 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1064 JTH->Emitted =
true;
1071 bool FallthroughUnreachable,
1076 using namespace SwitchCG;
1079 if (
I->Low ==
I->High) {
1095 CaseBlock CB(Pred, FallthroughUnreachable,
LHS,
RHS, MHS,
I->MBB, Fallthrough,
1098 emitSwitchCase(CB, SwitchMBB, MIB);
1104 MachineIRBuilder &MIB = *CurBuilder;
1108 Register SwitchOpReg = getOrCreateVReg(*
B.SValue);
1110 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1112 auto RangeSub = MIB.
buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1117 LLT MaskTy = SwitchOpTy;
1123 for (
const SwitchCG::BitTestCase &Case :
B.Cases) {
1132 Register SubReg = RangeSub.getReg(0);
1133 if (SwitchOpTy != MaskTy)
1139 MachineBasicBlock *
MBB =
B.Cases[0].ThisBB;
1141 if (!
B.FallthroughUnreachable)
1142 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
1143 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
1147 if (!
B.FallthroughUnreachable) {
1151 RangeSub, RangeCst);
1165 MachineIRBuilder &MIB = *CurBuilder;
1171 if (PopCount == 1) {
1174 auto MaskTrailingZeros =
1179 }
else if (PopCount == BB.
Range) {
1181 auto MaskTrailingOnes =
1189 auto SwitchVal = MIB.
buildShl(SwitchTy, CstOne,
Reg);
1193 auto AndOp = MIB.
buildAnd(SwitchTy, SwitchVal, CstMask);
1200 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
1202 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1220bool IRTranslator::lowerBitTestWorkItem(
1226 bool FallthroughUnreachable) {
1227 using namespace SwitchCG;
1228 MachineFunction *CurMF = SwitchMBB->
getParent();
1230 BitTestBlock *BTB = &SL->BitTestCases[
I->BTCasesIndex];
1232 for (BitTestCase &BTC : BTB->Cases)
1233 CurMF->
insert(BBI, BTC.ThisBB);
1236 BTB->Parent = CurMBB;
1237 BTB->Default = Fallthrough;
1239 BTB->DefaultProb = UnhandledProbs;
1243 if (!BTB->ContiguousRange) {
1244 BTB->Prob += DefaultProb / 2;
1245 BTB->DefaultProb -= DefaultProb / 2;
1248 if (FallthroughUnreachable)
1249 BTB->FallthroughUnreachable =
true;
1252 if (CurMBB == SwitchMBB) {
1253 emitBitTestHeader(*BTB, SwitchMBB);
1254 BTB->Emitted =
true;
1264 using namespace SwitchCG;
1265 MachineFunction *CurMF = FuncInfo.MF;
1266 MachineBasicBlock *NextMBB =
nullptr;
1268 if (++BBI != FuncInfo.MF->end())
1277 [](
const CaseCluster &a,
const CaseCluster &b) {
1278 return a.Prob != b.Prob
1280 : a.Low->getValue().slt(b.Low->getValue());
1285 for (CaseClusterIt
I =
W.LastCluster;
I >
W.FirstCluster;) {
1287 if (
I->Prob >
W.LastCluster->Prob)
1289 if (
I->Kind == CC_Range &&
I->MBB == NextMBB) {
1297 BranchProbability DefaultProb =
W.DefaultProb;
1298 BranchProbability UnhandledProbs = DefaultProb;
1299 for (CaseClusterIt
I =
W.FirstCluster;
I <=
W.LastCluster; ++
I)
1300 UnhandledProbs +=
I->Prob;
1302 MachineBasicBlock *CurMBB =
W.MBB;
1303 for (CaseClusterIt
I =
W.FirstCluster,
E =
W.LastCluster;
I <=
E; ++
I) {
1304 bool FallthroughUnreachable =
false;
1305 MachineBasicBlock *Fallthrough;
1306 if (
I ==
W.LastCluster) {
1308 Fallthrough = DefaultMBB;
1313 CurMF->
insert(BBI, Fallthrough);
1315 UnhandledProbs -=
I->Prob;
1319 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1320 DefaultProb, UnhandledProbs,
I, Fallthrough,
1321 FallthroughUnreachable)) {
1329 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1330 UnhandledProbs,
I, Fallthrough,
1331 FallthroughUnreachable)) {
1338 if (!lowerSwitchRangeWorkItem(
I,
Cond, Fallthrough,
1339 FallthroughUnreachable, UnhandledProbs,
1340 CurMBB, MIB, SwitchMBB)) {
1347 CurMBB = Fallthrough;
1353bool IRTranslator::translateIndirectBr(
const User &U,
1361 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1362 MachineBasicBlock &CurBB = MIRBuilder.
getMBB();
1363 for (
const BasicBlock *Succ :
successors(&BrInst)) {
1367 if (!AddedSuccessors.
insert(Succ).second)
1377 return Arg->hasSwiftErrorAttr();
1385 TypeSize StoreSize = DL->getTypeStoreSize(LI.
getType());
1390 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(LI);
1395 Type *OffsetIRTy = DL->getIndexType(Ptr->
getType());
1399 assert(Regs.
size() == 1 &&
"swifterror should be single pointer");
1401 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.
getMBB(), Ptr);
1407 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1409 if (AA->pointsToConstantMemory(
1417 for (
unsigned i = 0; i < Regs.
size(); ++i) {
1422 Align BaseAlign = getMemOpAlign(LI);
1424 MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
1427 MIRBuilder.
buildLoad(Regs[i], Addr, *MMO);
1435 if (DL->getTypeStoreSize(
SI.getValueOperand()->getType()).isZero())
1439 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(*
SI.getValueOperand());
1442 Type *OffsetIRTy = DL->getIndexType(
SI.getPointerOperandType());
1445 if (CLI->supportSwiftError() &&
isSwiftError(
SI.getPointerOperand())) {
1446 assert(Vals.
size() == 1 &&
"swifterror should be single pointer");
1448 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.
getMBB(),
1449 SI.getPointerOperand());
1456 for (
unsigned i = 0; i < Vals.
size(); ++i) {
1460 MachinePointerInfo Ptr(
SI.getPointerOperand(), Offsets[i]);
1461 Align BaseAlign = getMemOpAlign(SI);
1462 auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
1464 SI.getAAMetadata(),
nullptr,
1465 SI.getSyncScopeID(),
SI.getOrdering());
1472 const Value *Src = U.getOperand(0);
1481 for (
auto Idx : EVI->indices())
1484 for (
auto Idx : IVI->indices())
1491 DL.getIndexedOffsetInType(Src->getType(), Indices));
// Translates `extractvalue`: no new registers are created for the result;
// the destination vreg list is simply aliased to the slice of the source's
// vregs starting at the flattened index of the extracted member.
// NOTE(review): fragmented extraction — original lines 1495 (trailing
// signature parameter), 1497-1498, 1500 (where `SrcRegs` and `Idx` are
// presumably computed from `Offsets`), and 1502 are missing; both names are
// used below without visible definitions. TODO: restore from the pristine
// file before editing.
1494bool IRTranslator::translateExtractValue(
const User &U,
1496  const Value *Src =
U.getOperand(0);
1499  ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(*Src);
1501  auto &DstRegs = allocateVRegs(U);
1503  for (
unsigned i = 0; i < DstRegs.size(); ++i)
1504    DstRegs[i] = SrcRegs[Idx++];
// Translates `insertvalue`: copies the aggregate's vregs into the
// destination, substituting the inserted value's vregs for the components
// whose flattened offset falls at/after the insertion offset.
// NOTE(review): fragmented extraction — original lines 1510 (trailing
// signature parameter), 1512, 1515-1516 (where `SrcRegs`, `InsertedRegs`
// and `Offset` are presumably computed), 1518, 1522 (the `else` of the
// branch below) and 1524 onward are missing; those names are used here
// without visible definitions, and the dangling `DstRegs[i] = SrcRegs[i];`
// is the else-arm of the preceding if. TODO: restore from the pristine
// file before editing.
1509bool IRTranslator::translateInsertValue(
const User &U,
1511  const Value *Src =
U.getOperand(0);
1513  auto &DstRegs = allocateVRegs(U);
1514  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1517  auto *InsertedIt = InsertedRegs.
begin();
1519  for (
unsigned i = 0; i < DstRegs.size(); ++i) {
1520    if (DstOffsets[i] >=
Offset && InsertedIt != InsertedRegs.
end())
1521      DstRegs[i] = *InsertedIt++;
1523      DstRegs[i] = SrcRegs[i];
1529bool IRTranslator::translateSelect(
const User &U,
1531 Register Tst = getOrCreateVReg(*
U.getOperand(0));
1540 for (
unsigned i = 0; i < ResRegs.
size(); ++i) {
1541 MIRBuilder.
buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1547bool IRTranslator::translateCopy(
const User &U,
const Value &V,
1550 auto &Regs = *VMap.getVRegs(U);
1552 Regs.push_back(Src);
1553 VMap.getOffsets(U)->push_back(0);
1562bool IRTranslator::translateBitCast(
const User &U,
1570 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1572 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
1575 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1578bool IRTranslator::translateCast(
unsigned Opcode,
const User &U,
1580 if (!mayTranslateUserTypes(U))
1593bool IRTranslator::translateGetElementPtr(
const User &U,
1595 Value &Op0 = *
U.getOperand(0);
1599 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1602 uint32_t PtrAddFlags = 0;
1608 auto PtrAddFlagsWithConst = [&](int64_t
Offset) {
1618 unsigned VectorWidth = 0;
1622 bool WantSplatVector =
false;
1626 WantSplatVector = VectorWidth > 1;
1631 if (WantSplatVector && !PtrTy.
isVector()) {
1638 OffsetIRTy = DL->getIndexType(PtrIRTy);
1645 const Value *Idx = GTI.getOperand();
1646 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1648 Offset += DL->getStructLayout(StTy)->getElementOffset(
Field);
1651 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1656 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1657 Offset += ElementSize * *Val;
1666 PtrAddFlagsWithConst(
Offset))
1671 Register IdxReg = getOrCreateVReg(*Idx);
1672 LLT IdxTy = MRI->getType(IdxReg);
1673 if (IdxTy != OffsetTy) {
1674 if (!IdxTy.
isVector() && WantSplatVector) {
1687 if (ElementSize != 1) {
1698 MIRBuilder.
buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
1701 GepOffsetReg = IdxReg;
1705 MIRBuilder.
buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
1714 MIRBuilder.
buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1715 PtrAddFlagsWithConst(
Offset));
1719 MIRBuilder.
buildCopy(getOrCreateVReg(U), BaseReg);
1723bool IRTranslator::translateMemFunc(
const CallInst &CI,
1733 unsigned MinPtrSize = UINT_MAX;
1734 for (
auto AI = CI.
arg_begin(), AE = CI.
arg_end(); std::next(AI) != AE; ++AI) {
1735 Register SrcReg = getOrCreateVReg(**AI);
1736 LLT SrcTy = MRI->getType(SrcReg);
1738 MinPtrSize = std::min<unsigned>(SrcTy.
getSizeInBits(), MinPtrSize);
1746 if (MRI->getType(SizeOpReg) != SizeTy)
1758 ConstantInt *CopySize =
nullptr;
1761 DstAlign = MCI->getDestAlign().valueOrOne();
1762 SrcAlign = MCI->getSourceAlign().valueOrOne();
1765 DstAlign = MMI->getDestAlign().valueOrOne();
1766 SrcAlign = MMI->getSourceAlign().valueOrOne();
1770 DstAlign = MSI->getDestAlign().valueOrOne();
1773 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1789 if (AA && CopySize &&
1790 AA->pointsToConstantMemory(MemoryLocation(
1800 ICall.addMemOperand(
1801 MF->getMachineMemOperand(MachinePointerInfo(CI.
getArgOperand(0)),
1802 StoreFlags, 1, DstAlign, AAInfo));
1803 if (Opcode != TargetOpcode::G_MEMSET)
1804 ICall.addMemOperand(MF->getMachineMemOperand(
1805 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1810bool IRTranslator::translateTrap(
const CallInst &CI,
1813 StringRef TrapFuncName =
1814 CI.
getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
1815 if (TrapFuncName.
empty()) {
1816 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1825 CallLowering::CallLoweringInfo
Info;
1826 if (Opcode == TargetOpcode::G_UBSANTRAP)
1833 return CLI->lowerCall(MIRBuilder, Info);
1836bool IRTranslator::translateVectorInterleave2Intrinsic(
1839 "This function can only be called on the interleave2 intrinsic!");
1843 Register Res = getOrCreateVReg(CI);
1845 LLT OpTy = MRI->getType(Op0);
1852bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1855 "This function can only be called on the deinterleave2 intrinsic!");
1862 LLT ResTy = MRI->getType(Res[0]);
1871void IRTranslator::getStackGuard(
Register DstReg,
1874 TLI->getSDagStackGuard(*MF->getFunction().getParent(), *Libcalls);
1877 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
1882 const TargetRegisterInfo *
TRI = MF->getSubtarget().getRegisterInfo();
1883 MRI->setRegClass(DstReg,
TRI->getPointerRegClass());
1885 MIRBuilder.
buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1887 unsigned AddrSpace =
Global->getType()->getPointerAddressSpace();
1888 LLT PtrTy =
LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1890 MachinePointerInfo MPInfo(
Global);
1893 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1894 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1895 MIB.setMemRefs({MemRef});
1898bool IRTranslator::translateOverflowIntrinsic(
const CallInst &CI,
unsigned Op,
1902 Op, {ResRegs[0], ResRegs[1]},
1908bool IRTranslator::translateFixedPointIntrinsic(
unsigned Op,
const CallInst &CI,
1910 Register Dst = getOrCreateVReg(CI);
1914 MIRBuilder.
buildInstr(
Op, {Dst}, { Src0, Src1, Scale });
1922 case Intrinsic::acos:
1923 return TargetOpcode::G_FACOS;
1924 case Intrinsic::asin:
1925 return TargetOpcode::G_FASIN;
1926 case Intrinsic::atan:
1927 return TargetOpcode::G_FATAN;
1928 case Intrinsic::atan2:
1929 return TargetOpcode::G_FATAN2;
1930 case Intrinsic::bswap:
1931 return TargetOpcode::G_BSWAP;
1932 case Intrinsic::bitreverse:
1933 return TargetOpcode::G_BITREVERSE;
1934 case Intrinsic::fshl:
1935 return TargetOpcode::G_FSHL;
1936 case Intrinsic::fshr:
1937 return TargetOpcode::G_FSHR;
1938 case Intrinsic::ceil:
1939 return TargetOpcode::G_FCEIL;
1940 case Intrinsic::cos:
1941 return TargetOpcode::G_FCOS;
1942 case Intrinsic::cosh:
1943 return TargetOpcode::G_FCOSH;
1944 case Intrinsic::ctpop:
1945 return TargetOpcode::G_CTPOP;
1946 case Intrinsic::exp:
1947 return TargetOpcode::G_FEXP;
1948 case Intrinsic::exp2:
1949 return TargetOpcode::G_FEXP2;
1950 case Intrinsic::exp10:
1951 return TargetOpcode::G_FEXP10;
1952 case Intrinsic::fabs:
1953 return TargetOpcode::G_FABS;
1954 case Intrinsic::copysign:
1955 return TargetOpcode::G_FCOPYSIGN;
1956 case Intrinsic::minnum:
1957 return TargetOpcode::G_FMINNUM;
1958 case Intrinsic::maxnum:
1959 return TargetOpcode::G_FMAXNUM;
1960 case Intrinsic::minimum:
1961 return TargetOpcode::G_FMINIMUM;
1962 case Intrinsic::maximum:
1963 return TargetOpcode::G_FMAXIMUM;
1964 case Intrinsic::minimumnum:
1965 return TargetOpcode::G_FMINIMUMNUM;
1966 case Intrinsic::maximumnum:
1967 return TargetOpcode::G_FMAXIMUMNUM;
1968 case Intrinsic::canonicalize:
1969 return TargetOpcode::G_FCANONICALIZE;
1970 case Intrinsic::floor:
1971 return TargetOpcode::G_FFLOOR;
1972 case Intrinsic::fma:
1973 return TargetOpcode::G_FMA;
1974 case Intrinsic::log:
1975 return TargetOpcode::G_FLOG;
1976 case Intrinsic::log2:
1977 return TargetOpcode::G_FLOG2;
1978 case Intrinsic::log10:
1979 return TargetOpcode::G_FLOG10;
1980 case Intrinsic::ldexp:
1981 return TargetOpcode::G_FLDEXP;
1982 case Intrinsic::nearbyint:
1983 return TargetOpcode::G_FNEARBYINT;
1984 case Intrinsic::pow:
1985 return TargetOpcode::G_FPOW;
1986 case Intrinsic::powi:
1987 return TargetOpcode::G_FPOWI;
1988 case Intrinsic::rint:
1989 return TargetOpcode::G_FRINT;
1990 case Intrinsic::round:
1991 return TargetOpcode::G_INTRINSIC_ROUND;
1992 case Intrinsic::roundeven:
1993 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1994 case Intrinsic::sin:
1995 return TargetOpcode::G_FSIN;
1996 case Intrinsic::sinh:
1997 return TargetOpcode::G_FSINH;
1998 case Intrinsic::sqrt:
1999 return TargetOpcode::G_FSQRT;
2000 case Intrinsic::tan:
2001 return TargetOpcode::G_FTAN;
2002 case Intrinsic::tanh:
2003 return TargetOpcode::G_FTANH;
2004 case Intrinsic::trunc:
2005 return TargetOpcode::G_INTRINSIC_TRUNC;
2006 case Intrinsic::readcyclecounter:
2007 return TargetOpcode::G_READCYCLECOUNTER;
2008 case Intrinsic::readsteadycounter:
2009 return TargetOpcode::G_READSTEADYCOUNTER;
2010 case Intrinsic::ptrmask:
2011 return TargetOpcode::G_PTRMASK;
2012 case Intrinsic::lrint:
2013 return TargetOpcode::G_INTRINSIC_LRINT;
2014 case Intrinsic::llrint:
2015 return TargetOpcode::G_INTRINSIC_LLRINT;
2017 case Intrinsic::vector_reduce_fmin:
2018 return TargetOpcode::G_VECREDUCE_FMIN;
2019 case Intrinsic::vector_reduce_fmax:
2020 return TargetOpcode::G_VECREDUCE_FMAX;
2021 case Intrinsic::vector_reduce_fminimum:
2022 return TargetOpcode::G_VECREDUCE_FMINIMUM;
2023 case Intrinsic::vector_reduce_fmaximum:
2024 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
2025 case Intrinsic::vector_reduce_add:
2026 return TargetOpcode::G_VECREDUCE_ADD;
2027 case Intrinsic::vector_reduce_mul:
2028 return TargetOpcode::G_VECREDUCE_MUL;
2029 case Intrinsic::vector_reduce_and:
2030 return TargetOpcode::G_VECREDUCE_AND;
2031 case Intrinsic::vector_reduce_or:
2032 return TargetOpcode::G_VECREDUCE_OR;
2033 case Intrinsic::vector_reduce_xor:
2034 return TargetOpcode::G_VECREDUCE_XOR;
2035 case Intrinsic::vector_reduce_smax:
2036 return TargetOpcode::G_VECREDUCE_SMAX;
2037 case Intrinsic::vector_reduce_smin:
2038 return TargetOpcode::G_VECREDUCE_SMIN;
2039 case Intrinsic::vector_reduce_umax:
2040 return TargetOpcode::G_VECREDUCE_UMAX;
2041 case Intrinsic::vector_reduce_umin:
2042 return TargetOpcode::G_VECREDUCE_UMIN;
2043 case Intrinsic::experimental_vector_compress:
2044 return TargetOpcode::G_VECTOR_COMPRESS;
2045 case Intrinsic::lround:
2046 return TargetOpcode::G_LROUND;
2047 case Intrinsic::llround:
2048 return TargetOpcode::G_LLROUND;
2049 case Intrinsic::get_fpenv:
2050 return TargetOpcode::G_GET_FPENV;
2051 case Intrinsic::get_fpmode:
2052 return TargetOpcode::G_GET_FPMODE;
2057bool IRTranslator::translateSimpleIntrinsic(
const CallInst &CI,
2061 unsigned Op = getSimpleIntrinsicOpcode(
ID);
2069 for (
const auto &Arg : CI.
args())
2072 MIRBuilder.
buildInstr(
Op, {getOrCreateVReg(CI)}, VRegs,
2080 case Intrinsic::experimental_constrained_fadd:
2081 return TargetOpcode::G_STRICT_FADD;
2082 case Intrinsic::experimental_constrained_fsub:
2083 return TargetOpcode::G_STRICT_FSUB;
2084 case Intrinsic::experimental_constrained_fmul:
2085 return TargetOpcode::G_STRICT_FMUL;
2086 case Intrinsic::experimental_constrained_fdiv:
2087 return TargetOpcode::G_STRICT_FDIV;
2088 case Intrinsic::experimental_constrained_frem:
2089 return TargetOpcode::G_STRICT_FREM;
2090 case Intrinsic::experimental_constrained_fma:
2091 return TargetOpcode::G_STRICT_FMA;
2092 case Intrinsic::experimental_constrained_sqrt:
2093 return TargetOpcode::G_STRICT_FSQRT;
2094 case Intrinsic::experimental_constrained_ldexp:
2095 return TargetOpcode::G_STRICT_FLDEXP;
2096 case Intrinsic::experimental_constrained_fcmp:
2097 return TargetOpcode::G_STRICT_FCMP;
2098 case Intrinsic::experimental_constrained_fcmps:
2099 return TargetOpcode::G_STRICT_FCMPS;
2105bool IRTranslator::translateConstrainedFPIntrinsic(
2117 if (Opcode == TargetOpcode::G_STRICT_FCMP ||
2118 Opcode == TargetOpcode::G_STRICT_FCMPS) {
2120 Register Operand0 = getOrCreateVReg(*FPCmp->getArgOperand(0));
2121 Register Operand1 = getOrCreateVReg(*FPCmp->getArgOperand(1));
2124 .addPredicate(FPCmp->getPredicate())
2138std::optional<MCRegister> IRTranslator::getArgPhysReg(
Argument &Arg) {
2139 auto VRegs = getOrCreateVRegs(Arg);
2140 if (VRegs.
size() != 1)
2141 return std::nullopt;
2144 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2145 if (!VRegDef || !VRegDef->isCopy())
2146 return std::nullopt;
2147 return VRegDef->getOperand(1).getReg().asMCReg();
2150bool IRTranslator::translateIfEntryValueArgument(
bool isDeclare,
Value *Val,
2162 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2164 LLVM_DEBUG(
dbgs() <<
"Dropping dbg." << (isDeclare ?
"declare" :
"value")
2165 <<
": expression is entry_value but "
2166 <<
"couldn't find a physical register\n");
2174 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2186 case Intrinsic::experimental_convergence_anchor:
2187 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2188 case Intrinsic::experimental_convergence_entry:
2189 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2190 case Intrinsic::experimental_convergence_loop:
2191 return TargetOpcode::CONVERGENCECTRL_LOOP;
2195bool IRTranslator::translateConvergenceControlIntrinsic(
2198 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2201 if (
ID == Intrinsic::experimental_convergence_loop) {
2203 assert(Bundle &&
"Expected a convergence control token.");
2205 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2215 if (ORE->enabled()) {
2217 MemoryOpRemark
R(*ORE,
"gisel-irtranslator-memsize", *DL, *LibInfo);
2225 if (translateSimpleIntrinsic(CI,
ID, MIRBuilder))
2231 case Intrinsic::lifetime_start:
2232 case Intrinsic::lifetime_end: {
2235 MF->getFunction().hasOptNone())
2238 unsigned Op =
ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2239 : TargetOpcode::LIFETIME_END;
2248 case Intrinsic::fake_use: {
2250 for (
const auto &Arg : CI.
args())
2252 MIRBuilder.
buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
2253 MF->setHasFakeUses(
true);
2256 case Intrinsic::dbg_declare: {
2263 case Intrinsic::dbg_label: {
2269 "Expected inlined-at fields to agree");
2274 case Intrinsic::vaend:
2278 case Intrinsic::vastart: {
2280 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2283 MIRBuilder.
buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2284 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2286 ListSize, Alignment));
2289 case Intrinsic::dbg_assign:
2296 case Intrinsic::dbg_value: {
2303 case Intrinsic::uadd_with_overflow:
2304 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2305 case Intrinsic::sadd_with_overflow:
2306 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2307 case Intrinsic::usub_with_overflow:
2308 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2309 case Intrinsic::ssub_with_overflow:
2310 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2311 case Intrinsic::umul_with_overflow:
2312 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2313 case Intrinsic::smul_with_overflow:
2314 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2315 case Intrinsic::uadd_sat:
2316 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2317 case Intrinsic::sadd_sat:
2318 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2319 case Intrinsic::usub_sat:
2320 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2321 case Intrinsic::ssub_sat:
2322 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2323 case Intrinsic::ushl_sat:
2324 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2325 case Intrinsic::sshl_sat:
2326 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2327 case Intrinsic::umin:
2328 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2329 case Intrinsic::umax:
2330 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2331 case Intrinsic::smin:
2332 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2333 case Intrinsic::smax:
2334 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2335 case Intrinsic::abs:
2337 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2338 case Intrinsic::smul_fix:
2339 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2340 case Intrinsic::umul_fix:
2341 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2342 case Intrinsic::smul_fix_sat:
2343 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2344 case Intrinsic::umul_fix_sat:
2345 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2346 case Intrinsic::sdiv_fix:
2347 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2348 case Intrinsic::udiv_fix:
2349 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2350 case Intrinsic::sdiv_fix_sat:
2351 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2352 case Intrinsic::udiv_fix_sat:
2353 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2354 case Intrinsic::fmuladd: {
2355 const TargetMachine &TM = MF->getTarget();
2356 Register Dst = getOrCreateVReg(CI);
2361 TLI->isFMAFasterThanFMulAndFAdd(*MF,
2362 TLI->getValueType(*DL, CI.
getType()))) {
2365 MIRBuilder.
buildFMA(Dst, Op0, Op1, Op2,
2376 case Intrinsic::frexp: {
2383 case Intrinsic::modf: {
2385 MIRBuilder.
buildModf(VRegs[0], VRegs[1],
2390 case Intrinsic::sincos: {
2397 case Intrinsic::fptosi_sat:
2401 case Intrinsic::fptoui_sat:
2405 case Intrinsic::memcpy_inline:
2406 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2407 case Intrinsic::memcpy:
2408 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2409 case Intrinsic::memmove:
2410 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2411 case Intrinsic::memset:
2412 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2413 case Intrinsic::eh_typeid_for: {
2416 unsigned TypeID = MF->getTypeIDFor(GV);
2420 case Intrinsic::objectsize:
2423 case Intrinsic::is_constant:
2426 case Intrinsic::stackguard:
2427 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2429 case Intrinsic::stackprotector: {
2432 if (TLI->useLoadStackGuardNode(*CI.
getModule())) {
2433 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2434 getStackGuard(GuardVal, MIRBuilder);
2439 int FI = getOrCreateFrameIndex(*Slot);
2440 MF->getFrameInfo().setStackProtectorIndex(FI);
2443 GuardVal, getOrCreateVReg(*Slot),
2450 case Intrinsic::stacksave: {
2451 MIRBuilder.
buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2454 case Intrinsic::stackrestore: {
2455 MIRBuilder.
buildInstr(TargetOpcode::G_STACKRESTORE, {},
2459 case Intrinsic::cttz:
2460 case Intrinsic::ctlz: {
2462 bool isTrailing =
ID == Intrinsic::cttz;
2463 unsigned Opcode = isTrailing
2464 ? Cst->
isZero() ? TargetOpcode::G_CTTZ
2465 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2466 : Cst->
isZero() ? TargetOpcode::G_CTLZ
2467 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2468 MIRBuilder.
buildInstr(Opcode, {getOrCreateVReg(CI)},
2472 case Intrinsic::invariant_start: {
2476 case Intrinsic::invariant_end:
2478 case Intrinsic::expect:
2479 case Intrinsic::expect_with_probability:
2480 case Intrinsic::annotation:
2481 case Intrinsic::ptr_annotation:
2482 case Intrinsic::launder_invariant_group:
2483 case Intrinsic::strip_invariant_group: {
2485 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2489 case Intrinsic::assume:
2490 case Intrinsic::experimental_noalias_scope_decl:
2491 case Intrinsic::var_annotation:
2492 case Intrinsic::sideeffect:
2495 case Intrinsic::read_volatile_register:
2496 case Intrinsic::read_register: {
2499 .
buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2503 case Intrinsic::write_register: {
2505 MIRBuilder.
buildInstr(TargetOpcode::G_WRITE_REGISTER)
2510 case Intrinsic::localescape: {
2511 MachineBasicBlock &EntryMBB = MF->front();
2516 for (
unsigned Idx = 0,
E = CI.
arg_size(); Idx <
E; ++Idx) {
2523 MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2536 case Intrinsic::vector_reduce_fadd:
2537 case Intrinsic::vector_reduce_fmul: {
2540 Register Dst = getOrCreateVReg(CI);
2546 Opc =
ID == Intrinsic::vector_reduce_fadd
2547 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2548 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2549 if (!MRI->getType(VecSrc).isVector())
2550 Opc =
ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
2551 : TargetOpcode::G_FMUL;
2559 if (
ID == Intrinsic::vector_reduce_fadd) {
2560 Opc = TargetOpcode::G_VECREDUCE_FADD;
2561 ScalarOpc = TargetOpcode::G_FADD;
2563 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2564 ScalarOpc = TargetOpcode::G_FMUL;
2566 LLT DstTy = MRI->getType(Dst);
2569 MIRBuilder.
buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2574 case Intrinsic::trap:
2575 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2576 case Intrinsic::debugtrap:
2577 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2578 case Intrinsic::ubsantrap:
2579 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2580 case Intrinsic::allow_runtime_check:
2581 case Intrinsic::allow_ubsan_check:
2582 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2585 case Intrinsic::amdgcn_cs_chain:
2586 case Intrinsic::amdgcn_call_whole_wave:
2587 return translateCallBase(CI, MIRBuilder);
2588 case Intrinsic::fptrunc_round: {
2593 std::optional<RoundingMode> RoundMode =
2598 .
buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2599 {getOrCreateVReg(CI)},
2601 .addImm((
int)*RoundMode);
2605 case Intrinsic::is_fpclass: {
2610 .
buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2611 {getOrCreateVReg(*FpValue)})
2616 case Intrinsic::set_fpenv: {
2621 case Intrinsic::reset_fpenv:
2624 case Intrinsic::set_fpmode: {
2629 case Intrinsic::reset_fpmode:
2632 case Intrinsic::get_rounding:
2635 case Intrinsic::set_rounding:
2638 case Intrinsic::vscale: {
2642 case Intrinsic::scmp:
2643 MIRBuilder.
buildSCmp(getOrCreateVReg(CI),
2647 case Intrinsic::ucmp:
2648 MIRBuilder.
buildUCmp(getOrCreateVReg(CI),
2652 case Intrinsic::vector_extract:
2653 return translateExtractVector(CI, MIRBuilder);
2654 case Intrinsic::vector_insert:
2655 return translateInsertVector(CI, MIRBuilder);
2656 case Intrinsic::stepvector: {
2660 case Intrinsic::prefetch: {
2667 auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2670 MIRBuilder.
buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2676 case Intrinsic::vector_interleave2:
2677 case Intrinsic::vector_deinterleave2: {
2685 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2687 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2690#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2691 case Intrinsic::INTRINSIC:
2692#include "llvm/IR/ConstrainedOps.def"
2695 case Intrinsic::experimental_convergence_anchor:
2696 case Intrinsic::experimental_convergence_entry:
2697 case Intrinsic::experimental_convergence_loop:
2698 return translateConvergenceControlIntrinsic(CI,
ID, MIRBuilder);
2699 case Intrinsic::reloc_none: {
2702 MIRBuilder.
buildInstr(TargetOpcode::RELOC_NONE)
2710bool IRTranslator::translateInlineAsm(
const CallBase &CB,
2712 if (!mayTranslateUserTypes(CB))
2715 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2719 dbgs() <<
"Inline asm lowering is not supported for this target yet\n");
2724 MIRBuilder, CB, [&](
const Value &Val) {
return getOrCreateVRegs(Val); });
2727bool IRTranslator::translateCallBase(
const CallBase &CB,
2734 for (
const auto &Arg : CB.
args()) {
2736 assert(SwiftInVReg == 0 &&
"Expected only one swift error argument");
2738 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2739 MIRBuilder.
buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2740 &CB, &MIRBuilder.
getMBB(), Arg));
2743 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.
getMBB(), Arg);
2746 Args.push_back(getOrCreateVRegs(*Arg));
2750 if (ORE->enabled()) {
2752 MemoryOpRemark
R(*ORE,
"gisel-irtranslator-memsize", *DL, *LibInfo);
2758 std::optional<CallLowering::PtrAuthInfo> PAI;
2763 const Value *
Key = Bundle->Inputs[0];
2770 if (!CalleeCPA || !
isa<Function>(CalleeCPA->getPointer()) ||
2771 !CalleeCPA->isKnownCompatibleWith(
Key, Discriminator, *DL)) {
2773 Register DiscReg = getOrCreateVReg(*Discriminator);
2781 const auto &Token = *Bundle->Inputs[0].get();
2782 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2788 bool Success = CLI->lowerCall(
2789 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2794 assert(!HasTailCall &&
"Can't tail call return twice from block?");
2795 const TargetInstrInfo *
TII = MF->getSubtarget().getInstrInfo();
2803 if (!mayTranslateUserTypes(U))
2811 if (
F && (
F->hasDLLImportStorageClass() ||
2812 (MF->getTarget().getTargetTriple().isOSWindows() &&
2813 F->hasExternalWeakLinkage())))
2825 return translateInlineAsm(CI, MIRBuilder);
2829 if (translateCallBase(CI, MIRBuilder)) {
2838 if (translateKnownIntrinsic(CI,
ID, MIRBuilder))
2842 TLI->getTgtMemIntrinsic(Infos, CI, *MF,
ID);
2844 return translateIntrinsic(CI,
ID, MIRBuilder, Infos);
2848bool IRTranslator::translateIntrinsic(
2853 ResultRegs = getOrCreateVRegs(CB);
2868 assert(CI->getBitWidth() <= 64 &&
2869 "large intrinsic immediates not handled");
2870 MIB.
addImm(CI->getSExtValue());
2875 auto *MD = MDVal->getMetadata();
2879 MDN =
MDNode::get(MF->getFunction().getContext(), ConstMD);
2886 if (VRegs.
size() > 1)
2893 for (
const auto &Info : TgtMemIntrinsicInfos) {
2896 LLT MemTy =
Info.memVT.isSimple()
2898 : LLT::scalar(
Info.memVT.getStoreSizeInBits());
2902 MachinePointerInfo MPI;
2904 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2905 }
else if (
Info.fallbackAddressSpace) {
2906 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2915 auto *Token = Bundle->Inputs[0].get();
2916 Register TokenReg = getOrCreateVReg(*Token);
2927bool IRTranslator::findUnwindDestinations(
2949 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2955 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2956 UnwindDests.back().first->setIsEHScopeEntry();
2957 UnwindDests.back().first->setIsEHFuncletEntry();
2962 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2963 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2965 if (IsMSVCCXX || IsCoreCLR)
2966 UnwindDests.back().first->setIsEHFuncletEntry();
2968 UnwindDests.back().first->setIsEHScopeEntry();
2970 NewEHPadBB = CatchSwitch->getUnwindDest();
2975 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2976 if (BPI && NewEHPadBB)
2978 EHPadBB = NewEHPadBB;
2983bool IRTranslator::translateInvoke(
const User &U,
2986 MCContext &
Context = MF->getContext();
2991 const Function *Fn =
I.getCalledFunction();
2998 if (
I.hasDeoptState())
3012 (MF->getTarget().getTargetTriple().isOSWindows() &&
3016 bool LowerInlineAsm =
I.isInlineAsm();
3017 bool NeedEHLabel =
true;
3023 MIRBuilder.
buildInstr(TargetOpcode::G_INVOKE_REGION_START);
3024 BeginSymbol =
Context.createTempSymbol();
3028 if (LowerInlineAsm) {
3029 if (!translateInlineAsm(
I, MIRBuilder))
3031 }
else if (!translateCallBase(
I, MIRBuilder))
3036 EndSymbol =
Context.createTempSymbol();
3041 BranchProbabilityInfo *BPI = FuncInfo.BPI;
3042 MachineBasicBlock *InvokeMBB = &MIRBuilder.
getMBB();
3043 BranchProbability EHPadBBProb =
3047 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
3050 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
3051 &ReturnMBB = getMBB(*ReturnBB);
3053 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
3054 for (
auto &UnwindDest : UnwindDests) {
3055 UnwindDest.first->setIsEHPad();
3056 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3061 assert(BeginSymbol &&
"Expected a begin symbol!");
3062 assert(EndSymbol &&
"Expected an end symbol!");
3063 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
3066 MIRBuilder.
buildBr(ReturnMBB);
3072bool IRTranslator::translateCallBr(
const User &U,
3074 if (!mayTranslateUserTypes(U))
3078 MachineBasicBlock *CallBrMBB = &MIRBuilder.
getMBB();
3081 if (
I.isInlineAsm()) {
3087 if (!translateIntrinsic(
I, IID, MIRBuilder))
3091 SmallPtrSet<BasicBlock *, 8> Dests = {
I.getDefaultDest()};
3092 MachineBasicBlock *
Return = &getMBB(*
I.getDefaultDest());
3101 for (BasicBlock *Dest :
I.getIndirectDests()) {
3102 MachineBasicBlock &
Target = getMBB(*Dest);
3103 Target.setIsInlineAsmBrIndirectTarget();
3104 Target.setLabelMustBeEmitted();
3106 if (Dests.
insert(Dest).second)
3118bool IRTranslator::translateLandingPad(
const User &U,
3122 MachineBasicBlock &
MBB = MIRBuilder.
getMBB();
3128 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
3129 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
3130 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
3142 MIRBuilder.
buildInstr(TargetOpcode::EH_LABEL)
3147 const TargetRegisterInfo &
TRI = *MF->getSubtarget().getRegisterInfo();
3148 if (
auto *RegMask =
TRI.getCustomEHPadPreservedMask(*MF))
3149 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3158 assert(Tys.
size() == 2 &&
"Only two-valued landingpads are supported");
3161 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3167 MIRBuilder.
buildCopy(ResRegs[0], ExceptionReg);
3169 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3174 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3175 MIRBuilder.
buildCopy(PtrVReg, SelectorReg);
3176 MIRBuilder.
buildCast(ResRegs[1], PtrVReg);
3181bool IRTranslator::translateAlloca(
const User &U,
3189 Register Res = getOrCreateVReg(AI);
3190 int FI = getOrCreateFrameIndex(AI);
3196 if (MF->getTarget().getTargetTriple().isOSWindows())
3201 Type *IntPtrIRTy = DL->getIntPtrType(AI.
getType());
3203 if (MRI->getType(NumElts) != IntPtrTy) {
3204 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3210 TypeSize TySize = DL->getTypeAllocSize(Ty);
3212 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3216 TySizeReg = MRI->createGenericVirtualRegister(IntPtrTy);
3221 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, TySize.
getFixedValue()));
3223 MIRBuilder.
buildMul(AllocSize, NumElts, TySizeReg);
3228 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3230 auto AllocAdd = MIRBuilder.
buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3234 auto AlignedAlloc = MIRBuilder.
buildAnd(IntPtrTy, AllocAdd, AlignCst);
3237 if (Alignment <= StackAlign)
3238 Alignment =
Align(1);
3241 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3242 assert(MF->getFrameInfo().hasVarSizedObjects());
3251 MIRBuilder.
buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3252 {getOrCreateVReg(*
U.getOperand(0)),
3253 DL->getABITypeAlign(
U.getType()).value()});
3257bool IRTranslator::translateUnreachable(
const User &U,
3260 if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
3261 MF->getTarget().Options.NoTrapAfterNoreturn))
3268bool IRTranslator::translateInsertElement(
const User &U,
3273 FVT && FVT->getNumElements() == 1)
3274 return translateCopy(U, *
U.getOperand(1), MIRBuilder);
3277 Register Val = getOrCreateVReg(*
U.getOperand(0));
3278 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3279 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3282 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3283 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3284 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3285 Idx = getOrCreateVReg(*NewIdxCI);
3289 Idx = getOrCreateVReg(*
U.getOperand(2));
3290 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3291 const LLT VecIdxTy =
3292 MRI->getType(Idx).changeElementSize(PreferredVecIdxWidth);
3299bool IRTranslator::translateInsertVector(
const User &U,
3302 Register Vec = getOrCreateVReg(*
U.getOperand(0));
3303 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3306 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3311 CI = ConstantInt::get(CI->
getContext(), NewIdx);
3316 ResultType && ResultType->getNumElements() == 1) {
3318 InputType && InputType->getNumElements() == 1) {
3322 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3328 Register Idx = getOrCreateVReg(*CI);
3336 Register Idx = getOrCreateVReg(*CI);
3337 auto ScaledIndex = MIRBuilder.
buildMul(
3338 VecIdxTy, MIRBuilder.
buildVScale(VecIdxTy, 1), Idx);
3345 getOrCreateVReg(U), getOrCreateVReg(*
U.getOperand(0)),
3350bool IRTranslator::translateExtractElement(
const User &U,
3354 if (
const FixedVectorType *FVT =
3356 if (FVT->getNumElements() == 1)
3357 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3360 Register Val = getOrCreateVReg(*
U.getOperand(0));
3361 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3366 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3367 Idx = getOrCreateVReg(*NewIdxCI);
3371 Idx = getOrCreateVReg(*
U.getOperand(1));
3372 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3373 const LLT VecIdxTy =
3381bool IRTranslator::translateExtractVector(
const User &U,
3384 Register Vec = getOrCreateVReg(*
U.getOperand(0));
3386 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3391 CI = ConstantInt::get(CI->
getContext(), NewIdx);
3396 ResultType && ResultType->getNumElements() == 1) {
3398 InputType && InputType->getNumElements() == 1) {
3401 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3407 Register Idx = getOrCreateVReg(*CI);
3415 Register Idx = getOrCreateVReg(*CI);
3416 auto ScaledIndex = MIRBuilder.
buildMul(
3417 VecIdxTy, MIRBuilder.
buildVScale(VecIdxTy, 1), Idx);
3424 getOrCreateVReg(*
U.getOperand(0)),
3429bool IRTranslator::translateShuffleVector(
const User &U,
3435 if (
U.getOperand(0)->getType()->isScalableTy()) {
3436 Register Val = getOrCreateVReg(*
U.getOperand(0));
3438 MRI->getType(Val).getElementType(), Val, 0);
3445 Mask = SVI->getShuffleMask();
3456 unsigned M =
Mask[0];
3458 if (M == 0 || M == 1)
3459 return translateCopy(U, *
U.getOperand(M), MIRBuilder);
3465 Dst, getOrCreateVReg(*
U.getOperand(0)), M);
3466 }
else if (M < SrcElts * 2) {
3468 Dst, getOrCreateVReg(*
U.getOperand(1)), M - SrcElts);
3480 for (
int M : Mask) {
3482 if (M == 0 || M == 1) {
3483 Ops.push_back(getOrCreateVReg(*
U.getOperand(M)));
3485 if (!
Undef.isValid()) {
3486 Undef = MRI->createGenericVirtualRegister(SrcTy);
3496 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3498 .
buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3499 {getOrCreateVReg(*
U.getOperand(0)),
3500 getOrCreateVReg(*
U.getOperand(1))})
3501 .addShuffleMask(MaskAlloc);
3508 SmallVector<MachineInstr *, 4> Insts;
3509 for (
auto Reg : getOrCreateVRegs(PI)) {
3510 auto MIB = MIRBuilder.
buildInstr(TargetOpcode::G_PHI, {
Reg}, {});
3514 PendingPHIs.emplace_back(&PI, std::move(Insts));
3518bool IRTranslator::translateAtomicCmpXchg(
const User &U,
3522 auto Flags = TLI->getAtomicMemOperandFlags(
I, *DL);
3524 auto Res = getOrCreateVRegs(
I);
3527 Register Addr = getOrCreateVReg(*
I.getPointerOperand());
3528 Register Cmp = getOrCreateVReg(*
I.getCompareOperand());
3529 Register NewVal = getOrCreateVReg(*
I.getNewValOperand());
3532 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3533 *MF->getMachineMemOperand(
3534 MachinePointerInfo(
I.getPointerOperand()), Flags, MRI->getType(Cmp),
3535 getMemOpAlign(
I),
I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3536 I.getSuccessOrdering(),
I.getFailureOrdering()));
3540bool IRTranslator::translateAtomicRMW(
const User &U,
3542 if (!mayTranslateUserTypes(U))
3546 auto Flags = TLI->getAtomicMemOperandFlags(
I, *DL);
3549 Register Addr = getOrCreateVReg(*
I.getPointerOperand());
3550 Register Val = getOrCreateVReg(*
I.getValOperand());
3552 unsigned Opcode = 0;
3553 switch (
I.getOperation()) {
3557 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3560 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3563 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3566 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3569 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3572 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3575 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3578 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3581 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3584 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3587 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3590 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3593 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3596 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3599 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3602 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
3605 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
3608 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUMNUM;
3611 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUMNUM;
3614 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3617 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3620 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3623 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3628 Opcode, Res, Addr, Val,
3629 *MF->getMachineMemOperand(MachinePointerInfo(
I.getPointerOperand()),
3630 Flags, MRI->getType(Val), getMemOpAlign(
I),
3631 I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3636bool IRTranslator::translateFence(
const User &U,
3640 Fence.getSyncScopeID());
3644bool IRTranslator::translateFreeze(
const User &U,
3650 "Freeze with different source and destination type?");
3652 for (
unsigned I = 0;
I < DstRegs.
size(); ++
I) {
3659void IRTranslator::finishPendingPhis() {
3662 GISelObserverWrapper WrapperObserver(&
Verifier);
3663 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3665 for (
auto &Phi : PendingPHIs) {
3666 const PHINode *PI =
Phi.first;
3670 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3676 SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
3680 for (
auto *Pred : getMachinePredBBs({IRPred, PI->
getParent()})) {
3684 for (
unsigned j = 0;
j < ValRegs.
size(); ++
j) {
3685 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3694void IRTranslator::translateDbgValueRecord(
Value *V,
bool HasArgList,
3700 "Expected inlined-at fields to agree");
3704 if (!V || HasArgList) {
3722 auto *ExprDerefRemoved =
3728 if (translateIfEntryValueArgument(
false, V, Variable, Expression, DL,
3740void IRTranslator::translateDbgDeclareRecord(
Value *
Address,
bool HasArgList,
3746 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << *Variable <<
"\n");
3751 "Expected inlined-at fields to agree");
3756 MF->setVariableDbgInfo(Variable, Expression,
3757 getOrCreateFrameIndex(*AI), DL);
3761 if (translateIfEntryValueArgument(
true,
Address, Variable,
3773void IRTranslator::translateDbgInfo(
const Instruction &Inst,
3778 assert(DLR->getLabel() &&
"Missing label");
3779 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3781 "Expected inlined-at fields to agree");
3790 translateDbgDeclareRecord(V, DVR.
hasArgList(), Variable, Expression,
3793 translateDbgValueRecord(V, DVR.
hasArgList(), Variable, Expression,
3798bool IRTranslator::translate(
const Instruction &Inst) {
3800 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3801 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3803 if (TLI->fallBackToDAGISel(Inst))
3807#define HANDLE_INST(NUM, OPCODE, CLASS) \
3808 case Instruction::OPCODE: \
3809 return translate##OPCODE(Inst, *CurBuilder.get());
3810#include "llvm/IR/Instruction.def"
3819 if (
auto CurrInstDL = CurBuilder->getDL())
3820 EntryBuilder->setDebugLoc(
DebugLoc());
3826 EntryBuilder->buildConstant(
Reg, *CI);
3830 CF = ConstantFP::get(CF->getContext(), CF->getValue());
3831 EntryBuilder->buildFConstant(
Reg, *CF);
3833 EntryBuilder->buildUndef(
Reg);
3835 EntryBuilder->buildConstant(
Reg, 0);
3837 EntryBuilder->buildGlobalValue(
Reg, GV);
3839 Register Addr = getOrCreateVReg(*CPA->getPointer());
3840 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3841 EntryBuilder->buildConstantPtrAuth(
Reg, CPA, Addr, AddrDisc);
3843 Constant &Elt = *CAZ->getElementValue(0u);
3845 EntryBuilder->buildSplatVector(
Reg, getOrCreateVReg(Elt));
3849 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3851 return translateCopy(
C, Elt, *EntryBuilder);
3853 EntryBuilder->buildSplatBuildVector(
Reg, getOrCreateVReg(Elt));
3856 if (CV->getNumElements() == 1)
3857 return translateCopy(
C, *CV->getElementAsConstant(0), *EntryBuilder);
3859 for (
unsigned i = 0; i < CV->getNumElements(); ++i) {
3860 Constant &Elt = *CV->getElementAsConstant(i);
3861 Ops.push_back(getOrCreateVReg(Elt));
3863 EntryBuilder->buildBuildVector(
Reg,
Ops);
3865 switch(
CE->getOpcode()) {
3866#define HANDLE_INST(NUM, OPCODE, CLASS) \
3867 case Instruction::OPCODE: \
3868 return translate##OPCODE(*CE, *EntryBuilder.get());
3869#include "llvm/IR/Instruction.def"
3874 if (CV->getNumOperands() == 1)
3875 return translateCopy(
C, *CV->getOperand(0), *EntryBuilder);
3877 for (
unsigned i = 0; i < CV->getNumOperands(); ++i) {
3878 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3880 EntryBuilder->buildBuildVector(
Reg,
Ops);
3882 EntryBuilder->buildBlockAddress(
Reg, BA);
3889bool IRTranslator::mayTranslateUserTypes(
const User &U)
const {
3890 const TargetMachine &TM = TLI->getTargetMachine();
3899 (!
U.getType()->getScalarType()->isBFloatTy() &&
3901 return V->getType()->getScalarType()->isBFloatTy();
3905bool IRTranslator::finalizeBasicBlock(
const BasicBlock &BB,
3907 for (
auto &BTB : SL->BitTestCases) {
3910 emitBitTestHeader(BTB, BTB.Parent);
3912 BranchProbability UnhandledProb = BTB.Prob;
3913 for (
unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3914 UnhandledProb -= BTB.Cases[
j].ExtraProb;
3916 MachineBasicBlock *
MBB = BTB.Cases[
j].ThisBB;
3925 MachineBasicBlock *NextMBB;
3926 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3929 NextMBB = BTB.Cases[
j + 1].TargetBB;
3930 }
else if (j + 1 == ej) {
3932 NextMBB = BTB.Default;
3935 NextMBB = BTB.Cases[
j + 1].ThisBB;
3938 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
MBB);
3940 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3944 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3945 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3948 BTB.Cases.pop_back();
3954 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3955 BTB.Default->getBasicBlock()};
3956 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3957 if (!BTB.ContiguousRange) {
3958 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3961 SL->BitTestCases.clear();
3963 for (
auto &JTCase : SL->JTCases) {
3965 if (!JTCase.first.Emitted)
3966 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3968 emitJumpTable(JTCase.second, JTCase.second.MBB);
3970 SL->JTCases.clear();
3972 for (
auto &SwCase : SL->SwitchCases)
3973 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3974 SL->SwitchCases.clear();
3978 if (
SP.shouldEmitSDCheck(BB)) {
3979 bool FunctionBasedInstrumentation =
3980 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent(), *Libcalls);
3981 SPDescriptor.initialize(&BB, &
MBB, FunctionBasedInstrumentation);
3984 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3987 }
else if (SPDescriptor.shouldEmitStackProtector()) {
3988 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3989 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3998 ParentMBB, *MF->getSubtarget().getInstrInfo());
4001 SuccessMBB->
splice(SuccessMBB->
end(), ParentMBB, SplitPoint,
4005 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
4009 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
4010 if (FailureMBB->
empty()) {
4011 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
4016 SPDescriptor.resetPerBBState();
4023 CurBuilder->setInsertPt(*ParentBB, ParentBB->
end());
4027 LLT PtrMemTy =
getLLTForMVT(TLI->getPointerMemTy(*DL));
4033 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
4040 ->buildLoad(PtrMemTy, StackSlotPtr,
4045 if (TLI->useStackGuardXorFP()) {
4046 LLVM_DEBUG(
dbgs() <<
"Stack protector xor'ing with FP not yet implemented");
4051 if (
const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M, *Libcalls)) {
4063 FunctionType *FnTy = GuardCheckFn->getFunctionType();
4064 assert(FnTy->getNumParams() == 1 &&
"Invalid function signature");
4065 ISD::ArgFlagsTy
Flags;
4066 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
4068 CallLowering::ArgInfo GuardArgInfo(
4069 {GuardVal, FnTy->getParamType(0), {
Flags}});
4071 CallLowering::CallLoweringInfo
Info;
4072 Info.OrigArgs.push_back(GuardArgInfo);
4073 Info.CallConv = GuardCheckFn->getCallingConv();
4076 if (!CLI->lowerCall(MIRBuilder, Info)) {
4077 LLVM_DEBUG(
dbgs() <<
"Failed to lower call to stack protector check\n");
4089 getStackGuard(Guard, *CurBuilder);
4092 const Value *IRGuard = TLI->getSDagStackGuard(M, *Libcalls);
4093 Register GuardPtr = getOrCreateVReg(*IRGuard);
4096 ->buildLoad(PtrMemTy, GuardPtr,
4115 const RTLIB::LibcallImpl LibcallImpl =
4116 Libcalls->getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
4117 if (LibcallImpl == RTLIB::Unsupported)
4120 CurBuilder->setInsertPt(*FailureBB, FailureBB->
end());
4122 CallLowering::CallLoweringInfo
Info;
4123 Info.CallConv = Libcalls->getLibcallImplCallingConv(LibcallImpl);
4125 StringRef LibcallName =
4130 if (!CLI->lowerCall(*CurBuilder, Info)) {
4131 LLVM_DEBUG(
dbgs() <<
"Failed to lower call to stack protector fail\n");
4136 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4138 CurBuilder->buildInstr(TargetOpcode::G_TRAP);
4143void IRTranslator::finalizeFunction() {
4146 PendingPHIs.clear();
4148 FrameIndices.clear();
4149 MachinePreds.clear();
4153 EntryBuilder.reset();
4156 SPDescriptor.resetPerFunctionState();
4169 return CI && CI->isMustTailCall();
4176 ORE = std::make_unique<OptimizationRemarkEmitter>(&
F);
4177 CLI = MF->getSubtarget().getCallLowering();
4179 if (CLI->fallBackToDAGISel(*MF)) {
4181 F.getSubprogram(), &
F.getEntryBlock());
4182 R <<
"unable to lower function: "
4183 <<
ore::NV(
"Prototype",
F.getFunctionType());
4197 : TPC->isGISelCSEEnabled();
4203 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4204 CSEInfo = &
Wrapper.get(TPC->getCSEConfig());
4205 EntryBuilder->setCSEInfo(CSEInfo);
4206 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4207 CurBuilder->setCSEInfo(CSEInfo);
4209 EntryBuilder = std::make_unique<MachineIRBuilder>();
4210 CurBuilder = std::make_unique<MachineIRBuilder>();
4213 CurBuilder->setMF(*MF);
4214 EntryBuilder->setMF(*MF);
4215 MRI = &MF->getRegInfo();
4216 DL = &
F.getDataLayout();
4226 FuncInfo.BPI =
nullptr;
4233 *
F.getParent(), Subtarget);
4235 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4237 SL = std::make_unique<GISelSwitchLowering>(
this, FuncInfo);
4238 SL->init(*TLI, TM, *DL);
4240 assert(PendingPHIs.empty() &&
"stale PHIs");
4244 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4247 F.getSubprogram(), &
F.getEntryBlock());
4248 R <<
"unable to translate in big endian mode";
4259 EntryBuilder->setMBB(*EntryBB);
4261 DebugLoc DbgLoc =
F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4262 SwiftError.setFunction(CurMF);
4263 SwiftError.createEntriesInEntryBlock(DbgLoc);
4265 bool IsVarArg =
F.isVarArg();
4266 bool HasMustTailInVarArgFn =
false;
4269 FuncInfo.MBBMap.resize(
F.getMaxBlockNumber());
4273 MBB = MF->CreateMachineBasicBlock(&BB);
4281 if (!BA->hasZeroLiveUses())
4285 if (!HasMustTailInVarArgFn)
4289 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4292 EntryBB->addSuccessor(&getMBB(
F.front()));
4297 if (DL->getTypeStoreSize(Arg.
getType()).isZero())
4302 if (CLI->supportSwiftError() && Arg.hasSwiftErrorAttr()) {
4303 assert(VRegs.
size() == 1 &&
"Too many vregs for Swift error");
4304 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4308 if (!CLI->lowerFormalArguments(*EntryBuilder,
F, VRegArgs, FuncInfo)) {
4310 F.getSubprogram(), &
F.getEntryBlock());
4311 R <<
"unable to lower arguments: "
4312 <<
ore::NV(
"Prototype",
F.getFunctionType());
4319 if (EnableCSE && CSEInfo)
4324 DILocationVerifier Verifier;
4332 CurBuilder->setMBB(
MBB);
4333 HasTailCall =
false;
4343 Verifier.setCurrentInst(&Inst);
4347 translateDbgInfo(Inst, *CurBuilder);
4349 if (translate(Inst))
4354 R <<
"unable to translate instruction: " <<
ore::NV(
"Opcode", &Inst);
4356 if (ORE->allowExtraAnalysis(
"gisel-irtranslator")) {
4357 std::string InstStrStorage;
4361 R <<
": '" << InstStrStorage <<
"'";
4368 if (!finalizeBasicBlock(*BB,
MBB)) {
4370 BB->getTerminator()->getDebugLoc(), BB);
4371 R <<
"unable to translate basic block";
4381 finishPendingPhis();
4383 SwiftError.propagateVRegs();
4388 assert(EntryBB->succ_size() == 1 &&
4389 "Custom BB used for lowering should have only one successor");
4393 "LLVM-IR entry block has a predecessor!?");
4396 NewEntryBB.
splice(NewEntryBB.
begin(), EntryBB, EntryBB->begin(),
4405 EntryBB->removeSuccessor(&NewEntryBB);
4406 MF->remove(EntryBB);
4407 MF->deleteMachineBasicBlock(EntryBB);
4409 assert(&MF->front() == &NewEntryBB &&
4410 "New entry wasn't next in the list of basic block!");
4414 SP.copyToMachineFrameInfo(MF->getFrameInfo());
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This contains common code to allow clients to notify changes to machine instr.
const HexagonInstrInfo * TII
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static unsigned getConvOpcode(Intrinsic::ID ID)
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
Machine Check Debug Module
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
OptimizedStructLayoutField Field
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
verify safepoint Safepoint IR Verifier
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
const Value * getArraySize() const
Get the number of elements allocated.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An immutable pass that tracks lazily created AssumptionCache objects.
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
LLVM Basic Block Representation.
unsigned getNumber() const
const Function * getParent() const
Return the enclosing method, or null if none.
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
The address of a basic block.
static LLVM_ABI BlockAddress * lookup(const BasicBlock *BB)
Lookup an existing BlockAddress constant for the given BasicBlock.
Legacy analysis pass which computes BlockFrequencyInfo.
Legacy analysis pass which computes BranchProbabilityInfo.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getOne()
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
bool isFPPredicate() const
bool isIntPredicate() const
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
LLVM_ABI bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
ArrayRef< uint64_t > getElements() const
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
A parsed version of the target data layout string in and methods for querying it.
Value * getAddress() const
DILabel * getLabel() const
DebugLoc getDebugLoc() const
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
bool isDbgDeclare() const
Class representing an expression and its matching format.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
The actual analysis pass wrapper.
Simple wrapper that does the following.
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isTailCall(const MachineInstr &MI) const override
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static bool getUseExtended()
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
static LLT integer(unsigned SizeInBits)
LLT changeElementSize(unsigned NewEltSize) const
If this type is a vector, return a vector with the same number of elements but the new element size.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
static LocationSize precise(uint64_t Value)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
void push_back(MachineInstr *MI)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
LLVM_ABI void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
LLVM_ABI bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
int getStackProtectorIndex() const
Return the index for the stack protector object.
MachineFunctionPass(char &ID)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
Helper class to build MachineInstr.
MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_FPTOUI_SAT Src0.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Int = G_FMODF Src.
LLVMContext & getContext() const
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildResetFPMode()
Build and insert G_RESET_FPMODE.
MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_FPTOSI_SAT Src0.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildGetRounding(const DstOp &Dst)
Build and insert Dst = G_GET_ROUNDING.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildSetFPMode(const SrcOp &Src)
Build and insert G_SET_FPMODE Src.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildSetRounding(const SrcOp &Src)
Build and insert G_SET_ROUNDING.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildResetFPEnv()
Build and insert G_RESET_FPENV.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = / G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
const DebugLoc & getDebugLoc()
Get the current instruction's debug location.
MachineInstrBuilder buildTrap(bool Debug=false)
Build and insert G_TRAP or G_DEBUGTRAP.
MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Fract, Exp = G_FFREXP Src.
MachineInstrBuilder buildFSincos(const DstOp &Sin, const DstOp &Cos, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)
Build and insert Sin, Cos = G_FSINCOS Src.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Idx0.
const DataLayout & getDataLayout() const
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP PredOp0, Op1.
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FADD Op0, Op1.
MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)
Build and insert G_SET_FPENV Src.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
static LLVM_ABI uint32_t copyFlagsFromInstruction(const Instruction &I)
LLVM_ABI void setDeactivationSymbol(MachineFunction &MF, Value *DS)
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Class to install both of the above.
Wrapper class representing virtual and physical registers.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Primary interface to the complete machine description for the target machine.
const Triple & getTargetTriple() const
const Target & getTarget() const
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
Target-Independent Code Generator Pass Configuration Options.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const CallLowering * getCallLowering() const
virtual const TargetLowering * getTargetLowering() const
bool isSPIRV() const
Tests whether the target is SPIR-V (32/64-bit/Logical).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getZero()
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isTokenTy() const
Return true if this is 'token'.
bool isVoidTy() const
Return true if this is 'void'.
BasicBlock * getSuccessor(unsigned i=0) const
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
const ParentTy * getParent() const
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebIgnore
This corresponds to "fpexcept.ignore".
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< PhiNode * > Phi
NodeAddr< CodeNode * > Code
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
FunctionAddr VTableAddr Value
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
gep_type_iterator gep_type_end(const User *GEP)
MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)
Find the split point at which to splice the end of BB into its success stack protector check machine ...
LLVM_ABI LLT getLLTForMVT(MVT Ty)
Get a rough equivalent of an LLT for a given MVT.
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
auto succ_size(const MachineBasicBlock *BB)
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
CodeGenOptLevel
Code generation optimization level.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
@ Global
Append to llvm.global_dtors.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueLLTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
MachineBasicBlock * Parent
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
MachineBasicBlock * ThisBB
struct PredInfoPair PredInfo
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
Register Reg
The virtual register containing the index of the jump table entry to jump to.
MachineBasicBlock * Default
The MBB of the default bb, which is a successor of the range check MBB.
unsigned JTI
The JumpTableIndex for this jump table in the function.
MachineBasicBlock * MBB
The MBB into which to emit the code for the indirect jump.