#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "irtranslator"

    cl::desc("Should enable CSE in irtranslator"),

  MF.getProperties().setFailedISel();
  bool IsGlobalISelAbortEnabled =
  if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
    R << (" (in function: " + MF.getName() + ")").str();
  if (IsGlobalISelAbortEnabled)

  DILocationVerifier() = default;
  ~DILocationVerifier() override = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");
                      << " was copied to " << MI);
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
                  Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)

  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
    return *VMap.getVRegs(Val);

  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
         "Don't know how to create an empty vreg");
                  Offsets->empty() ? Offsets : nullptr);
  for (auto Ty : SplitTys)
    VRegs->push_back(MRI->createGenericVirtualRegister(Ty));

  while (auto Elt = C.getAggregateElement(Idx++)) {
    auto EltRegs = getOrCreateVRegs(*Elt);
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));

    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               MF->getFunction().getSubprogram(),
                               &MF->getFunction().getEntryBlock());
    R << "unable to translate constant: " << ore::NV("Type", Val.getType());

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
    return MapEntry->second;
  Size = std::max<uint64_t>(Size, 1u);
  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);

    return SI->getAlign();
    return LI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);

  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");

  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);

  return U.getType()->getScalarType()->isBFloatTy() ||
  return V->getType()->getScalarType()->isBFloatTy();
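
// Generic arithmetic: binary, unary, and compare instructions map 1:1 onto
// G_* opcodes; operands are fetched (or lazily created) as vregs first.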
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));

  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);

bool IRTranslator::translateCompare(const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    VRegs = getOrCreateVRegs(*Ret);
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);

void IRTranslator::emitBranchForMergedCondition(
    Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);

  SwitchCG::CaseBlock CB(
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);

    return I->getParent() == BB;
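
// findMergedConditions recursively splits a short-circuit and/or condition
// tree into a chain of conditional branches, distributing the incoming
// true/false edge probabilities across the new intermediate blocks.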
void IRTranslator::findMergedConditions(
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");

    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,

  const Value *BOpOp0, *BOpOp1;
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,

  MachineBasicBlock *TmpBB =

  if (Opc == Instruction::Or) {
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
    assert(Opc == Instruction::And && "Unknown merge op!");
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
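
// shouldEmitAsBranches keeps the two-case branch chain only when the pair of
// compares cannot trivially be folded back into a single case block.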
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  if (Cases.size() != 2)

  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      Cases[0].TrueBB == Cases[1].ThisBB)
      Cases[0].FalseBB == Cases[1].ThisBB)

  auto &CurMBB = MIRBuilder.getMBB();
      !CurMBB.isLayoutSuccessor(Succ0MBB))
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));

  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  using namespace PatternMatch;
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      if (shouldEmitAsBranches(SL->SwitchCases)) {
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);
      SL->SwitchCases.clear();

                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());
  emitSwitchCase(CB, &CurMBB, *CurBuilder);

    Src->addSuccessorWithoutProb(Dst);
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);

  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
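
// Switch lowering: build one CaseCluster per case, let SwitchLowering form
// jump tables and bit-test clusters, then drain a work list of cluster ranges.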
  using namespace SwitchCG;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
                 : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  if (Clusters.empty()) {

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
      if (C.Kind == CC_BitTests)
      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        C.High->getValue().print(dbgs(), true);

  assert(!Clusters.empty());
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    if (NumClusters > 3 &&
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
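
// splitWorkItem performs a balanced binary split of a large cluster range
// around a pivot value, queueing the left and right halves as new work items.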
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);
  const ConstantInt *Pivot = PivotCluster->Low;

  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});

  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
    SL->SwitchCases.push_back(CB);

  assert(JT.Reg && "Should lower JT Header first!");

  MachineIRBuilder MIB(*HeaderBB->getParent());
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  auto Cst = getOrCreateVReg(

  if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
           "Can only handle SLE ranges");
    const LLT CmpTy = MRI->getType(CmpOpReg);
    auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
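
// Jump-table work items: the header block performs the range check, then
// control flows either into the jump-table block or to the fallthrough/default.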
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
    addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;
  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);

  JTH->HeaderBB = CurMBB;
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
    JTH->Emitted = true;

    bool FallthroughUnreachable,
  using namespace SwitchCG;
  if (I->Low == I->High) {

  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
  emitSwitchCase(CB, SwitchMBB, MIB);

  MachineIRBuilder &MIB = *CurBuilder;
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);
  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  LLT MaskTy = SwitchOpTy;
  for (const SwitchCG::BitTestCase &Case : B.Cases) {
  if (SwitchOpTy != MaskTy)

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;
  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  if (!B.FallthroughUnreachable) {
                             RangeSub, RangeCst);

  MachineIRBuilder &MIB = *CurBuilder;
  if (PopCount == 1) {
    auto MaskTrailingZeros =
  } else if (PopCount == BB.Range) {
    auto MaskTrailingOnes =
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);

    addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
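
// Bit-test work items and the main per-cluster loop of lowerSwitchWorkItem:
// each cluster is tried as a bit test, then a jump table, then a plain range.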
bool IRTranslator::lowerBitTestWorkItem(
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;
  BTB->DefaultProb = UnhandledProbs;
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;

  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  if (++BBI != FuncInfo.MF->end())

              [](const CaseCluster &a, const CaseCluster &b) {
                return a.Prob != b.Prob
                           : a.Low->getValue().slt(b.Low->getValue());

    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      if (I->Prob > W.LastCluster->Prob)
      if (I->Kind == CC_Range && I->MBB == NextMBB) {

  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      Fallthrough = DefaultMBB;
      CurMF->insert(BBI, Fallthrough);
    UnhandledProbs -= I->Prob;

      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
    CurMBB = Fallthrough;

bool IRTranslator::translateIndirectBr(const User &U,
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    if (!AddedSuccessors.insert(Succ).second)

    return Arg->hasSwiftErrorAttr();
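
// Loads and stores are split per leaf value of the IR type; swifterror
// pointers bypass the memory operation and use the SwiftError vreg map instead.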
  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());

    assert(Regs.size() == 1 && "swifterror should be single pointer");
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);

      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
    if (AA->pointsToConstantMemory(

  for (unsigned i = 0; i < Regs.size(); ++i) {
    Align BaseAlign = getMemOpAlign(LI);
        MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())

  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i]);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
                                        SI.getAAMetadata(), nullptr,
                                        SI.getSyncScopeID(), SI.getOrdering());

  const Value *Src = U.getOperand(0);
    for (auto Idx : EVI->indices())
    for (auto Idx : IVI->indices())
      DL.getIndexedOffsetInType(Src->getType(), Indices));

bool IRTranslator::translateExtractValue(const User &U,
  const Value *Src = U.getOperand(0);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  auto &DstRegs = allocateVRegs(U);
  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

bool IRTranslator::translateInsertValue(const User &U,
  const Value *Src = U.getOperand(0);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  auto *InsertedIt = InsertedRegs.begin();
  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
      DstRegs[i] = SrcRegs[i];

bool IRTranslator::translateSelect(const User &U,
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

bool IRTranslator::translateCopy(const User &U, const Value &V,
  auto &Regs = *VMap.getVRegs(U);
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);

bool IRTranslator::translateBitCast(const User &U,
    return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
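
// GEP translation folds constant offsets into a single G_PTR_ADD and scales
// variable indices with a multiply, splatting scalar indices when the result
// is a vector of pointers.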
bool IRTranslator::translateGetElementPtr(const User &U,
  Value &Op0 = *U.getOperand(0);
  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);

  uint32_t PtrAddFlags = 0;
  auto PtrAddFlagsWithConst = [&](int64_t Offset) {

  unsigned VectorWidth = 0;
  bool WantSplatVector = false;
    WantSplatVector = VectorWidth > 1;

  if (WantSplatVector && !PtrTy.isVector()) {
    OffsetIRTy = DL->getIndexType(PtrIRTy);

    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;
                               PtrAddFlagsWithConst(Offset))

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {

      if (ElementSize != 1) {
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
        GepOffsetReg = IdxReg;
          MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           PtrAddFlagsWithConst(Offset));

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
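
// memcpy/memmove/memset family: operands become vregs, the size operand is
// normalized to the narrowest pointer width seen, and explicit memory
// operands carry the destination/source alignment and AA metadata.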
bool IRTranslator::translateMemFunc(const CallInst &CI,
  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);

  if (MRI->getType(SizeOpReg) != SizeTy)

  ConstantInt *CopySize = nullptr;
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    DstAlign = MSI->getDestAlign().valueOrOne();

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {

  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));

bool IRTranslator::translateTrap(const CallInst &CI,
  StringRef TrapFuncName =
      CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {

  CallLowering::CallLoweringInfo Info;
  if (Opcode == TargetOpcode::G_UBSANTRAP)
  return CLI->lowerCall(MIRBuilder, Info);

bool IRTranslator::translateVectorInterleave2Intrinsic(
         "This function can only be called on the interleave2 intrinsic!");
  Register Res = getOrCreateVReg(CI);
  LLT OpTy = MRI->getType(Op0);

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
         "This function can only be called on the deinterleave2 intrinsic!");
  LLT ResTy = MRI->getType(Res[0]);

void IRTranslator::getStackGuard(Register DstReg,
      TLI->getSDagStackGuard(*MF->getFunction().getParent(), *Libcalls);
    Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));

  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass());
  MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
  MachinePointerInfo MPInfo(Global);
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
      Op, {ResRegs[0], ResRegs[1]},

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
  Register Dst = getOrCreateVReg(CI);
  MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
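
// Simple intrinsics map directly to a generic opcode whose only inputs are the
// call arguments; anything listed in this switch is handled by
// translateSimpleIntrinsic below.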
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::atan2:
    return TargetOpcode::G_FATAN2;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::minimumnum:
    return TargetOpcode::G_FMINIMUMNUM;
  case Intrinsic::maximumnum:
    return TargetOpcode::G_FMAXIMUMNUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::experimental_vector_compress:
    return TargetOpcode::G_VECTOR_COMPRESS;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
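
// translateSimpleIntrinsic emits the mapped opcode with the call's arguments
// as inputs; constrained FP intrinsics get their own G_STRICT_* mapping below.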
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  for (const auto &Arg : CI.args())
  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,

  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;

bool IRTranslator::translateConstrainedFPIntrinsic(

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();

bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");

  MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);

  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;

bool IRTranslator::translateConvergenceControlIntrinsic(
  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
  if (ID == Intrinsic::experimental_convergence_loop) {
    assert(Bundle && "Expected a convergence control token.");
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
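
// translateKnownIntrinsic: intrinsics with dedicated lowering are handled in
// the large switch that follows; everything else falls through to the generic
// G_INTRINSIC path in translateIntrinsic.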
  if (ORE->enabled()) {
      MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))

  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
        MF->getFunction().hasOptNone())
    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;
  case Intrinsic::fake_use: {
    for (const auto &Arg : CI.args())
    MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
    MF->setHasFakeUses(true);
  case Intrinsic::dbg_declare: {
  case Intrinsic::dbg_label: {
           "Expected inlined-at fields to agree");
  case Intrinsic::vaend:
  case Intrinsic::vastart: {
    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                ListSize, Alignment));
  case Intrinsic::dbg_assign:
  case Intrinsic::dbg_value: {
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
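
// fmuladd becomes a single G_FMA when FP op fusion is permitted and the target
// reports FMA as faster than separate multiply and add; otherwise it is
// emitted as G_FMUL followed by G_FADD.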
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    Register Dst = getOrCreateVReg(CI);
        TLI->isFMAFasterThanFMulAndFAdd(*MF,
                                        TLI->getValueType(*DL, CI.getType()))) {
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::frexp: {
  case Intrinsic::modf: {
    MIRBuilder.buildModf(VRegs[0], VRegs[1],
  case Intrinsic::sincos: {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    unsigned TypeID = MF->getTypeIDFor(GV);
  case Intrinsic::objectsize:
  case Intrinsic::is_constant:
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
  case Intrinsic::stackprotector: {
    if (TLI->useLoadStackGuardNode(*CI.getModule())) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);
        GuardVal, getOrCreateVReg(*Slot),
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
  case Intrinsic::invariant_start: {
  case Intrinsic::invariant_end:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
  case Intrinsic::write_register: {
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
          MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    Register Dst = getOrCreateVReg(CI);
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      if (!MRI->getType(VecSrc).isVector())
        Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
                                                  : TargetOpcode::G_FMUL;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    LLT DstTy = MRI->getType(Dst);
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::amdgcn_cs_chain:
  case Intrinsic::amdgcn_call_whole_wave:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    std::optional<RoundingMode> RoundMode =
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
        .addImm((int)*RoundMode);
  case Intrinsic::is_fpclass: {
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
  case Intrinsic::set_fpenv: {
  case Intrinsic::reset_fpenv:
  case Intrinsic::set_fpmode: {
  case Intrinsic::reset_fpmode:
  case Intrinsic::get_rounding:
  case Intrinsic::set_rounding:
  case Intrinsic::vscale: {
  case Intrinsic::scmp:
    MIRBuilder.buildSCmp(getOrCreateVReg(CI),
  case Intrinsic::ucmp:
    MIRBuilder.buildUCmp(getOrCreateVReg(CI),
  case Intrinsic::vector_extract:
    return translateExtractVector(CI, MIRBuilder);
  case Intrinsic::vector_insert:
    return translateInsertVector(CI, MIRBuilder);
  case Intrinsic::stepvector: {
  case Intrinsic::prefetch: {
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
      return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
  case Intrinsic::reloc_none: {
    MIRBuilder.buildInstr(TargetOpcode::RELOC_NONE)
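
// Inline asm and ordinary calls: inline asm is delegated to the target's
// InlineAsmLowering; calls go through CallLowering with swifterror, pointer
// authentication, and convergence-token operands collected first.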
bool IRTranslator::translateInlineAsm(const CallBase &CB,
  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });

bool IRTranslator::translateCallBase(const CallBase &CB,
  for (const auto &Arg : CB.args()) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
    Args.push_back(getOrCreateVRegs(*Arg));

  if (ORE->enabled()) {
      MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

  std::optional<CallLowering::PtrAuthInfo> PAI;
    const Value *Key = Bundle->Inputs[0];
    if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
        !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
      Register DiscReg = getOrCreateVReg(*Discriminator);

    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);

  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,

    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))

    return translateInlineAsm(CI, MIRBuilder);

    if (translateCallBase(CI, MIRBuilder)) {

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))

  TargetLowering::IntrinsicInfo Info;
  bool IsTgtMemIntrinsic = TLI->getTgtMemIntrinsic(Info, CI, *MF, ID);
  return translateIntrinsic(CI, ID, MIRBuilder,
                            IsTgtMemIntrinsic ? &Info : nullptr);
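
// Generic intrinsic path: build a G_INTRINSIC* instruction, adding immediate
// and metadata operands directly, and attach a machine memory operand when the
// target describes the intrinsic as a memory intrinsic.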
bool IRTranslator::translateIntrinsic(
    ResultRegs = getOrCreateVRegs(CB);

      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      MIB.addImm(CI->getSExtValue());
      auto *MD = MDVal->getMetadata();
        MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
      if (VRegs.size() > 1)

  if (TgtMemIntrinsicInfo) {
    Align Alignment = TgtMemIntrinsicInfo->align.value_or(DL->getABITypeAlign(
            : LLT::scalar(TgtMemIntrinsicInfo->memVT.getStoreSizeInBits());

    MachinePointerInfo MPI;
    if (TgtMemIntrinsicInfo->ptrVal) {
      MPI = MachinePointerInfo(TgtMemIntrinsicInfo->ptrVal,
                               TgtMemIntrinsicInfo->offset);
      MPI = MachinePointerInfo(*TgtMemIntrinsicInfo->fallbackAddressSpace);
        nullptr, TgtMemIntrinsicInfo->ssid,

    auto *Token = Bundle->Inputs[0].get();
    Register TokenReg = getOrCreateVReg(*Token);

bool IRTranslator::findUnwindDestinations(
    UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();
    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;
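
// Invoke lowering brackets the call with EH labels where required, records the
// unwind destinations discovered above, and then branches to the normal
// successor.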
bool IRTranslator::translateInvoke(const User &U,
  MCContext &Context = MF->getContext();

  const Function *Fn = I.getCalledFunction();
  if (I.hasDeoptState())
      (MF->getTarget().getTargetTriple().isOSWindows() &&

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
  } else if (!translateCallBase(I, MIRBuilder))

    EndSymbol = Context.createTempSymbol();

  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =
  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);

  MIRBuilder.buildBr(ReturnMBB);
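
// callbr is handled for inline asm and for intrinsic callees; each indirect
// destination block is marked as an inline-asm branch target.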
bool IRTranslator::translateCallBr(const User &U,
  MachineBasicBlock *CallBrMBB = &MIRBuilder.getMBB();

  if (I.isInlineAsm()) {
    if (!translateIntrinsic(I, IID, MIRBuilder))

  SmallPtrSet<BasicBlock *, 8> Dests = {I.getDefaultDest()};
  MachineBasicBlock *Return = &getMBB(*I.getDefaultDest());

  for (BasicBlock *Dest : I.getIndirectDests()) {
    MachineBasicBlock &Target = getMBB(*Dest);
    Target.setIsInlineAsmBrIndirectTarget();
    Target.setLabelMustBeEmitted();
    if (Dests.insert(Dest).second)

bool IRTranslator::translateLandingPad(const User &U,
  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)

  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)

  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
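
// Static allocas become frame indices; dynamic allocas compute the byte size,
// round it up to the stack alignment, and emit a dynamic stack allocation.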
bool IRTranslator::translateAlloca(const User &U,
  Register Res = getOrCreateVReg(AI);
  int FI = getOrCreateFrameIndex(AI);
  if (MF->getTarget().getTargetTriple().isOSWindows())

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());

  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});

bool IRTranslator::translateUnreachable(const User &U,
  if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
                            MF->getTarget().Options.NoTrapAfterNoreturn))

bool IRTranslator::translateInsertElement(const User &U,
      FVT && FVT->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    Idx = getOrCreateVReg(*U.getOperand(2));
    if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
      const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

bool IRTranslator::translateInsertVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));

  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      CI = ConstantInt::get(CI->getContext(), NewIdx);
        ResultType && ResultType->getNumElements() == 1) {
          InputType && InputType->getNumElements() == 1) {
        return translateCopy(U, *U.getOperand(0), MIRBuilder);
      Register Idx = getOrCreateVReg(*CI);
    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
      getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),

bool IRTranslator::translateExtractElement(const User &U,
  if (const FixedVectorType *FVT =
    if (FVT->getNumElements() == 1)
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    Idx = getOrCreateVReg(*U.getOperand(1));
    if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
      const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

bool IRTranslator::translateExtractVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      CI = ConstantInt::get(CI->getContext(), NewIdx);
        ResultType && ResultType->getNumElements() == 1) {
          InputType && InputType->getNumElements() == 1) {
        return translateCopy(U, *U.getOperand(0), MIRBuilder);
      Register Idx = getOrCreateVReg(*CI);
    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
      getOrCreateVReg(*U.getOperand(0)),

bool IRTranslator::translateShuffleVector(const User &U,
  if (U.getOperand(0)->getType()->isScalableTy()) {
    Register Val = getOrCreateVReg(*U.getOperand(0));
        MRI->getType(Val).getElementType(), Val, 0);

    Mask = SVI->getShuffleMask();
    unsigned M = Mask[0];
    if (M == 0 || M == 1)
      return translateCopy(U, *U.getOperand(M), MIRBuilder);
          Dst, getOrCreateVReg(*U.getOperand(0)), M);
    } else if (M < SrcElts * 2) {
          Dst, getOrCreateVReg(*U.getOperand(1)), M - SrcElts);

    for (int M : Mask) {
      if (M == 0 || M == 1) {
        Ops.push_back(getOrCreateVReg(*U.getOperand(M)));
        if (!Undef.isValid()) {
          Undef = MRI->createGenericVirtualRegister(SrcTy);
        Ops.push_back(Undef);

  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
  PendingPHIs.emplace_back(&PI, std::move(Insts));
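
// Atomics: cmpxchg and atomicrmw carry their ordering, sync scope, and AA
// metadata on the machine memory operand attached to the generic instruction.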
bool IRTranslator::translateAtomicCmpXchg(const User &U,
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));

bool IRTranslator::translateAtomicRMW(const User &U,
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;

      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),

bool IRTranslator::translateFence(const User &U,

bool IRTranslator::translateFreeze(const User &U,
         "Freeze with different source and destination type?");
  for (unsigned I = 0; I < DstRegs.size(); ++I) {
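
// PHIs are created empty during block translation; finishPendingPhis fills in
// the incoming (value, predecessor) pairs once all blocks have their vregs.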
void IRTranslator::finishPendingPhis() {
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();

    SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);

void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
         "Expected inlined-at fields to agree");

  if (!V || HasArgList) {
    auto *ExprDerefRemoved =
  if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,

void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
         "Expected inlined-at fields to agree");
    MF->setVariableDbgInfo(Variable, Expression,
                           getOrCreateFrameIndex(*AI), DL);
  if (translateIfEntryValueArgument(true, Address, Variable,

void IRTranslator::translateDbgInfo(const Instruction &Inst,
      assert(DLR->getLabel() && "Missing label");
      assert(DLR->getLabel()->isValidLocationForIntrinsic(
             "Expected inlined-at fields to agree");
      translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
      translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
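
// Top-level per-instruction dispatch and constant materialization: constants
// are built with the entry-block builder so their vregs dominate all uses.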
3784bool IRTranslator::translate(
const Instruction &Inst) {
3786 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3787 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3789 if (TLI->fallBackToDAGISel(Inst))
3793#define HANDLE_INST(NUM, OPCODE, CLASS) \
3794 case Instruction::OPCODE: \
3795 return translate##OPCODE(Inst, *CurBuilder.get());
3796#include "llvm/IR/Instruction.def"
bool IRTranslator::translate(const Constant &C, Register Reg) {
  // Constants are emitted by EntryBuilder; make sure no stale debug location
  // from the instruction stream leaks onto them.
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C)) {
    CF = ConstantFP::get(CF->getContext(), CF->getValue());
    EntryBuilder->buildFConstant(Reg, *CF);
  } else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
    Register Addr = getOrCreateVReg(*CPA->getPointer());
    Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
    EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
  } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    Constant &Elt = *CAZ->getElementValue(0u);
    if (isa<ScalableVectorType>(CAZ->getType())) {
      EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
    } else {
      // Return the scalar if it is a <1 x Ty> vector.
      unsigned NumElts = CAZ->getElementCount().getFixedValue();
      if (NumElts == 1)
        return translateCopy(C, Elt, *EntryBuilder);
      EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
    }
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i)
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}
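// Illustrative note (added): constants are materialized by EntryBuilder into
// the entry block so they dominate every use; a use of i32 7, for instance,
// becomes roughly
//   %c:_(s32) = G_CONSTANT i32 7
// there, and all users simply read %c.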
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    // Emit the header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code into.
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // Second-to-last bit-test with a contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // The final bit test will not be used; record its PHI edge now and
        // drop it.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        BTB.Cases.pop_back();
        break;
      }
    }

    // This edge is used by the default case of the IR-level switch.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange)
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
  }
  SL->BitTestCases.clear();
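// Note (added): a bit-test cluster lowers a dense group of switch cases into a
// shift/mask test. Conceptually, for cases {0, 2, 5} sharing one destination,
// the emitted check behaves like
//   if ((1u << (X - Low)) & 0b100101) goto Dest;
// emitBitTestHeader materializes the range check and the shifted bit, and each
// emitBitTestCase ANDs it against one cluster's mask.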
  for (auto &JTCase : SL->JTCases) {
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
  if (SP.shouldEmitSDCheck(BB)) {
    bool FunctionBasedInstrumentation =
        TLI->getSSPStackGuardCheck(*MF->getFunction().getParent(), *Libcalls);
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  // Handle stack protector.
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    // The target provides a guard check function; no need to generate error
    // handling code or to split the current block.
    // ...
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

    // Split the parent block: everything from the split point onwards moves
    // into SuccessMBB, which becomes the normal fall-through path.
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    // Add the guard compare and conditional branch to the parent block.
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    // Codegen the failure block if we have not done so yet.
    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }

    // Clear the per-BB state.
    SPDescriptor.resetPerBBState();
  }
  return true;
}
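// Note (added): the descriptor yields three blocks. ParentMBB ends with the
// guard load and compare, SuccessMBB receives the original terminator (the
// normal return path), and FailureMBB calls the stack-protector-failure
// handler; ParentMBB conditionally branches to FailureMBB when the guard
// value has been clobbered.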
bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                          MachineBasicBlock *ParentBB) {
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());

  // ...
  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));

  // Load the guard value previously stored in the stack protector slot FI.
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  Register GuardVal =
      CurBuilder
          ->buildLoad(PtrMemTy, StackSlotPtr, /* volatile MMO on the slot */)
          .getReg(0);

  if (TLI->useStackGuardXorFP()) {
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
    // ...
  }

  // If the target provides a stack-guard check function, call it instead of
  // emitting the comparison inline.
  if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M, *Libcalls)) {
    // ...
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    // ...
    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
    // ...
  }

  // Otherwise load the guard through LOAD_STACK_GUARD or the IR guard
  // variable and compare it against the slot value.
  Register Guard;
  if (TLI->useLoadStackGuardNode(/* ... */)) {
    // ... (Guard = fresh pointer-sized vreg) ...
    getStackGuard(Guard, *CurBuilder);
  } else {
    const Value *IRGuard = TLI->getSDagStackGuard(M, *Libcalls);
    Register GuardPtr = getOrCreateVReg(*IRGuard);
    Guard = CurBuilder
                ->buildLoad(PtrMemTy, GuardPtr, /* MMO on the guard variable */)
                .getReg(0);
  }
  // ... (G_ICMP ne GuardVal, Guard; G_BRCOND to the failure block; return true)
}
bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *FailureBB) {
  const RTLIB::LibcallImpl LibcallImpl =
      Libcalls->getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
  if (LibcallImpl == RTLIB::Unsupported)
    return false;

  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

  CallLowering::CallLoweringInfo Info;
  Info.CallConv = Libcalls->getLibcallImplCallingConv(LibcallImpl);

  StringRef LibcallName =
      RTLIB::RuntimeLibcallsInfo::getLibcallImplName(LibcallImpl);
  // ... (set the callee symbol and a void return for the call) ...

  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  // Emit a trap after the failure call if the target requires it.
  const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
  if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
    CurBuilder->buildInstr(TargetOpcode::G_TRAP);

  return true;
}
void IRTranslator::finalizeFunction() {
  // Release the per-function state used during translation.
  PendingPHIs.clear();
  FrameIndices.clear();
  MachinePreds.clear();
  EntryBuilder.reset();
  CurBuilder.reset();
  SPDescriptor.resetPerFunctionState();
}

static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;
  // Walk the block looking for a variadic musttail call.
  return any_of(BB, [](const Instruction &I) {
    const CallInst *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  // ... (TPC, the GISel CSE analysis wrapper and CSEInfo are fetched here) ...
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();
  // ...
  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
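// Note (added): with CSE enabled both builders share one GISelCSEInfo, so two
// requests for, say, G_CONSTANT i64 0 in the entry block reuse a single vreg
// instead of emitting duplicate instructions; -enable-cse-in-irtranslator can
// force this behaviour on or off for debugging.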
  // Branch probability info is only used when optimizing.
  FuncInfo.BPI = nullptr;
  // ... (alias analysis, assumption cache and runtime-libcall info are
  //      initialized from *F.getParent() and the Subtarget) ...
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(*TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets that want big-endian support must opt in via enableBigEndian().
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *ORE, R);
    return false;
  }
  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
  for (const BasicBlock &BB : F) {
    auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];
    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    // Mark the block as IR-address-taken only if its block address is really
    // used.
    if (const BlockAddress *BA = BlockAddress::lookup(&BB))
      if (!BA->hasZeroLiveUses())
        MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make the arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));
  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *ORE, R);
    return false;
  }

  // Lower the formal arguments into the entry block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *ORE, R);
    return false;
  }
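// Note (added): lowerFormalArguments is target CallLowering code; for an
// AArch64 function taking (i32, i32) it would, roughly, emit COPYs of the w0
// and w1 argument registers in the entry block into the vregs collected in
// VRegArgs above.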
  // Visit blocks in reverse post-order so that defs are translated before
  // uses.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Insert new instructions at the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // Everything after a translated tail call is dead.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        // Translate any debug records attached to the instruction.
        translateDbgInfo(Inst, *CurBuilder);

        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;
          R << ": '" << InstStrStorage << "'";
        }

        reportTranslationError(*MF, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *ORE, R);
        return false;
      }
    }
  }
  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument-lowering/constants block into its single successor,
  // the LLVM-IR entry block, so the entry block is maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());
  // ... (live-ins are copied over, then the now-empty block is deleted) ...
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}
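// Note (added): the output of this pass can be inspected with, for example,
//   llc -global-isel -stop-after=irtranslator -o - input.ll
// which prints the generic MIR produced here, before the legalizer,
// register-bank selector and instruction selector run.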