#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "irtranslator"

        cl::desc("Should enable CSE in irtranslator"),
  MF.getProperties().setFailedISel();
  bool IsGlobalISelAbortEnabled =

  if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsGlobalISelAbortEnabled)
  DILocationVerifier() = default;
  ~DILocationVerifier() override = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

                      << " was copied to " << MI);

            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
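// The value-to-vreg helpers below split an IR value into one virtual register
// per LLT component (aggregates become several scalar/pointer vregs) and
// record the byte offset of each component next to its register.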
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
                    Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

    return *VMap.getVRegs(Val);

  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
         "Don't know how to create an empty vreg");
                    Offsets->empty() ? Offsets : nullptr);

  for (auto Ty : SplitTys)
    VRegs->push_back(MRI->createGenericVirtualRegister(Ty));

    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);

    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));

      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
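// Frame indices for static allocas are created lazily and cached in
// FrameIndices, so repeated queries for the same alloca reuse one stack
// object.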
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
    return MapEntry->second;

  Size = std::max<uint64_t>(Size, 1u);

  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
    return SI->getAlign();
    return LI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);

  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");

  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);

  return U.getType()->getScalarType()->isBFloatTy() ||
           return V->getType()->getScalarType()->isBFloatTy();
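// Binary and unary operators map directly onto generic opcodes. As an
// illustration (not part of this file), `%r = add i32 %a, %b` is emitted as
// roughly `%r:_(s32) = G_ADD %a, %b` in generic MIR.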
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,

  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,

  Register Op0 = getOrCreateVReg(*U.getOperand(0));

  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
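// Integer and FP compares are translated to G_ICMP / G_FCMP, carrying the IR
// predicate and instruction flags through unchanged.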
bool IRTranslator::translateCompare(const User &U,

  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    VRegs = getOrCreateVRegs(*Ret);

  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());

  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
void IRTranslator::emitBranchForMergedCondition(

    Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);

  SwitchCG::CaseBlock CB(
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);

    return I->getParent() == BB;
void IRTranslator::findMergedConditions(

  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");

    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,

  const Value *BOpOp0, *BOpOp1;

    if (BOpc == Instruction::And)
      BOpc = Instruction::Or;
    else if (BOpc == Instruction::Or)
      BOpc = Instruction::And;

  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();

    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,

  MachineBasicBlock *TmpBB =

  if (Opc == Instruction::Or) {

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);

    assert(Opc == Instruction::And && "Unknown merge op!");

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  if (Cases.size() != 2)

  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      Cases[0].TrueBB == Cases[1].ThisBB)
      Cases[0].FalseBB == Cases[1].ThisBB)
  auto &CurMBB = MIRBuilder.getMBB();

      !CurMBB.isLayoutSuccessor(Succ0MBB))

  for (const BasicBlock *Succ : successors(&BrInst))
    CurMBB.addSuccessor(&getMBB(*Succ));

  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  using namespace PatternMatch;
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {

    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;

    findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                         getEdgeProbability(&CurMBB, Succ0MBB),
                         getEdgeProbability(&CurMBB, Succ1MBB),

    assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

    if (shouldEmitAsBranches(SL->SwitchCases)) {
      emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
      SL->SwitchCases.erase(SL->SwitchCases.begin());

    for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
      MF->erase(SL->SwitchCases[I].ThisBB);
    SL->SwitchCases.clear();

                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  emitSwitchCase(CB, &CurMBB, *CurBuilder);
    Src->addSuccessorWithoutProb(Dst);
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);

  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
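// Switch lowering mirrors SelectionDAG's SwitchLoweringUtils: cases are first
// grouped into clusters (plain ranges, jump tables, or bit tests), and a
// worklist of cluster ranges is then lowered into compare-and-branch trees.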
  using namespace SwitchCG;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
                  : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  if (Clusters.empty()) {

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
      if (C.Kind == CC_BitTests)

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        C.High->getValue().print(dbgs(), true);

  assert(!Clusters.empty());

  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 &&
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  const ConstantInt *Pivot = PivotCluster->Low;

  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});

  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);

    SL->SwitchCases.push_back(CB);
  assert(JT.Reg && "Should lower JT Header first!");

  MachineIRBuilder MIB(*HeaderBB->getParent());

  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));

  JT.Reg = Sub.getReg(0);

  auto Cst = getOrCreateVReg(

  if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
           "Can only handle SLE ranges");

    const LLT CmpTy = MRI->getType(CmpOpReg);
    auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();

  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);

  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough;

  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
    JTH->Emitted = true;
    bool FallthroughUnreachable,
  using namespace SwitchCG;

  if (I->Low == I->High) {

  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
  emitSwitchCase(CB, SwitchMBB, MIB);
  MachineIRBuilder &MIB = *CurBuilder;

  Register SwitchOpReg = getOrCreateVReg(*B.SValue);
  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  LLT MaskTy = SwitchOpTy;
  for (const SwitchCG::BitTestCase &Case : B.Cases) {

  if (SwitchOpTy != MaskTy)

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  if (!B.FallthroughUnreachable) {
                                RangeSub, RangeCst);
  MachineIRBuilder &MIB = *CurBuilder;

  if (PopCount == 1) {
    auto MaskTrailingZeros =
  } else if (PopCount == BB.Range) {
    auto MaskTrailingOnes =

    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);

  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
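// Bit-test clusters test `1 << (x - low)` against a precomputed case mask, so
// a dense group of small cases costs only a shift, an AND and a conditional
// branch.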
bool IRTranslator::lowerBitTestWorkItem(
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();

  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;

  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  if (++BBI != FuncInfo.MF->end())

              [](const CaseCluster &a, const CaseCluster &b) {
                return a.Prob != b.Prob
                           : a.Low->getValue().slt(b.Low->getValue());

    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      if (I->Prob > W.LastCluster->Prob)
      if (I->Kind == CC_Range && I->MBB == NextMBB) {

  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      Fallthrough = DefaultMBB;
      CurMF->insert(BBI, Fallthrough);

    UnhandledProbs -= I->Prob;

      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {

      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {

      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {

    CurMBB = Fallthrough;
bool IRTranslator::translateIndirectBr(const User &U,

  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    if (!AddedSuccessors.insert(Succ).second)

    return Arg->hasSwiftErrorAttr();
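// Loads and stores are split the same way their IR values are: one G_LOAD or
// G_STORE is emitted per component register, each addressed at its recorded
// offset from the base pointer.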
  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());

  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);

  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());

    assert(Regs.size() == 1 && "swifterror should be single pointer");
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);

      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA->pointsToConstantMemory(

  for (unsigned i = 0; i < Regs.size(); ++i) {
    Align BaseAlign = getMemOpAlign(LI);
        MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())

  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());

  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i]);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
                                        SI.getAAMetadata(), nullptr,
                                        SI.getSyncScopeID(), SI.getOrdering());
  const Value *Src = U.getOperand(0);

    for (auto Idx : EVI->indices())
    for (auto Idx : IVI->indices())

      DL.getIndexedOffsetInType(Src->getType(), Indices));
bool IRTranslator::translateExtractValue(const User &U,
  const Value *Src = U.getOperand(0);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];
bool IRTranslator::translateInsertValue(const User &U,
  const Value *Src = U.getOperand(0);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
      DstRegs[i] = SrcRegs[i];
bool IRTranslator::translateSelect(const User &U,
  Register Tst = getOrCreateVReg(*U.getOperand(0));

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

bool IRTranslator::translateCopy(const User &U, const Value &V,
  auto &Regs = *VMap.getVRegs(U);
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);

bool IRTranslator::translateBitCast(const User &U,
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
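// GEPs become chains of G_PTR_ADD: constant indices are folded into a running
// byte offset, variable indices are scaled by the element size with G_MUL and
// added separately, and vector GEPs splat scalar operands first so the
// arithmetic stays element-wise.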
bool IRTranslator::translateGetElementPtr(const User &U,
  Value &Op0 = *U.getOperand(0);

  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);

  uint32_t PtrAddFlags = 0;

  auto PtrAddFlagsWithConst = [&](int64_t Offset) {

  unsigned VectorWidth = 0;

  bool WantSplatVector = false;
    WantSplatVector = VectorWidth > 1;

  if (WantSplatVector && !PtrTy.isVector()) {

  OffsetIRTy = DL->getIndexType(PtrIRTy);

    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;

                            PtrAddFlagsWithConst(Offset))

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {

      if (ElementSize != 1) {
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
        GepOffsetReg = IdxReg;

          MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           PtrAddFlagsWithConst(Offset));

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
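// The memcpy/memmove/memset family is lowered to G_MEMCPY / G_MEMMOVE /
// G_MEMSET, with explicit memory operands attached for the destination store
// and, where one exists, the source load.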
bool IRTranslator::translateMemFunc(const CallInst &CI,

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);

  if (MRI->getType(SizeOpReg) != SizeTy)

  ConstantInt *CopySize = nullptr;
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    DstAlign = MSI->getDestAlign().valueOrOne();

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {

  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
bool IRTranslator::translateTrap(const CallInst &CI,
  StringRef TrapFuncName =
      CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {

  CallLowering::CallLoweringInfo Info;
  if (Opcode == TargetOpcode::G_UBSANTRAP)
  return CLI->lowerCall(MIRBuilder, Info);

bool IRTranslator::translateVectorInterleave2Intrinsic(
         "This function can only be called on the interleave2 intrinsic!");
  Register Res = getOrCreateVReg(CI);
  LLT OpTy = MRI->getType(Op0);

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
         "This function can only be called on the deinterleave2 intrinsic!");
  LLT ResTy = MRI->getType(Res[0]);
void IRTranslator::getStackGuard(Register DstReg,
  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
    Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));

  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass());

  MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
      Op, {ResRegs[0], ResRegs[1]},

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
  Register Dst = getOrCreateVReg(CI);
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::atan2:
    return TargetOpcode::G_FATAN2;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::minimumnum:
    return TargetOpcode::G_FMINIMUMNUM;
  case Intrinsic::maximumnum:
    return TargetOpcode::G_FMAXIMUMNUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::experimental_vector_compress:
    return TargetOpcode::G_VECTOR_COMPRESS;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
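// "Simple" intrinsics are those whose operands map 1:1 onto a single generic
// opcode; translateSimpleIntrinsic below just collects the argument vregs and
// emits the opcode chosen by the switch above.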
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  for (const auto &Arg : CI.args())

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,

  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;

bool IRTranslator::translateConstrainedFPIntrinsic(
std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();

bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,

  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");

    MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);

  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;
bool IRTranslator::translateConvergenceControlIntrinsic(
  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);

  if (ID == Intrinsic::experimental_convergence_loop) {
    assert(Bundle && "Expected a convergence control token.");
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());

  if (ORE->enabled()) {
      MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
        MF->getFunction().hasOptNone())

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

  case Intrinsic::fake_use: {
    for (const auto &Arg : CI.args())
    MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
    MF->setHasFakeUses(true);

  case Intrinsic::dbg_declare: {
  case Intrinsic::dbg_label: {
           "Expected inlined-at fields to agree");

  case Intrinsic::vaend:
  case Intrinsic::vastart: {
    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                ListSize, Alignment));

  case Intrinsic::dbg_assign:
  case Intrinsic::dbg_value: {

  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    Register Dst = getOrCreateVReg(CI);
        TLI->isFMAFasterThanFMulAndFAdd(*MF,
                                        TLI->getValueType(*DL, CI.getType()))) {
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::frexp: {
  case Intrinsic::modf: {
    MIRBuilder.buildModf(VRegs[0], VRegs[1],
  case Intrinsic::sincos: {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    unsigned TypeID = MF->getTypeIDFor(GV);
  case Intrinsic::objectsize:
  case Intrinsic::is_constant:
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
  case Intrinsic::stackprotector: {
    if (TLI->useLoadStackGuardNode(*CI.getModule())) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);
        GuardVal, getOrCreateVReg(*Slot),
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
  case Intrinsic::invariant_start: {
  case Intrinsic::invariant_end:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
  case Intrinsic::write_register: {
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
          MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    Register Dst = getOrCreateVReg(CI);
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      if (!MRI->getType(VecSrc).isVector())
        Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
                                                  : TargetOpcode::G_FMUL;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    LLT DstTy = MRI->getType(Dst);
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::amdgcn_cs_chain:
  case Intrinsic::amdgcn_call_whole_wave:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    std::optional<RoundingMode> RoundMode =
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
        .addImm((int)*RoundMode);
  case Intrinsic::is_fpclass: {
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
  case Intrinsic::set_fpenv: {
  case Intrinsic::reset_fpenv:
  case Intrinsic::set_fpmode: {
  case Intrinsic::reset_fpmode:
  case Intrinsic::get_rounding:
  case Intrinsic::set_rounding:
  case Intrinsic::vscale: {
  case Intrinsic::scmp:
    MIRBuilder.buildSCmp(getOrCreateVReg(CI),
  case Intrinsic::ucmp:
    MIRBuilder.buildUCmp(getOrCreateVReg(CI),
  case Intrinsic::vector_extract:
    return translateExtractVector(CI, MIRBuilder);
  case Intrinsic::vector_insert:
    return translateInsertVector(CI, MIRBuilder);
  case Intrinsic::stepvector: {
  case Intrinsic::prefetch: {
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
    return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
  case Intrinsic::reloc_none: {
    MIRBuilder.buildInstr(TargetOpcode::RELOC_NONE)
bool IRTranslator::translateInlineAsm(const CallBase &CB,

  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
        dbgs() << "Inline asm lowering is not supported for this target yet\n");

      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
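// All ordinary calls funnel through translateCallBase: each IR argument is
// flattened into its vreg list, swifterror arguments get dedicated vregs, and
// the target's CallLowering implementation performs the ABI-specific work.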
bool IRTranslator::translateCallBase(const CallBase &CB,

  for (const auto &Arg : CB.args()) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
    Args.push_back(getOrCreateVRegs(*Arg));

  if (ORE->enabled()) {
      MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

  std::optional<CallLowering::PtrAuthInfo> PAI;
    const Value *Key = Bundle->Inputs[0];
    if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
        !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
      Register DiscReg = getOrCreateVReg(*Discriminator);

    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);

  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,

    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return translateInlineAsm(CI, MIRBuilder);

    if (translateCallBase(CI, MIRBuilder)) {

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))

  TargetLowering::IntrinsicInfo Info;
  bool IsTgtMemIntrinsic = TLI->getTgtMemIntrinsic(Info, CI, *MF, ID);

  return translateIntrinsic(CI, ID, MIRBuilder,
                            IsTgtMemIntrinsic ? &Info : nullptr);
bool IRTranslator::translateIntrinsic(
    ResultRegs = getOrCreateVRegs(CB);

        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());

      auto *MD = MDVal->getMetadata();
        MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);

      if (VRegs.size() > 1)

  if (TgtMemIntrinsicInfo) {
    Align Alignment = TgtMemIntrinsicInfo->align.value_or(DL->getABITypeAlign(
            : LLT::scalar(TgtMemIntrinsicInfo->memVT.getStoreSizeInBits());

    MachinePointerInfo MPI;
    if (TgtMemIntrinsicInfo->ptrVal) {
      MPI = MachinePointerInfo(TgtMemIntrinsicInfo->ptrVal,
                               TgtMemIntrinsicInfo->offset);
      MPI = MachinePointerInfo(*TgtMemIntrinsicInfo->fallbackAddressSpace);
        nullptr, TgtMemIntrinsicInfo->ssid,

    auto *Token = Bundle->Inputs[0].get();
    Register TokenReg = getOrCreateVReg(*Token);
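// For invokes, the EH control flow is made explicit: findUnwindDestinations
// walks the chain of funclet pads (cleanup pads, catchswitch handlers) from
// the landing pad and records every machine block an unwind edge can reach,
// together with its branch probability.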
bool IRTranslator::findUnwindDestinations(

      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();

      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;
bool IRTranslator::translateInvoke(const User &U,

  MCContext &Context = MF->getContext();

  const Function *Fn = I.getCalledFunction();

  if (I.hasDeoptState())

      (MF->getTarget().getTargetTriple().isOSWindows() &&

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
  } else if (!translateCallBase(I, MIRBuilder))

    EndSymbol = Context.createTempSymbol();

  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);

  MIRBuilder.buildBr(ReturnMBB);
bool IRTranslator::translateCallBr(const User &U,

  MachineBasicBlock *CallBrMBB = &MIRBuilder.getMBB();

  if (I.isInlineAsm()) {
    if (!translateIntrinsic(I, IID, MIRBuilder))

  SmallPtrSet<BasicBlock *, 8> Dests = {I.getDefaultDest()};
  MachineBasicBlock *Return = &getMBB(*I.getDefaultDest());

  for (BasicBlock *Dest : I.getIndirectDests()) {
    MachineBasicBlock &Target = getMBB(*Dest);
    Target.setIsInlineAsmBrIndirectTarget();
    Target.setLabelMustBeEmitted();
    if (Dests.insert(Dest).second)
bool IRTranslator::translateLandingPad(const User &U,

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)

  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)

  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
bool IRTranslator::translateAlloca(const User &U,

    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);

  if (MF->getTarget().getTargetTriple().isOSWindows())

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());

  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});

bool IRTranslator::translateUnreachable(const User &U,
  if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
                            MF->getTarget().Options.NoTrapAfterNoreturn))
bool IRTranslator::translateInsertElement(const User &U,
      FVT && FVT->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    Idx = getOrCreateVReg(*U.getOperand(2));
    if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
      const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

bool IRTranslator::translateInsertVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));

  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
    CI = ConstantInt::get(CI->getContext(), NewIdx);

      ResultType && ResultType->getNumElements() == 1) {
        InputType && InputType->getNumElements() == 1) {
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

      Register Idx = getOrCreateVReg(*CI);

    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);

      getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
bool IRTranslator::translateExtractElement(const User &U,
  if (const FixedVectorType *FVT =
    if (FVT->getNumElements() == 1)
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    Idx = getOrCreateVReg(*U.getOperand(1));
    if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
      const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

bool IRTranslator::translateExtractVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
    CI = ConstantInt::get(CI->getContext(), NewIdx);

      ResultType && ResultType->getNumElements() == 1) {
        InputType && InputType->getNumElements() == 1) {
        return translateCopy(U, *U.getOperand(0), MIRBuilder);

      Register Idx = getOrCreateVReg(*CI);

    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);

      getOrCreateVReg(*U.getOperand(0)),
bool IRTranslator::translateShuffleVector(const User &U,

  if (U.getOperand(0)->getType()->isScalableTy()) {
    Register Val = getOrCreateVReg(*U.getOperand(0));
        MRI->getType(Val).getElementType(), Val, 0);

    Mask = SVI->getShuffleMask();

    unsigned M = Mask[0];
    if (M == 0 || M == 1)
      return translateCopy(U, *U.getOperand(M), MIRBuilder);

          Dst, getOrCreateVReg(*U.getOperand(0)), M);
    } else if (M < SrcElts * 2) {
          Dst, getOrCreateVReg(*U.getOperand(1)), M - SrcElts);

    for (int M : Mask) {
      if (M == 0 || M == 1) {
        Ops.push_back(getOrCreateVReg(*U.getOperand(M)));
        if (!Undef.isValid()) {
          Undef = MRI->createGenericVirtualRegister(SrcTy);
        Ops.push_back(Undef);

  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
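// PHIs are created empty here and filled in by finishPendingPhis, once every
// predecessor block has been translated and its incoming value vregs exist.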
  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});

  PendingPHIs.emplace_back(&PI, std::move(Insts));
bool IRTranslator::translateAtomicCmpXchg(const User &U,

  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);

  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
bool IRTranslator::translateAtomicRMW(const User &U,

  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;

      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
bool IRTranslator::translateFence(const User &U,

bool IRTranslator::translateFreeze(const User &U,
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
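// finishPendingPhis runs after every block has been translated; it adds one
// (vreg, predecessor MBB) pair per incoming edge to each pending G_PHI,
// deduplicating predecessors that map to the same machine block.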
void IRTranslator::finishPendingPhis() {
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);

  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();

    SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
         "Expected inlined-at fields to agree");

  if (!V || HasArgList) {

  auto *ExprDerefRemoved =

  if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,

void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
         "Expected inlined-at fields to agree");

    MF->setVariableDbgInfo(Variable, Expression,
                           getOrCreateFrameIndex(*AI), DL);

  if (translateIfEntryValueArgument(true, Address, Variable,

void IRTranslator::translateDbgInfo(const Instruction &Inst,
      assert(DLR->getLabel() && "Missing label");
      assert(DLR->getLabel()->isValidLocationForIntrinsic(
             "Expected inlined-at fields to agree");

      translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
      translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
  CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));

  if (TLI->fallBackToDAGISel(Inst))

#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
3802 if (auto CurrInstDL = CurBuilder->getDL())
3803 EntryBuilder->setDebugLoc(DebugLoc());
3809 EntryBuilder->buildConstant(Reg, *CI);
3813 CF = ConstantFP::get(CF->getContext(), CF->getValue());
3814 EntryBuilder->buildFConstant(Reg, *CF);
3816 EntryBuilder->buildUndef(Reg);
3818 EntryBuilder->buildConstant(Reg, 0);
3820 EntryBuilder->buildGlobalValue(Reg, GV);
3822 Register Addr = getOrCreateVReg(*CPA->getPointer());
3823 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3824 EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3826 Constant &Elt = *CAZ->getElementValue(0u);
3828 EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
3832 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3834 return translateCopy(C, Elt, *EntryBuilder);
3836 EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
3839 if (CV->getNumElements() == 1)
3840 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3842 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3843 Constant &Elt = *CV->getElementAsConstant(i);
3844 Ops.push_back(getOrCreateVReg(Elt));
3846 EntryBuilder->buildBuildVector(Reg, Ops);
3848 switch (CE->getOpcode()) {
3849#define HANDLE_INST(NUM, OPCODE, CLASS) \
3850 case Instruction::OPCODE: \
3851 return translate##OPCODE(*CE, *EntryBuilder.get());
3852#include "llvm/IR/Instruction.def"
3857 if (CV->getNumOperands() == 1)
3858 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3860 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3861 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3863 EntryBuilder->buildBuildVector(Reg, Ops);
3865 EntryBuilder->buildBlockAddress(Reg, BA);
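// Illustrative note: constants are materialized once, in the entry block, by
// EntryBuilder: ConstantInt -> G_CONSTANT, ConstantFP -> G_FCONSTANT,
// undef -> G_IMPLICIT_DEF, null pointers -> G_CONSTANT 0, globals ->
// G_GLOBAL_VALUE, vector constants -> G_BUILD_VECTOR / G_SPLAT_VECTOR, block
// addresses -> buildBlockAddress, and constant expressions are re-dispatched
// through the same translate* handlers. For example, an i32 7 operand is
// expected to become roughly
//   %c:_(s32) = G_CONSTANT i32 7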
3872bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3874 for (auto &BTB : SL->BitTestCases) {
3877 emitBitTestHeader(BTB, BTB.Parent);
3879 BranchProbability UnhandledProb = BTB.Prob;
3880 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3881 UnhandledProb -= BTB.Cases[j].ExtraProb;
3883 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3892 MachineBasicBlock *NextMBB;
3893 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3896 NextMBB = BTB.Cases[j + 1].TargetBB;
3897 } else if (j + 1 == ej) {
3899 NextMBB = BTB.Default;
3902 NextMBB = BTB.Cases[j + 1].ThisBB;
3905 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3907 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3911 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3912 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3915 BTB.Cases.pop_back();
3921 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3922 BTB.Default->getBasicBlock()};
3923 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3924 if (!BTB.ContiguousRange) {
3925 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3928 SL->BitTestCases.clear();
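// Illustrative note: for a bit-test cluster, the header block emitted above
// range-checks the switch value and each case block tests one bit mask,
// branching to the shared destination on a hit; UnhandledProb is peeled off
// case by case so the remaining blocks keep sensible branch probabilities.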
3930 for (auto &JTCase : SL->JTCases) {
3932 if (!JTCase.first.Emitted)
3933 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3935 emitJumpTable(JTCase.second, JTCase.second.MBB);
3937 SL->JTCases.clear();
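// Illustrative note: a jump-table cluster is emitted as a header that
// range-checks the switch value, followed by an indexed branch through the
// table, which in generic MIR is expected to look roughly like
//   G_BRJT %table(p0), %jump-table.0, %idx(s64)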
3939 for (auto &SwCase : SL->SwitchCases)
3940 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3941 SL->SwitchCases.clear();
3945 if (SP.shouldEmitSDCheck(BB)) {
3946 bool FunctionBasedInstrumentation =
3947 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3948 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3951 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3954 } else if (SPDescriptor.shouldEmitStackProtector()) {
3955 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3956 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3965 ParentMBB, *MF->getSubtarget().getInstrInfo());
3968 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3972 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3976 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3977 if (FailureMBB->empty()) {
3978 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3983 SPDescriptor.resetPerBBState();
3990 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3994 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
4000 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
4007 ->buildLoad(PtrMemTy, StackSlotPtr,
4012 if (TLI->useStackGuardXorFP()) {
4013 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
4018 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
4030 FunctionType *FnTy = GuardCheckFn->getFunctionType();
4031 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
4032 ISD::ArgFlagsTy Flags;
4033 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
4035 CallLowering::ArgInfo GuardArgInfo(
4036 {GuardVal, FnTy->getParamType(0), {Flags}});
4038 CallLowering::CallLoweringInfo Info;
4039 Info.OrigArgs.push_back(GuardArgInfo);
4040 Info.CallConv = GuardCheckFn->getCallingConv();
4043 if (!CLI->lowerCall(MIRBuilder, Info)) {
4044 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
4056 getStackGuard(Guard, *CurBuilder);
4059 const Value *IRGuard = TLI->getSDagStackGuard(M);
4060 Register GuardPtr = getOrCreateVReg(*IRGuard);
4063 ->buildLoad(PtrMemTy, GuardPtr,
4082 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
4084 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
4085 const char *Name = TLI->getLibcallName(Libcall);
4087 CallLowering::CallLoweringInfo Info;
4088 Info.CallConv = TLI->getLibcallCallingConv(Libcall);
4092 if (!CLI->lowerCall(*CurBuilder, Info)) {
4093 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
4098 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4100 CurBuilder->buildInstr(TargetOpcode::G_TRAP);
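// Illustrative note: the parent-block check loads the value saved in the
// stack protector slot and the reference guard value (or calls the target's
// guard-check function), and branches to the failure block on mismatch. The
// failure path emitted here calls the STACKPROTECTOR_CHECK_FAIL libcall and,
// depending on the target options, follows it with G_TRAP so execution cannot
// continue past the failure.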
4105void IRTranslator::finalizeFunction() {
4108 PendingPHIs.clear();
4110 FrameIndices.clear();
4111 MachinePreds.clear();
4115 EntryBuilder.reset();
4118 SPDescriptor.resetPerFunctionState();
4131 return CI && CI->isMustTailCall();
4145 : TPC->isGISelCSEEnabled();
4146 TLI = MF->getSubtarget().getTargetLowering();
4149 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4150 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
4151 EntryBuilder->setCSEInfo(CSEInfo);
4152 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4153 CurBuilder->setCSEInfo(CSEInfo);
4155 EntryBuilder = std::make_unique<MachineIRBuilder>();
4156 CurBuilder = std::make_unique<MachineIRBuilder>();
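// Illustrative note: when CSE is enabled (via the enable-cse-in-irtranslator
// flag or the TargetPassConfig default), both builders are CSEMIRBuilders
// sharing one GISelCSEInfo, so identical constants and other pure generic
// instructions are de-duplicated as they are created; otherwise plain
// MachineIRBuilders are used.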
4158 CLI = MF->getSubtarget().getCallLowering();
4159 CurBuilder->setMF(*MF);
4160 EntryBuilder->setMF(*MF);
4161 MRI = &MF->getRegInfo();
4162 DL = &F.getDataLayout();
4163 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
4173 FuncInfo.BPI = nullptr;
4179 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4181 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
4182 SL->init(*TLI, TM, *DL);
4184 assert(PendingPHIs.empty() && "stale PHIs");
4188 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4191 F.getSubprogram(), &F.getEntryBlock());
4192 R << "unable to translate in big endian mode";
4198 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
4203 EntryBuilder->setMBB(*EntryBB);
4205 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4206 SwiftError.setFunction(CurMF);
4207 SwiftError.createEntriesInEntryBlock(DbgLoc);
4209 bool IsVarArg = F.isVarArg();
4210 bool HasMustTailInVarArgFn = false;
4213 FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
4217 MBB = MF->CreateMachineBasicBlock(&BB);
4223 if (!HasMustTailInVarArgFn)
4227 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4230 EntryBB->addSuccessor(&getMBB(F.front()));
4232 if (CLI->fallBackToDAGISel(*MF)) {
4234 F.getSubprogram(), &F.getEntryBlock());
4235 R << "unable to lower function: "
4236 << ore::NV("Prototype", F.getFunctionType());
4244 if (DL->getTypeStoreSize(Arg.getType()).isZero())
4249 if (Arg.hasSwiftErrorAttr()) {
4250 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
4251 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4255 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
4257 F.getSubprogram(), &F.getEntryBlock());
4258 R << "unable to lower arguments: "
4259 << ore::NV("Prototype", F.getFunctionType());
4266 if (EnableCSE && CSEInfo)
4271 DILocationVerifier Verifier;
4279 CurBuilder->setMBB(MBB);
4280 HasTailCall = false;
4290 Verifier.setCurrentInst(&Inst);
4294 translateDbgInfo(Inst, *CurBuilder);
4296 if (translate(Inst))
4301 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
4303 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4304 std::string InstStrStorage;
4308 R << ": '" << InstStrStorage << "'";
4315 if (!finalizeBasicBlock(*BB, MBB)) {
4317 BB->getTerminator()->getDebugLoc(), BB);
4318 R << "unable to translate basic block";
4328 finishPendingPhis();
4330 SwiftError.propagateVRegs();
4335 assert(EntryBB->succ_size() == 1 &&
4336 "Custom BB used for lowering should have only one successor");
4340 "LLVM-IR entry block has a predecessor!?");
4343 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4352 EntryBB->removeSuccessor(&NewEntryBB);
4353 MF->remove(EntryBB);
4354 MF->deleteMachineBasicBlock(EntryBB);
4356 assert(&MF->front() == &NewEntryBB &&
4357 "New entry wasn't next in the list of basic block!");
4361 SP.copyToMachineFrameInfo(MF->getFrameInfo());
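// Illustrative note: once every block has been translated, the pending PHIs
// are completed, swifterror vregs are propagated, and the artificial entry
// block used for lowering the formal arguments is spliced into the IR entry
// block and removed, leaving the translated function ready for the rest of
// the GlobalISel pipeline (legalization onwards).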