#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "irtranslator"

                            cl::desc("Should enable CSE in irtranslator"),

  MF.getProperties().setFailedISel();

  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
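// DILocationVerifier is a GISelChangeObserver used in asserts builds to check
// that the debug location of the IR instruction currently being translated is
// propagated to every MachineInstr created for it.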
  DILocationVerifier() = default;
  ~DILocationVerifier() override = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

                      << " was copied to " << MI);

            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
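// allocateVRegs reserves one (still unassigned) vreg slot per scalar piece of
// Val's type and records the bit offset of each piece in the value map.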
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

    return *VMap.getVRegs(Val);

  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

           "Don't know how to create an empty vreg");

                   Offsets->empty() ? Offsets : nullptr);

    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));

    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);

    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));

      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
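// Frame index and alignment helpers: allocas get a cached stack object, and
// memory instructions report their natural alignment for MMO construction.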
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
    return MapEntry->second;

  Size = std::max<uint64_t>(Size, 1u);

  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);

    return SI->getAlign();
    return LI->getAlign();
  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);

  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");

  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);

  return U.getType()->getScalarType()->isBFloatTy() ||
           return V->getType()->getScalarType()->isBFloatTy();
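// Generic translation helpers: each simple IR operator maps onto a single
// generic MachineInstr (G_* opcode) built from its operands' vregs.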
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));

  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
bool IRTranslator::translateCompare(const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    VRegs = getOrCreateVRegs(*Ret);
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
void IRTranslator::emitBranchForMergedCondition(
    Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);

  SwitchCG::CaseBlock CB(
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);

    return I->getParent() == BB;
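// findMergedConditions recursively splits a branch condition built from
// and/or trees into a chain of conditional branches, mirroring
// SelectionDAGBuilder::FindMergedConditions.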
void IRTranslator::findMergedConditions(
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");

    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,

  const Value *BOpOp0, *BOpOp1;

      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;

  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();

    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,

  MachineBasicBlock *TmpBB =

  if (Opc == Instruction::Or) {

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);

    assert(Opc == Instruction::And && "Unknown merge op!");

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  if (Cases.size() != 2)

  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      Cases[0].TrueBB == Cases[1].ThisBB)
      Cases[0].FalseBB == Cases[1].ThisBB)
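// translateBr: lower an IR br, omitting the branch when the target is the
// layout successor and splitting short-circuit conditions into CaseBlocks
// when profitable.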
  auto &CurMBB = MIRBuilder.getMBB();
      !CurMBB.isLayoutSuccessor(Succ0MBB))

    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));

  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  using namespace PatternMatch;
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;

      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      if (shouldEmitAsBranches(SL->SwitchCases)) {
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());

      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);
      SL->SwitchCases.clear();

                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  emitSwitchCase(CB, &CurMBB, *CurBuilder);
    Src->addSuccessorWithoutProb(Dst);
      Prob = getEdgeProbability(Src, Dst);
    Src->addSuccessor(Dst, Prob);

  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
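// translateSwitch: form one case cluster per case, let SwitchLowering find
// jump-table and bit-test clusters, then lower the resulting work items.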
  using namespace SwitchCG;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  if (Clusters.empty()) {

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
      if (C.Kind == CC_BitTests)

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        C.High->getValue().print(dbgs(), true);

  assert(!Clusters.empty());

  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 &&
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
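// splitWorkItem: binary-split a large cluster range around a pivot so each
// half can be lowered (or split further) as its own work item.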
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  const ConstantInt *Pivot = PivotCluster->Low;

  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});

  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
    SL->SwitchCases.push_back(CB);
  assert(JT.Reg && "Should lower JT Header first!");

  MachineIRBuilder MIB(*HeaderBB->getParent());

  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));

  JT.Reg = Sub.getReg(0);

    auto Cst = getOrCreateVReg(

  if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&

           "Can only handle SLE ranges");

    const LLT CmpTy = MRI->getType(CmpOpReg);
    auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;

      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);

  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough;

  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
    JTH->Emitted = true;
    bool FallthroughUnreachable,
  using namespace SwitchCG;
  if (I->Low == I->High) {

  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
    emitSwitchCase(CB, SwitchMBB, MIB);
  MachineIRBuilder &MIB = *CurBuilder;

  Register SwitchOpReg = getOrCreateVReg(*B.SValue);
  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  LLT MaskTy = SwitchOpTy;
  for (const SwitchCG::BitTestCase &Case : B.Cases) {

  if (SwitchOpTy != MaskTy)

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;
  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  if (!B.FallthroughUnreachable) {
                                  RangeSub, RangeCst);

  MachineIRBuilder &MIB = *CurBuilder;

  if (PopCount == 1) {
    auto MaskTrailingZeros =
  } else if (PopCount == BB.Range) {
    auto MaskTrailingOnes =

    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);

    addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
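// lowerBitTestWorkItem: materialize a bit-test block for a dense cluster and
// wire up its header, default destination, and branch probabilities.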
bool IRTranslator::lowerBitTestWorkItem(
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;

  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  if (++BBI != FuncInfo.MF->end())

            [](const CaseCluster &a, const CaseCluster &b) {
              return a.Prob != b.Prob
                         : a.Low->getValue().slt(b.Low->getValue());

  for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
    if (I->Prob > W.LastCluster->Prob)
    if (I->Kind == CC_Range && I->MBB == NextMBB) {

  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      Fallthrough = DefaultMBB;
      CurMF->insert(BBI, Fallthrough);
    UnhandledProbs -= I->Prob;

      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {

      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {

      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {

    CurMBB = Fallthrough;
bool IRTranslator::translateIndirectBr(const User &U,

  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    if (!AddedSuccessors.insert(Succ).second)

    return Arg->hasSwiftErrorAttr();

  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());

  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());

    assert(Regs.size() == 1 && "swifterror should be single pointer");
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);

      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);

  if (AA->pointsToConstantMemory(

  for (unsigned i = 0; i < Regs.size(); ++i) {
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())

  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        SI.getSyncScopeID(), SI.getOrdering());
  const Value *Src = U.getOperand(0);
    for (auto Idx : EVI->indices())
    for (auto Idx : IVI->indices())
      DL.getIndexedOffsetInType(Src->getType(), Indices));

bool IRTranslator::translateExtractValue(const User &U,
  const Value *Src = U.getOperand(0);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  auto &DstRegs = allocateVRegs(U);
  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

bool IRTranslator::translateInsertValue(const User &U,
  const Value *Src = U.getOperand(0);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  auto *InsertedIt = InsertedRegs.begin();
  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
      DstRegs[i] = SrcRegs[i];
bool IRTranslator::translateSelect(const User &U,
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

bool IRTranslator::translateCopy(const User &U, const Value &V,
  auto &Regs = *VMap.getVRegs(U);
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
bool IRTranslator::translateBitCast(const User &U,
    return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
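// translateGetElementPtr: fold constant indices into a running byte offset
// and emit G_PTR_ADD (plus a scaled G_MUL for variable indices), splatting
// the base pointer when translating a vector-of-pointers GEP.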
bool IRTranslator::translateGetElementPtr(const User &U,
  Value &Op0 = *U.getOperand(0);
  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);

  uint32_t PtrAddFlags = 0;
  auto PtrAddFlagsWithConst = [&](int64_t Offset) {

  unsigned VectorWidth = 0;
  bool WantSplatVector = false;
    WantSplatVector = VectorWidth > 1;

  if (WantSplatVector && !PtrTy.isVector()) {
    OffsetIRTy = DL->getIndexType(PtrIRTy);

    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;

                              PtrAddFlagsWithConst(Offset))

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {

      if (ElementSize != 1) {
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
        GepOffsetReg = IdxReg;

          MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           PtrAddFlagsWithConst(Offset));

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
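// translateMemFunc: common lowering of the llvm.memcpy/memmove/memset family
// into G_MEMCPY / G_MEMMOVE / G_MEMSET with explicit memory operands.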
bool IRTranslator::translateMemFunc(const CallInst &CI,

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);

  if (MRI->getType(SizeOpReg) != SizeTy)

  ConstantInt *CopySize = nullptr;
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    DstAlign = MSI->getDestAlign().valueOrOne();

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {

  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
bool IRTranslator::translateTrap(const CallInst &CI,
  StringRef TrapFuncName =
      CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {

  CallLowering::CallLoweringInfo Info;
  if (Opcode == TargetOpcode::G_UBSANTRAP)
  return CLI->lowerCall(MIRBuilder, Info);
bool IRTranslator::translateVectorInterleave2Intrinsic(
         "This function can only be called on the interleave2 intrinsic!");
  Register Res = getOrCreateVReg(CI);
  LLT OpTy = MRI->getType(Op0);

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
         "This function can only be called on the deinterleave2 intrinsic!");
  LLT ResTy = MRI->getType(Res[0]);

void IRTranslator::getStackGuard(Register DstReg,
  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
    Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));

  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass());
  MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
      Op, {ResRegs[0], ResRegs[1]},

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
  Register Dst = getOrCreateVReg(CI);
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
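// getSimpleIntrinsicOpcode: 1:1 mapping from "simple" intrinsics (same
// operand list, no special handling) to their generic G_* opcodes.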
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::atan2:
    return TargetOpcode::G_FATAN2;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::minimumnum:
    return TargetOpcode::G_FMINIMUMNUM;
  case Intrinsic::maximumnum:
    return TargetOpcode::G_FMAXIMUMNUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::experimental_vector_compress:
    return TargetOpcode::G_VECTOR_COMPRESS;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  for (const auto &Arg : CI.args())

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,

  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
bool IRTranslator::translateConstrainedFPIntrinsic(

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();

bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");

  MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);

  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;

bool IRTranslator::translateConvergenceControlIntrinsic(
  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);

  if (ID == Intrinsic::experimental_convergence_loop) {
    assert(Bundle && "Expected a convergence control token.");
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
  if (ORE->enabled()) {
    MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))

  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
        MF->getFunction().hasOptNone())

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

  case Intrinsic::fake_use: {
    for (const auto &Arg : CI.args())
    MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
    MF->setHasFakeUses(true);

  case Intrinsic::dbg_declare: {
  case Intrinsic::dbg_label: {
           "Expected inlined-at fields to agree");

  case Intrinsic::vaend:
  case Intrinsic::vastart: {
    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                ListSize, Alignment));

  case Intrinsic::dbg_assign:
  case Intrinsic::dbg_value: {
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    Register Dst = getOrCreateVReg(CI);
        TLI->isFMAFasterThanFMulAndFAdd(*MF,
                                        TLI->getValueType(*DL, CI.getType()))) {
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,

  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::frexp: {
  case Intrinsic::modf: {
    MIRBuilder.buildModf(VRegs[0], VRegs[1],
  case Intrinsic::sincos: {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    unsigned TypeID = MF->getTypeIDFor(GV);

  case Intrinsic::objectsize:
  case Intrinsic::is_constant:
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
  case Intrinsic::stackprotector: {
    if (TLI->useLoadStackGuardNode(*CI.getModule())) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);

    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

        GuardVal, getOrCreateVReg(*Slot),

  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},

  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},

  case Intrinsic::invariant_start: {
  case Intrinsic::invariant_end:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    MIRBuilder.buildCopy(getOrCreateVReg(CI),

  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:

  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})

  case Intrinsic::write_register: {
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)

  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
          MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);

  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    Register Dst = getOrCreateVReg(CI);
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      if (!MRI->getType(VecSrc).isVector())
        Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
                                                  : TargetOpcode::G_FMUL;

    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    LLT DstTy = MRI->getType(Dst);
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},

  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::amdgcn_cs_chain:
  case Intrinsic::amdgcn_call_whole_wave:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    std::optional<RoundingMode> RoundMode =
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
        .addImm((int)*RoundMode);

  case Intrinsic::is_fpclass: {
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})

  case Intrinsic::set_fpenv: {
  case Intrinsic::reset_fpenv:
  case Intrinsic::set_fpmode: {
  case Intrinsic::reset_fpmode:
  case Intrinsic::get_rounding:
  case Intrinsic::set_rounding:
  case Intrinsic::vscale: {
  case Intrinsic::scmp:
    MIRBuilder.buildSCmp(getOrCreateVReg(CI),
  case Intrinsic::ucmp:
    MIRBuilder.buildUCmp(getOrCreateVReg(CI),
  case Intrinsic::vector_extract:
    return translateExtractVector(CI, MIRBuilder);
  case Intrinsic::vector_insert:
    return translateInsertVector(CI, MIRBuilder);
  case Intrinsic::stepvector: {
  case Intrinsic::prefetch: {
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,

  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
    return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);

#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"

  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
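// Call lowering: inline asm is delegated to InlineAsmLowering, and ordinary
// calls go through CallLowering::lowerCall, with swifterror, pointer-auth,
// and convergence-token operands threaded through as needed.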
bool IRTranslator::translateInlineAsm(const CallBase &CB,
  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });

bool IRTranslator::translateCallBase(const CallBase &CB,
  for (const auto &Arg : CB.args()) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
    Args.push_back(getOrCreateVRegs(*Arg));

  if (ORE->enabled()) {
    MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);

  std::optional<CallLowering::PtrAuthInfo> PAI;
    const Value *Key = Bundle->Inputs[0];
    if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
        !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
      Register DiscReg = getOrCreateVReg(*Discriminator);

    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);

  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,

    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return translateInlineAsm(CI, MIRBuilder);

    if (translateCallBase(CI, MIRBuilder)) {

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))

    ResultRegs = getOrCreateVRegs(CI);

      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      MIB.addImm(CI->getSExtValue());

      auto *MD = MDVal->getMetadata();
        MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);

      if (VRegs.size() > 1)

  TargetLowering::IntrinsicInfo Info;
  if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());

    MachinePointerInfo MPI;
      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
    else if (Info.fallbackAddressSpace)
      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);

    auto *Token = Bundle->Inputs[0].get();
    Register TokenReg = getOrCreateVReg(*Token);
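// Exception handling: findUnwindDestinations walks an invoke's unwind chain,
// honouring funclet-based personalities (MSVC C++, CoreCLR) whose catchswitch
// pads introduce extra EH-scope/funclet entry blocks.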
bool IRTranslator::findUnwindDestinations(
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;
bool IRTranslator::translateInvoke(const User &U,
  MCContext &Context = MF->getContext();

  const Function *Fn = I.getCalledFunction();

  if (I.hasDeoptState())

      (MF->getTarget().getTargetTriple().isOSWindows() &&

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
  } else if (!translateCallBase(I, MIRBuilder))

    EndSymbol = Context.createTempSymbol();

  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);

  MIRBuilder.buildBr(ReturnMBB);
bool IRTranslator::translateCallBr(const User &U,

bool IRTranslator::translateLandingPad(const User &U,
  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)

  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)

  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
bool IRTranslator::translateAlloca(const User &U,
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);

  if (MF->getTarget().getTargetTriple().isOSWindows())

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());

  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});

bool IRTranslator::translateUnreachable(const User &U,
  if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
                            MF->getTarget().Options.NoTrapAfterNoreturn))
bool IRTranslator::translateInsertElement(const User &U,
      FVT && FVT->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);

    Idx = getOrCreateVReg(*U.getOperand(2));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
bool IRTranslator::translateInsertVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));

  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      CI = ConstantInt::get(CI->getContext(), NewIdx);

      ResultType && ResultType->getNumElements() == 1) {
        InputType && InputType->getNumElements() == 1) {
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

    Register Idx = getOrCreateVReg(*CI);

    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);

      getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
bool IRTranslator::translateExtractElement(const User &U,
  if (const FixedVectorType *FVT =
    if (FVT->getNumElements() == 1)
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Val = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);

    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);

bool IRTranslator::translateExtractVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      CI = ConstantInt::get(CI->getContext(), NewIdx);

      ResultType && ResultType->getNumElements() == 1) {
        InputType && InputType->getNumElements() == 1) {
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

    Register Idx = getOrCreateVReg(*CI);

    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);

      getOrCreateVReg(*U.getOperand(0)),
bool IRTranslator::translateShuffleVector(const User &U,
  if (U.getOperand(0)->getType()->isScalableTy()) {
    Register Val = getOrCreateVReg(*U.getOperand(0));
        MRI->getType(Val).getElementType(), Val, 0);

    Mask = SVI->getShuffleMask();

    unsigned M = Mask[0];
    if (M == 0 || M == 1)
      return translateCopy(U, *U.getOperand(M), MIRBuilder);

          Dst, getOrCreateVReg(*U.getOperand(0)), M);
    } else if (M < SrcElts * 2) {
          Dst, getOrCreateVReg(*U.getOperand(1)), M - SrcElts);

    for (int M : Mask) {
      if (M == 0 || M == 1) {
        Ops.push_back(getOrCreateVReg(*U.getOperand(M)));
        if (!Undef.isValid()) {
          Undef = MRI->createGenericVirtualRegister(SrcTy);
        Ops.push_back(Undef);

  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});

  PendingPHIs.emplace_back(&PI, std::move(Insts));
bool IRTranslator::translateAtomicCmpXchg(const User &U,
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);

  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));

bool IRTranslator::translateAtomicRMW(const User &U,
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;

      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
bool IRTranslator::translateFence(const User &U,

bool IRTranslator::translateFreeze(const User &U,
         "Freeze with different source and destination type?");
  for (unsigned I = 0; I < DstRegs.size(); ++I) {

void IRTranslator::finishPendingPhis() {
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);

  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();

    SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
         "Expected inlined-at fields to agree");

  if (!V || HasArgList) {

    auto *ExprDerefRemoved =

  if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,

void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");

         "Expected inlined-at fields to agree");

    MF->setVariableDbgInfo(Variable, Expression,
                           getOrCreateFrameIndex(*AI), DL);

  if (translateIfEntryValueArgument(true, Address, Variable,

void IRTranslator::translateDbgInfo(const Instruction &Inst,
      assert(DLR->getLabel() && "Missing label");
      assert(DLR->getLabel()->isValidLocationForIntrinsic(
             "Expected inlined-at fields to agree");

      translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
      translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
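// Top-level dispatch: translate(Instruction) switches on the IR opcode via
// Instruction.def, while translate(Constant, Reg) materializes constants in
// the entry block so their vregs dominate all uses.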
3706bool IRTranslator::translate(
const Instruction &Inst) {
3708 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3709 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3711 if (TLI->fallBackToDAGISel(Inst))
3715#define HANDLE_INST(NUM, OPCODE, CLASS) \
3716 case Instruction::OPCODE: \
3717 return translate##OPCODE(Inst, *CurBuilder.get());
3718#include "llvm/IR/Instruction.def"
3727   if (auto CurrInstDL = CurBuilder->getDL())
3728     EntryBuilder->setDebugLoc(DebugLoc());
3734     EntryBuilder->buildConstant(Reg, *CI);
3738     CF = ConstantFP::get(CF->getContext(), CF->getValue());
3739     EntryBuilder->buildFConstant(Reg, *CF);
3741     EntryBuilder->buildUndef(Reg);
3743     EntryBuilder->buildConstant(Reg, 0);
3745     EntryBuilder->buildGlobalValue(Reg, GV);
3747     Register Addr = getOrCreateVReg(*CPA->getPointer());
3748     Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3749     EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
3751     Constant &Elt = *CAZ->getElementValue(0u);
3753       EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
3757     unsigned NumElts = CAZ->getElementCount().getFixedValue();
3759       return translateCopy(C, Elt, *EntryBuilder);
3761     EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
3764     if (CV->getNumElements() == 1)
3765       return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3767     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3768       Constant &Elt = *CV->getElementAsConstant(i);
3769       Ops.push_back(getOrCreateVReg(Elt));
3771     EntryBuilder->buildBuildVector(Reg, Ops);
3773     switch (CE->getOpcode()) {
3774 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3775   case Instruction::OPCODE: \
3776     return translate##OPCODE(*CE, *EntryBuilder.get());
3777 #include "llvm/IR/Instruction.def"
3782     if (CV->getNumOperands() == 1)
3783       return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3785     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3786       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3788     EntryBuilder->buildBuildVector(Reg, Ops);
3790     EntryBuilder->buildBlockAddress(Reg, BA);
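// Illustrative sketch of the constant materialization above: constants are
// built once, in the entry block, by dispatching on the concrete Constant
// subclass (excerpt only; Reg is the register already allocated for C).
//   if (auto *CI = dyn_cast<ConstantInt>(&C))
//     EntryBuilder->buildConstant(Reg, *CI);
//   else if (auto *CF = dyn_cast<ConstantFP>(&C))
//     EntryBuilder->buildFConstant(Reg, *CF);
//   else if (isa<UndefValue>(C))
//     EntryBuilder->buildUndef(Reg);
//   else if (auto *GV = dyn_cast<GlobalValue>(&C))
//     EntryBuilder->buildGlobalValue(Reg, GV);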
3797 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3799   for (auto &BTB : SL->BitTestCases) {
3802       emitBitTestHeader(BTB, BTB.Parent);
3804     BranchProbability UnhandledProb = BTB.Prob;
3805     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3806       UnhandledProb -= BTB.Cases[j].ExtraProb;
3808       MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3817       MachineBasicBlock *NextMBB;
3818       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3821         NextMBB = BTB.Cases[j + 1].TargetBB;
3822       } else if (j + 1 == ej) {
3824         NextMBB = BTB.Default;
3827         NextMBB = BTB.Cases[j + 1].ThisBB;
3830       emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3832       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3836         addMachineCFGPred({BTB.Parent->getBasicBlock(),
3837                            BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3840         BTB.Cases.pop_back();
3846     CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3847                                    BTB.Default->getBasicBlock()};
3848     addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3849     if (!BTB.ContiguousRange) {
3850       addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3853   SL->BitTestCases.clear();
3855   for (auto &JTCase : SL->JTCases) {
3857     if (!JTCase.first.Emitted)
3858       emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3860     emitJumpTable(JTCase.second, JTCase.second.MBB);
3862   SL->JTCases.clear();
3864   for (auto &SwCase : SL->SwitchCases)
3865     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3866   SL->SwitchCases.clear();
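// Illustrative sketch of the shape emitSwitchCase produces for a single
// equality case (hypothetical helper; the real code also handles ranges,
// probabilities, and fallthrough): compare the switch value, branch to the
// case block on a match, otherwise branch to the next block in the chain.
static void emitCaseCompareExample(MachineIRBuilder &MIB, Register SwitchVal,
                                   Register CaseVal, MachineBasicBlock &CaseMBB,
                                   MachineBasicBlock &NextMBB) {
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), SwitchVal, CaseVal);
  MIB.buildBrCond(Cmp.getReg(0), CaseMBB);
  MIB.buildBr(NextMBB);
}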
3870   if (SP.shouldEmitSDCheck(BB)) {
3871     bool FunctionBasedInstrumentation =
3872         TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3873     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3876   if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3879   } else if (SPDescriptor.shouldEmitStackProtector()) {
3880     MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3881     MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3890         ParentMBB, *MF->getSubtarget().getInstrInfo());
3893     SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3897     if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3901     MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3902     if (FailureMBB->empty()) {
3903       if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3908   SPDescriptor.resetPerBBState();
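// Illustrative sketch of the parent-block preparation above: the tail of the
// parent block (everything from the split point on) is moved into the success
// block before the guard check is emitted, so the check can branch around it.
// A plausible reconstruction using the helpers referenced above:
//   MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
//       ParentMBB, *MF->getSubtarget().getInstrInfo());
//   SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
//                      ParentMBB->end());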
3915   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3919   LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3925   Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3932       ->buildLoad(PtrMemTy, StackSlotPtr,
3937   if (TLI->useStackGuardXorFP()) {
3938     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3943   if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3955     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3956     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3957     ISD::ArgFlagsTy Flags;
3958     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3960     CallLowering::ArgInfo GuardArgInfo(
3961         {GuardVal, FnTy->getParamType(0), {Flags}});
3963     CallLowering::CallLoweringInfo Info;
3964     Info.OrigArgs.push_back(GuardArgInfo);
3965     Info.CallConv = GuardCheckFn->getCallingConv();
3968     if (!CLI->lowerCall(MIRBuilder, Info)) {
3969       LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3981   getStackGuard(Guard, *CurBuilder);
3984   const Value *IRGuard = TLI->getSDagStackGuard(M);
3985   Register GuardPtr = getOrCreateVReg(*IRGuard);
3988       ->buildLoad(PtrMemTy, GuardPtr,
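// Illustrative sketch of how emitSPDescriptorParent typically ends once both
// guard values are loaded (the register and descriptor names here are
// assumptions): compare them and branch to the failure block on mismatch,
// otherwise fall through to the success block.
//   auto Cmp = CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1),
//                                    Guard, GuardVal);
//   CurBuilder->buildBrCond(Cmp.getReg(0), *SPD.getFailureMBB());
//   CurBuilder->buildBr(*SPD.getSuccessMBB());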
4007   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
4009   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
4010   const char *Name = TLI->getLibcallName(Libcall);
4012   CallLowering::CallLoweringInfo Info;
4013   Info.CallConv = TLI->getLibcallCallingConv(Libcall);
4017   if (!CLI->lowerCall(*CurBuilder, Info)) {
4018     LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
4023   const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4025   CurBuilder->buildInstr(TargetOpcode::G_TRAP);
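// Illustrative sketch of the rest of the CallLoweringInfo setup for the
// stack-protector-fail libcall (fields not shown in this excerpt; a guess
// based on the surrounding code): the callee is an external symbol and the
// call returns void.
//   Info.Callee = MachineOperand::CreateES(Name);
//   Info.OrigRet = {Register(),
//                   Type::getVoidTy(MF->getFunction().getContext()), 0};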
4030 void IRTranslator::finalizeFunction() {
4033 PendingPHIs.clear();
4035 FrameIndices.clear();
4036 MachinePreds.clear();
4040 EntryBuilder.reset();
4043 SPDescriptor.resetPerFunctionState();
4056 return CI && CI->isMustTailCall();
4070                        : TPC->isGISelCSEEnabled();
4071   TLI = MF->getSubtarget().getTargetLowering();
4074     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4075     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
4076     EntryBuilder->setCSEInfo(CSEInfo);
4077     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4078     CurBuilder->setCSEInfo(CSEInfo);
4080     EntryBuilder = std::make_unique<MachineIRBuilder>();
4081     CurBuilder = std::make_unique<MachineIRBuilder>();
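// Illustrative sketch of the builder selection above: when CSE is enabled,
// both builders are CSEMIRBuilders sharing one GISelCSEInfo; otherwise plain
// MachineIRBuilders are used. The decision is made a few lines earlier,
// roughly (reconstructed from the EnableCSEInIRTranslator option declared in
// this file):
//   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
//                        ? static_cast<bool>(EnableCSEInIRTranslator)
//                        : TPC->isGISelCSEEnabled();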
4083   CLI = MF->getSubtarget().getCallLowering();
4084   CurBuilder->setMF(*MF);
4085   EntryBuilder->setMF(*MF);
4086   MRI = &MF->getRegInfo();
4087   DL = &F.getDataLayout();
4088   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
4090   TM.resetTargetOptions(F);
4098   FuncInfo.BPI = nullptr;
4104   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4106   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
4107   SL->init(*TLI, TM, *DL);
4109   assert(PendingPHIs.empty() && "stale PHIs");
4113   if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4116                                F.getSubprogram(), &F.getEntryBlock());
4117     R << "unable to translate in big endian mode";
4123   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
4128   EntryBuilder->setMBB(*EntryBB);
4130   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4131   SwiftError.setFunction(CurMF);
4132   SwiftError.createEntriesInEntryBlock(DbgLoc);
4134   bool IsVarArg = F.isVarArg();
4135   bool HasMustTailInVarArgFn = false;
4138   FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
4142     MBB = MF->CreateMachineBasicBlock(&BB);
4148     if (!HasMustTailInVarArgFn)
4152   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
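// Illustrative sketch of the block-creation loop the lines above belong to
// (shape reconstructed, details assumed): one MachineBasicBlock is created up
// front for every IR basic block, so branches can be translated before their
// target blocks have been visited.
//   for (const BasicBlock &BB : F) {
//     MachineBasicBlock *&MBB = FuncInfo.MBBMap[BB.getNumber()];
//     MBB = MF->CreateMachineBasicBlock(&BB);
//     MF->push_back(MBB);
//     if (!HasMustTailInVarArgFn)
//       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
//   }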
4155   EntryBB->addSuccessor(&getMBB(F.front()));
4157   if (CLI->fallBackToDAGISel(*MF)) {
4159                                F.getSubprogram(), &F.getEntryBlock());
4160     R << "unable to lower function: "
4161       << ore::NV("Prototype", F.getFunctionType());
4169     if (DL->getTypeStoreSize(Arg.getType()).isZero())
4174     if (Arg.hasSwiftErrorAttr()) {
4175       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
4176       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4180   if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
4182                                F.getSubprogram(), &F.getEntryBlock());
4183     R << "unable to lower arguments: "
4184       << ore::NV("Prototype", F.getFunctionType());
4191   if (EnableCSE && CSEInfo)
4196   DILocationVerifier Verifier;
4204     CurBuilder->setMBB(MBB);
4205     HasTailCall = false;
4215       Verifier.setCurrentInst(&Inst);
4219       translateDbgInfo(Inst, *CurBuilder);
4221       if (translate(Inst))
4226       R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
4228       if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4229         std::string InstStrStorage;
4233         R << ": '" << InstStrStorage << "'";
4240     if (!finalizeBasicBlock(*BB, MBB)) {
4242                                  BB->getTerminator()->getDebugLoc(), BB);
4243       R << "unable to translate basic block";
4253   finishPendingPhis();
4255   SwiftError.propagateVRegs();
4260   assert(EntryBB->succ_size() == 1 &&
4261          "Custom BB used for lowering should have only one successor");
4265          "LLVM-IR entry block has a predecessor!?");
4268   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4277   EntryBB->removeSuccessor(&NewEntryBB);
4278   MF->remove(EntryBB);
4279   MF->deleteMachineBasicBlock(EntryBB);
4281   assert(&MF->front() == &NewEntryBB &&
4282          "New entry wasn't next in the list of basic block!");
4286   SP.copyToMachineFrameInfo(MF->getFrameInfo());