#include "llvm/IR/IntrinsicsAArch64.h"

#define GET_TARGET_REGBANK_IMPL
#include "AArch64GenRegisterBank.inc"

// This file will be TableGen'ed at some point.
#include "AArch64GenRegisterBankInfo.def"
  static llvm::once_flag InitializeRegisterBankFlag;
  static auto InitializeRegisterBankOnce = [&]() {
    const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
    const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
    const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
    assert(&AArch64::GPRRegBank == &RBGPR &&
           "The order in RegBanks is messed up");
    assert(&AArch64::FPRRegBank == &RBFPR &&
           "The order in RegBanks is messed up");
    assert(&AArch64::CCRegBank == &RBCCR &&
           "The order in RegBanks is messed up");
    assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
           "Subclass not added?");
    assert(getMaximumSize(RBGPR.getID()) == 128 &&
           "GPRs should hold up to 128-bit");
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
           "Subclass not added?");
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
           "Subclass not added?");
    assert(getMaximumSize(RBFPR.getID()) == 512 &&
           "FPRs should hold up to 512-bit via QQQQ sequence");
    assert(getMaximumSize(RBCCR.getID()) == 32 &&
           "CCR should hold up to 32-bit");
    assert(checkPartialMappingIdx(PMI_FirstGPR, PMI_LastGPR,
                                  {PMI_GPR32, PMI_GPR64, PMI_GPR128}) &&
           "PartialMappingIdx's are incorrectly ordered");
    assert(checkPartialMappingIdx(PMI_FirstFPR, PMI_LastFPR,
                                  {PMI_FPR16, PMI_FPR32, PMI_FPR64, PMI_FPR128,
                                   PMI_FPR256, PMI_FPR512}) &&
           "PartialMappingIdx's are incorrectly ordered");
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    assert(                                                                    \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \
  } while (false)
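// A sketch of how the macro is used (the upstream file checks every entry in
// PartMappings; the two calls below are illustrative, not the full list):
//   CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
//   CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);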
#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
  do {                                                                         \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
                             Offset) &&                                        \
           #RBName #Size " " #Offset " is incorrectly initialized");           \
  } while (false)

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
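// Representative uses, as a sketch (the upstream file exercises each
// bank/size pair):
//   CHECK_VALUEMAP(GPR, 32);
//   CHECK_VALUEMAP(GPR, 64);
//   CHECK_VALUEMAP(FPR, 128);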
// Check the value mappings for instructions whose three operands all live on
// the same register bank.
#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
  do {                                                                         \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
  } while (false)
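// Sketch of use: binary ops such as G_ADD or G_FADD keep all three operands
// on one bank, so their mappings are checked with, e.g.:
//   CHECK_VALUEMAP_3OPS(GPR, 64);
//   CHECK_VALUEMAP_3OPS(FPR, 128);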
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(AArch64::RBNameDst##RegBankID,    \
                                             AArch64::RBNameSrc##RegBankID,    \
                                             TypeSize::getFixed(Size));        \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 &&                                        \
           #RBNameDst #Size " Dst is incorrectly initialized");                \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 &&                                        \
           #RBNameSrc #Size " Src is incorrectly initialized");                \
  } while (false)
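// Sketch of use: cross-bank copies are checked in both directions for each
// size, e.g.:
//   CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
//   CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);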
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)                                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min;                    \
    unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min;                    \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize);               \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 &&                                        \
           "FPR" #DstSize " Dst is incorrectly initialized");                  \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 &&                                        \
           "FPR" #SrcSize " Src is incorrectly initialized");                  \
  } while (false)
  };

  llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
}
unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
                                           const RegisterBank &B,
                                           TypeSize Size) const {
  // Copies between GPR and FPR involve an FMOV, so cross-bank copies are
  // costed above same-bank ones (the values follow the upstream heuristic).
  if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
    return 5;
  if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
    return 4;
  return RegisterBankInfo::copyCost(A, B, Size);
}
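// Illustrative consequence: feeding a scalar integer into an FPR-mapped
// G_SITOFP requires a GPR -> FPR copy, which the hook above prices well above
// a same-bank copy, so the greedy mapper prefers operand mappings that avoid
// the cross-bank move.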
const RegisterBank &AArch64RegisterBankInfo::getRegBankFromRegClass(
    const TargetRegisterClass &RC, LLT Ty) const {
  switch (RC.getID()) {
  case AArch64::GPR64sponlyRegClassID:
    return getRegBank(AArch64::GPRRegBankID);
  // [...] all other classes defer to the TableGen'ed implementation.
InstructionMappings AArch64RegisterBankInfo::getInstrAlternativeMappings(
    const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR: {
    // If the instruction has any implicit-defs or uses, do not mess with it.
    if (MI.getNumOperands() != 3)
      break;
    // [...] 32- and 64-bit G_OR can live on either GPR or FPR for the same
    // cost, so both mappings are offered as alternatives.
  }
  case TargetOpcode::G_BITCAST: {
    if (MI.getNumOperands() != 2)
      break;
    // [...] the cross-bank alternatives are costed with
    // copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, ...).
  }
  case TargetOpcode::G_LOAD: {
    if (MI.getNumOperands() != 2)
      break;
    // [...]
void AArch64RegisterBankInfo::applyMappingImpl(
    MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_LOAD:
    // Those IDs must match getInstrAlternativeMappings.
    assert((OpdMapper.getInstrMapping().getID() >= 1 &&
            OpdMapper.getInstrMapping().getID() <= 4) &&
           "Don't know how to handle that ID");
    return applyDefaultMapping(OpdMapper);
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    // Extend smaller gpr operands to 32 bits.
    Builder.setInsertPt(*MI.getParent(), MI.getIterator());
    auto Ext = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(2).getReg());
    MRI.setRegBank(Ext.getReg(0), getRegBank(AArch64::GPRRegBankID));
    MI.getOperand(2).setReg(Ext.getReg(0));
    return applyDefaultMapping(OpdMapper);
  }
  case AArch64::G_DUP: {
    // A G_DUP of a small scalar is fed through a 32-bit GPR, so widen the
    // source here.
    assert(MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() < 32 &&
           "Expected sources smaller than 32-bits");
    Builder.setInsertPt(*MI.getParent(), MI.getIterator());

    Register ConstReg;
    auto ConstMI = MRI.getVRegDef(MI.getOperand(1).getReg());
    if (ConstMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      auto CstVal = ConstMI->getOperand(1).getCImm()->getValue();
      ConstReg =
          Builder.buildConstant(LLT::scalar(32), CstVal.sext(32)).getReg(0);
    } else {
      ConstReg =
          Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(1).getReg())
              .getReg(0);
    }
    MRI.setRegBank(ConstReg, getRegBank(AArch64::GPRRegBankID));
    MI.getOperand(1).setReg(ConstReg);
    return applyDefaultMapping(OpdMapper);
  }
  default:
    llvm_unreachable("Don't know how to handle that operation");
  }
}

const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  TypeSize Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);

  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

#ifndef NDEBUG
  // Make sure all the operands have compatible size and type.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(
        AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(
            RBIdx, OpTy.getSizeInBits()) ==
            AArch64GenRegisterBankInfo::getRegBankBaseIdxOffset(RBIdx, Size) &&
        "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
#endif // End NDEBUG.

  return getInstructionMapping(DefaultMappingID, 1,
                               getValueMapping(RBIdx, Size), NumOperands);
}
static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
                          const MachineInstr &MI) {
  // TODO: Add more intrinsics.
  switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_uaddlv:
  case Intrinsic::aarch64_neon_uaddv:
  case Intrinsic::aarch64_neon_saddv:
  case Intrinsic::aarch64_neon_umaxv:
  case Intrinsic::aarch64_neon_smaxv:
  case Intrinsic::aarch64_neon_uminv:
  case Intrinsic::aarch64_neon_sminv:
  case Intrinsic::aarch64_neon_faddv:
  case Intrinsic::aarch64_neon_fmaxv:
  case Intrinsic::aarch64_neon_fminv:
  case Intrinsic::aarch64_neon_fmaxnmv:
  case Intrinsic::aarch64_neon_fminnmv:
    return true;
  case Intrinsic::aarch64_neon_saddlv: {
    const LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    return SrcTy.getElementType().getSizeInBits() >= 16 &&
           SrcTy.getElementCount().getFixedValue() >= 4;
  }
  }
}
bool AArch64RegisterBankInfo::isPHIWithFPContraints(
    const MachineInstr &MI, const MachineRegisterInfo &MRI,
    const TargetRegisterInfo &TRI, const unsigned Depth) const {
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  return any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                [&](const MachineInstr &UseMI) {
                  if (onlyUsesFP(UseMI, MRI, TRI, Depth + 1))
                    return true;
                  return isPHIWithFPContraints(UseMI, MRI, TRI, Depth + 1);
                });
}
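// Illustrative MIR (hypothetical): the phi below has no FP opcode of its own,
// but its only use is a G_FPTOSI, so the recursive walk above reports it as
// FP-constrained and the value feeding it can stay on FPR:
//   %phi:_(s32) = G_PHI %a(s32), %bb.1, %b(s32), %bb.2
//   %r:_(s32) = G_FPTOSI %phi(s32)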
bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI,
                                               const TargetRegisterInfo &TRI,
                                               unsigned Depth) const {
  unsigned Op = MI.getOpcode();
  if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
    return true;

  // Do we have an explicit floating point instruction?
  if (isPreISelGenericFloatingPointOpcode(Op))
    return true;

  // No. Check if we have a copy-like instruction. If we do, then we could
  // still be fed by floating point instructions.
  if (Op != TargetOpcode::COPY && !MI.isPHI() &&
      !isPreISelGenericOptimizationHint(Op))
    return false;

  // Check if we already know the register bank.
  auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
  if (RB == &AArch64::FPRRegBank)
    return true;
  if (RB == &AArch64::GPRRegBank)
    return false;

  // For a phi, we may still be able to infer an FPR assignment from its
  // inputs.
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
    return Op.isReg() &&
           onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
  });
}
bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI,
                                         unsigned Depth) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_FPTOSI_SAT:
  case TargetOpcode::G_FPTOUI_SAT:
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND:
    return true;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI, Depth);
}
bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI,
                                            const TargetRegisterInfo &TRI,
                                            unsigned Depth) const {
  switch (MI.getOpcode()) {
  case AArch64::G_DUP:
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
  case TargetOpcode::G_INSERT_VECTOR_ELT:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
    return true;
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld4lane:
    case Intrinsic::aarch64_neon_ld4r:
      return true;
    default:
      break;
    }
    break;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI, Depth);
}
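// Example (sketch): a G_SITOFP always defines its result on FPR, so a G_STORE
// whose stored value comes from one gets remapped to store from FPR, saving
// an FPR -> GPR copy before the store.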
bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
  // GMemOperation because we also want to match indexed loads.
  auto *MemOp = cast<GMemOperation>(&MI);
  const Value *LdVal = MemOp->getMMO().getValue();
  if (!LdVal)
    return false;

  Type *EltTy = nullptr;
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(LdVal)) {
    EltTy = GV->getValueType();
    // Look at the first element of the struct to determine the type we are
    // loading.
    while (StructType *StructEltTy = dyn_cast<StructType>(EltTy)) {
      if (StructEltTy->getNumElements() == 0)
        break;
      EltTy = StructEltTy->getTypeAtIndex(0U);
    }
    // Look at the first element of the array to determine its type.
    if (isa<ArrayType>(EltTy))
      EltTy = EltTy->getArrayElementType();
  } else {
    // FIXME: grubbing around uses is pretty ugly, but with no more
    // `getPointerElementType` there's not much else we can do.
    for (const auto *LdUser : LdVal->users()) {
      if (isa<LoadInst>(LdUser)) {
        EltTy = LdUser->getType();
        break;
      }
      if (isa<StoreInst>(LdUser) && LdUser->getOperand(1) == LdVal) {
        EltTy = LdUser->getOperand(0)->getType();
        break;
      }
    }
  }
  return EltTy && EltTy->isFPOrFPVectorTy();
}
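// Illustrative (hypothetical) IR: for
//   @g = global { float, i32 } zeroinitializer
//   %v = load float, ptr @g
// the walk above drills into the first struct element (float), so the
// corresponding G_LOAD is treated as a floating-point load.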
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();

  // Try the default mapping first for copies, non-generic instructions, and
  // phis.
  if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
      Opc == TargetOpcode::G_PHI) {
    const RegisterBankInfo::InstructionMapping &Mapping =
        getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();

  switch (Opc) {
  // Arithmetic ops.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  // Bitwise ops.
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
  // Floating point ops.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_FPEXT: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    return getInstructionMapping(
        DefaultMappingID, /*Cost*/ 1,
        getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
        /*NumOperands*/ 2);
  }
  // Shifts.
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
      return getInstructionMapping(DefaultMappingID, 1,
                                   &ValMappings[Shift64Imm], 3);
    return getSameKindOfOperandsMapping(MI);
  }
  case TargetOpcode::COPY: {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    // Check if one of the registers is not a generic register.
    if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
        (SrcReg.isPhysical() || !MRI.getType(SrcReg).isValid())) {
      const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
      const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
      if (!DstRB)
        DstRB = SrcRB;
      else if (!SrcRB)
        SrcRB = DstRB;
      assert(DstRB && SrcRB && "Both RegBank were nullptr");
      // [...] (otherwise both registers are generic and the copy falls
      // through to the G_BITCAST handling below).
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    TypeSize Size = DstTy.getSizeInBits();
    bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
    bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
    const RegisterBank &DstRB =
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    const RegisterBank &SrcRB =
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    return getInstructionMapping(
        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
        getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
        // We only care about the mapping of the destination.
        /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
  }
  }

  unsigned NumOperands = MI.getNumOperands();
  unsigned MappingID = DefaultMappingID;

  // Track the size and bank of each register. We don't do partial mappings.
  SmallVector<unsigned, 4> OpSize(NumOperands);
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    if (!Ty.isValid())
      continue;
    OpSize[Idx] = Ty.getSizeInBits().getKnownMinValue();

    // As a first guess, vectors go in FPRs; scalars and pointers in GPRs,
    // except for floating-point instructions, whose scalars go in FPRs.
    if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc) ||
        Ty.getSizeInBits() > 64)
      OpRegBankIdx[Idx] = PMI_FirstFPR;
    else
      OpRegBankIdx[Idx] = PMI_FirstGPR;
  }

  unsigned Cost = 1;
  // Fine-tune the computed mapping.
  switch (Opc) {
  case AArch64::G_DUP: {
    Register ScalarReg = MI.getOperand(1).getReg();
    LLT ScalarTy = MRI.getType(ScalarReg);
    auto ScalarDef = MRI.getVRegDef(ScalarReg);
    // We want to select dup(load) into LD1R.
    if (ScalarDef->getOpcode() == TargetOpcode::G_LOAD)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    // s8 is an exception for G_DUP, which we always want on gpr.
    else if (ScalarTy.getSizeInBits() != 8 &&
             (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
              onlyDefinesFP(*ScalarDef, MRI, TRI)))
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    else
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  }
  case TargetOpcode::G_TRUNC: {
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    // Integer to FP conversions don't necessarily happen between GPR -> FPR
    // regbanks. They can also be done within an FPR register.
    Register SrcReg = MI.getOperand(1).getReg();
    if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    else
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_FPTOSI_SAT:
  case TargetOpcode::G_FPTOUI_SAT:
  case TargetOpcode::G_INTRINSIC_LRINT:
  case TargetOpcode::G_INTRINSIC_LLRINT:
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_FCMP: {
    // If the result is a vector, it must use a FPR.
    AArch64GenRegisterBankInfo::PartialMappingIdx Idx0 =
        MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
                                                          : PMI_FirstGPR;
    OpRegBankIdx = {Idx0,
                    /* Predicate */ PMI_None, PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_BITCAST:
    // This is going to be a cross register bank copy and this is expensive.
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
      Cost = copyCost(
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
          *AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
          TypeSize::getFixed(OpSize[0]));
    break;
  case TargetOpcode::G_LOAD: {
    if (cast<GLoad>(MI).isAtomic()) {
      // Atomics always use GPR destinations.
      OpRegBankIdx[0] = PMI_FirstGPR;
      break;
    }
    // Try to guess the type of the load from the MMO.
    if (isLoadFromFPType(MI)) {
      OpRegBankIdx[0] = PMI_FirstFPR;
      break;
    }
    // Check if that load feeds fp instructions; if so, map it on FPR.
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](const MachineInstr &UseMI) {
                 if (isPHIWithFPContraints(UseMI, MRI, TRI))
                   return true;
                 return onlyUsesFP(UseMI, MRI, TRI) ||
                        onlyDefinesFP(UseMI, MRI, TRI);
               }))
      OpRegBankIdx[0] = PMI_FirstFPR;
    break;
  }
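  // Illustrative MIR (hypothetical): the scalar load below carries no FP type
  // of its own, but its only use is a G_FADD, so the use-scan above maps the
  // load's destination on FPR and avoids a GPR -> FPR copy:
  //   %v:_(s32) = G_LOAD %p(p0) :: (load (s32))
  //   %s:_(s32) = G_FADD %v, %w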
  case TargetOpcode::G_STORE:
    // If the stored value is fed by fp instructions, map it on FPR.
    if (OpRegBankIdx[0] == PMI_FirstGPR) {
      Register VReg = MI.getOperand(0).getReg();
      if (VReg && onlyDefinesFP(*MRI.getVRegDef(VReg), MRI, TRI))
        OpRegBankIdx[0] = PMI_FirstFPR;
    }
    break;
  case TargetOpcode::G_INDEXED_STORE:
    // [...] same check as G_STORE, on operand 1.
    break;
  case TargetOpcode::G_INDEXED_SEXTLOAD:
  case TargetOpcode::G_INDEXED_ZEXTLOAD:
    // These should always be GPR.
    OpRegBankIdx[0] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_INDEXED_LOAD:
    if (isLoadFromFPType(MI))
      OpRegBankIdx[0] = PMI_FirstFPR;
    break;
  case TargetOpcode::G_SELECT: {
    // If the destination is FPR, preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;
    // If we're selecting between vectors, everything except the condition
    // (which must stay on GPR) goes on FPR.
    LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    if (SrcTy.isVector()) {
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR, PMI_FirstFPR, PMI_FirstFPR};
      break;
    }
    // Otherwise, count how many defs and uses are FP-constrained, e.g. via
    //   any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()), ...)
    // and map everything on FPR when that minimizes cross-bank copies. [...]
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    // If the first operand belongs to a FPR register bank, then make sure
    // that we preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;
    LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands() - 1).getReg());
    // Unmerging scalars out of a vector (or a 128-bit scalar) should always
    // use FPR, and likewise if any of the uses is an FP instruction.
    if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
        any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
      // Set the register bank of every operand to FPR.
      for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
           Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    // Destination and source need to be FPRs; the index needs to be a GPR.
    OpRegBankIdx[0] = PMI_FirstFPR;
    OpRegBankIdx[1] = PMI_FirstFPR;
    OpRegBankIdx[2] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    OpRegBankIdx[0] = PMI_FirstFPR;
    OpRegBankIdx[1] = PMI_FirstFPR;
    // The element may be either a GPR or FPR. Preserve that behaviour.
    if (getRegBank(MI.getOperand(2).getReg(), MRI, TRI) ==
        &AArch64::FPRRegBank)
      OpRegBankIdx[2] = PMI_FirstFPR;
    else {
      // i8/i16 elements get widened to 32 bits in applyMappingImpl, so mark
      // the mapping as custom.
      LLT Ty = MRI.getType(MI.getOperand(2).getReg());
      if (Ty.getSizeInBits() == 8 || Ty.getSizeInBits() == 16)
        MappingID = CustomMappingID;
      OpRegBankIdx[2] = PMI_FirstGPR;
    }
    // Index needs to be a GPR.
    OpRegBankIdx[3] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_EXTRACT: {
    // For s128 sources we have to use fpr unless we know otherwise.
    auto Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (SrcTy.getSizeInBits() != 128)
      break;
    auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
                   ? PMI_FirstGPR
                   : PMI_FirstFPR;
    OpRegBankIdx[0] = Idx;
    OpRegBankIdx[1] = Idx;
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // If the first source operand already belongs to the FPR bank, keep it.
    if (OpRegBankIdx[1] != PMI_FirstGPR)
      break;
    Register VReg = MI.getOperand(1).getReg();
    const LLT SrcTy = MRI.getType(VReg);
    // A build_vector fed purely by constants can stay on GPR.
    if (all_of(MI.operands(), [&](const MachineOperand &Op) {
          return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
                                   TargetOpcode::G_CONSTANT;
        })) {
      for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstGPR;
      break;
    }
    // Otherwise, FP-defined or sub-32-bit sources force every operand on FPR.
    if (isPreISelGenericFloatingPointOpcode(
            MRI.getVRegDef(VReg)->getOpcode()) ||
        SrcTy.getSizeInBits() < 32 ||
        getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
      for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN:
    // Reductions produce a scalar value from a vector; the scalar should be
    // on FPR bank.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
    // These reductions also take a scalar accumulator input.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
    // Check if we know that the intrinsic has any constraints on its register
    // banks. If it does, then update the mapping accordingly.
    unsigned Idx = 0;
    if (onlyDefinesFP(MI, MRI, TRI))
      for (const auto &Op : MI.defs()) {
        if (Op.isReg())
          OpRegBankIdx[Idx] = PMI_FirstFPR;
        ++Idx;
      }
    else
      Idx += MI.getNumExplicitDefs();

    if (onlyUsesFP(MI, MRI, TRI))
      for (const auto &Op : MI.explicit_uses()) {
        if (Op.isReg())
          OpRegBankIdx[Idx] = PMI_FirstFPR;
        ++Idx;
      }
    break;
  }
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND: {
    // Source is always floating point and destination is always integer.
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  }
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
      if (!Ty.isValid())
        continue;
      auto Mapping =
          getValueMapping(OpRegBankIdx[Idx], TypeSize::getFixed(OpSize[Idx]));
      if (!Mapping->isValid())
        return getInvalidInstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }

  return getInstructionMapping(MappingID, Cost,
                               getOperandsMapping(OpdsMapping), NumOperands);
}