#include "llvm/IR/IntrinsicsAArch64.h"

#define GET_TARGET_REGBANK_IMPL
#include "AArch64GenRegisterBank.inc"

#include "AArch64GenRegisterBankInfo.def"
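// The TableGen'ed .inc/.def files above provide the register-bank definitions
// and the PartMappings/ValMappings tables that the one-time sanity checks
// below validate.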
AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI) {
  static llvm::once_flag InitializeRegisterBankFlag;
  static auto InitializeRegisterBankOnce = [&]() {
    const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
    const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
    const RegisterBank &RBCCR = getRegBank(AArch64::CCRegBankID);
    assert(&AArch64::GPRRegBank == &RBGPR &&
           "The order in RegBanks is messed up");
    assert(&AArch64::FPRRegBank == &RBFPR &&
           "The order in RegBanks is messed up");
    assert(&AArch64::CCRegBank == &RBCCR &&
           "The order in RegBanks is messed up");
    assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
           "Subclass not added?");
    assert(getMaximumSize(RBGPR.getID()) == 128 &&
           "GPRs should hold up to 128-bit");
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
           "Subclass not added?");
    assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
           "Subclass not added?");
    assert(getMaximumSize(RBFPR.getID()) == 512 &&
           "FPRs should hold up to 512-bit via QQQQ sequence");
    assert(getMaximumSize(RBCCR.getID()) == 32 &&
           "CCR should hold up to 32-bit");
    assert(checkPartialMappingIdx(PMI_FirstGPR, PMI_LastGPR,
                                  {PMI_GPR32, PMI_GPR64, PMI_GPR128}) &&
           "PartialMappingIdx's are incorrectly ordered");
    assert(checkPartialMappingIdx(PMI_FirstFPR, PMI_LastFPR,
                                  {PMI_FPR16, PMI_FPR32, PMI_FPR64, PMI_FPR128,
                                   PMI_FPR256, PMI_FPR512}) &&
           "PartialMappingIdx's are incorrectly ordered");
#define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB)                      \
  do {                                                                         \
    assert(                                                                    \
        checkPartialMap(PartialMappingIdx::Idx, ValStartIdx, ValLength, RB) && \
        #Idx " is incorrectly initialized");                                   \
  } while (false)
#define CHECK_VALUEMAP_IMPL(RBName, Size, Offset)                              \
  do {                                                                         \
    assert(checkValueMapImpl(PartialMappingIdx::PMI_##RBName##Size,            \
                             PartialMappingIdx::PMI_First##RBName, Size,       \
                             Offset) &&                                        \
           #RBName #Size " " #Offset " is incorrectly initialized");           \
  } while (false)

#define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
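// Instantiated once per bank/size pair inside the initializer; a
// representative subset (assumed here, mirroring the upstream file):
//   CHECK_VALUEMAP(GPR, 32);
//   CHECK_VALUEMAP(GPR, 64);
//   CHECK_VALUEMAP(FPR, 16);
//   CHECK_VALUEMAP(FPR, 128);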
#define CHECK_VALUEMAP_3OPS(RBName, Size)                                      \
  do {                                                                         \
    CHECK_VALUEMAP_IMPL(RBName, Size, 0);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 1);                                      \
    CHECK_VALUEMAP_IMPL(RBName, Size, 2);                                      \
  } while (false)
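// Exercised for the def/use/use shape shared by same-bank binary operations,
// e.g. (assumed): CHECK_VALUEMAP_3OPS(GPR, 32); CHECK_VALUEMAP_3OPS(FPR, 64);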
#define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size)                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_##RBNameDst##Size - PMI_Min;               \
    unsigned PartialMapSrcIdx = PMI_##RBNameSrc##Size - PMI_Min;               \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getCopyMapping(AArch64::RBNameDst##RegBankID,    \
                                             AArch64::RBNameSrc##RegBankID,    \
                                             TypeSize::getFixed(Size));        \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 &&                                        \
           #RBNameDst #Size " Dst is incorrectly initialized");                \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 &&                                        \
           #RBNameSrc #Size " Src is incorrectly initialized");                \
  } while (false)
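// One instantiation per (dst-bank, src-bank, size) copy shape, e.g. (assumed):
//   CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
//   CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
//   CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);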
#define CHECK_VALUEMAP_FPEXT(DstSize, SrcSize)                                 \
  do {                                                                         \
    unsigned PartialMapDstIdx = PMI_FPR##DstSize - PMI_Min;                    \
    unsigned PartialMapSrcIdx = PMI_FPR##SrcSize - PMI_Min;                    \
    (void)PartialMapDstIdx;                                                    \
    (void)PartialMapSrcIdx;                                                    \
    const ValueMapping *Map = getFPExtMapping(DstSize, SrcSize);               \
    (void)Map;                                                                 \
    assert(Map[0].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapDstIdx] &&  \
           Map[0].NumBreakDowns == 1 && "FPR" #DstSize                         \
           " Dst is incorrectly initialized");                                 \
    assert(Map[1].BreakDown ==                                                 \
               &AArch64GenRegisterBankInfo::PartMappings[PartialMapSrcIdx] &&  \
           Map[1].NumBreakDowns == 1 && "FPR" #SrcSize                         \
           " Src is incorrectly initialized");                                 \
  } while (false)
  llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
}
unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
                                           const RegisterBank &B,
                                           TypeSize Size) const {
  // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV, so cross-bank
  // copies are charged above a same-bank copy.
  // FIXME: This should be deduced from the scheduling model.
  if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
    ...
  if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
    ...
const RegisterBank &
AArch64RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                                LLT Ty) const {
  switch (RC.getID()) {
  case AArch64::GPR64sponlyRegClassID:
    return getRegBank(AArch64::GPRRegBankID);
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR: {
    // 32 and 64-bit or can be mapped on either FPR or GPR for the same cost.
    ...
    // If the instruction has any implicit-defs or uses, do not mess with it.
    if (MI.getNumOperands() != 3)
      break;
    ...
  }
  case TargetOpcode::G_BITCAST: {
    ...
    if (MI.getNumOperands() != 2)
      break;
    ...
    // The cross-bank alternatives fold the FMOV price into their cost:
    copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
             TypeSize::getFixed(Size)),
    ...
    copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
             TypeSize::getFixed(Size)),
    ...
  }
  case TargetOpcode::G_LOAD: {
    ...
    if (MI.getNumOperands() != 2)
      break;
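  // Each alternative is a full InstructionMapping with its own cost: for a
  // 32- or 64-bit G_OR the GPR and FPR variants cost the same, while the
  // G_BITCAST and G_LOAD variants fold copyCost() into the mapping cost so
  // that RegBankSelect's greedy mode can pick the cheaper assignment.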
void AArch64RegisterBankInfo::applyMappingImpl(
    MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();
  switch (MI.getOpcode()) {
  case TargetOpcode::G_OR:
  case TargetOpcode::G_BITCAST:
  case TargetOpcode::G_LOAD:
    // Those IDs must match getInstrAlternativeMappings.
    assert((OpdMapper.getInstrMapping().getID() >= 1 &&
            OpdMapper.getInstrMapping().getID() <= 4) &&
           "Don't know how to handle that ID");
    return applyDefaultMapping(OpdMapper);
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    // Extend smaller gpr operands to 32-bit.
    Builder.setInsertPt(*MI.getParent(), MI.getIterator());
    auto Ext = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(2).getReg());
    MRI.setRegBank(Ext.getReg(0), getRegBank(AArch64::GPRRegBankID));
    MI.getOperand(2).setReg(Ext.getReg(0));
    return applyDefaultMapping(OpdMapper);
  }
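  // (G_INSERT_VECTOR_ELT above: a lane insert (INS) needs a W/X register
  // source, so sub-32-bit scalars are any-extended to 32 bits and rebanked
  // onto GPR before the default mapping is applied.)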
  case AArch64::G_DUP: {
    // Extend smaller gpr to 32-bits.
    assert(MRI.getType(MI.getOperand(1).getReg()).getSizeInBits() < 32 &&
           "Expected sources smaller than 32-bits");
    Builder.setInsertPt(*MI.getParent(), MI.getIterator());

    Register ConstReg;
    auto ConstMI = MRI.getVRegDef(MI.getOperand(1).getReg());
    if (ConstMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      auto CstVal = ConstMI->getOperand(1).getCImm()->getValue();
      ConstReg =
          Builder.buildConstant(LLT::scalar(32), CstVal.sext(32)).getReg(0);
    } else {
      ConstReg = Builder.buildAnyExt(LLT::scalar(32), MI.getOperand(1).getReg())
                     .getReg(0);
    }
    MRI.setRegBank(ConstReg, getRegBank(AArch64::GPRRegBankID));
    MI.getOperand(1).setReg(ConstReg);
    return applyDefaultMapping(OpdMapper);
  }
  ...
  default:
    llvm_unreachable("Don't know how to handle that operation");
  }
}
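// (G_DUP above: rebuilding the constant as a 32-bit G_CONSTANT keeps the
// high bits defined, which a G_ANYEXT of the narrow value would not.)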
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
    const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  assert(NumOperands <= 3 &&
         "This code is for instructions with 3 or less operands");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  TypeSize Size = Ty.getSizeInBits();
  bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
  PartialMappingIdx RBIdx = IsFPR ? PMI_FirstFPR : PMI_FirstGPR;

  // Make sure all the operands are using similar size and type.
  for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
    LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
    assert(getRegBankBaseIdxOffset(RBIdx, OpTy.getSizeInBits()) ==
               getRegBankBaseIdxOffset(RBIdx, Ty.getSizeInBits()) &&
           "Operand has incompatible size");
    bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
    (void)OpIsFPR;
    assert(IsFPR == OpIsFPR && "Operand has incompatible type");
  }
static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
                          const MachineInstr &MI) {
  // TODO: Add more intrinsics.
  switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
  case Intrinsic::aarch64_neon_uaddlv:
  case Intrinsic::aarch64_neon_uaddv:
  case Intrinsic::aarch64_neon_saddv:
  case Intrinsic::aarch64_neon_umaxv:
  case Intrinsic::aarch64_neon_smaxv:
  case Intrinsic::aarch64_neon_uminv:
  case Intrinsic::aarch64_neon_sminv:
  case Intrinsic::aarch64_neon_faddv:
  case Intrinsic::aarch64_neon_fmaxv:
  case Intrinsic::aarch64_neon_fminv:
  case Intrinsic::aarch64_neon_fmaxnmv:
  case Intrinsic::aarch64_neon_fminnmv:
  case Intrinsic::aarch64_neon_fmulx:
  case Intrinsic::aarch64_neon_frecpe:
  case Intrinsic::aarch64_neon_frecps:
  case Intrinsic::aarch64_neon_frecpx:
  case Intrinsic::aarch64_neon_frsqrte:
  case Intrinsic::aarch64_neon_frsqrts:
  case Intrinsic::aarch64_neon_facge:
  case Intrinsic::aarch64_neon_facgt:
  case Intrinsic::aarch64_neon_fabd:
  case Intrinsic::aarch64_sisd_fabd:
  case Intrinsic::aarch64_neon_sqrdmlah:
  case Intrinsic::aarch64_neon_sqrdmlsh:
  case Intrinsic::aarch64_neon_sqrdmulh:
  case Intrinsic::aarch64_neon_sqadd:
  case Intrinsic::aarch64_neon_sqsub:
    return true;
  case Intrinsic::aarch64_neon_saddlv: {
    const LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    ...
  }
  default:
    return false;
  }
}
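// Values produced or consumed by the intrinsics above live in vector
// registers, so a use of one counts as an FP constraint when banks are
// assigned (see onlyUsesFP/onlyDefinesFP below).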
bool AArch64RegisterBankInfo::isPHIWithFPConstraints(
    const MachineInstr &MI, const MachineRegisterInfo &MRI,
    const TargetRegisterInfo &TRI, unsigned Depth) const {
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  return any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                [&](const MachineInstr &UseMI) {
                  if (onlyUsesFP(UseMI, MRI, TRI, Depth + 1))
                    return true;
                  return isPHIWithFPConstraints(UseMI, MRI, TRI, Depth + 1);
                });
}
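// MaxFPRSearchDepth bounds the recursion: PHI webs can be cyclic, and without
// a cap the walk through use chains could revisit nodes indefinitely.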
bool AArch64RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI,
                                               const TargetRegisterInfo &TRI,
                                               unsigned Depth) const {
  unsigned Op = MI.getOpcode();
  if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
    return true;

  // Do we have an explicit floating point instruction?
  if (isPreISelGenericFloatingPointOpcode(Op))
    return true;

  // No. Check if we have a copy-like instruction. If we do, then we could
  // still be fed by floating point instructions.
  if (Op != TargetOpcode::COPY && !MI.isPHI() &&
      !isPreISelGenericOptimizationHint(Op))
    return false;

  // Check if we already know the register bank.
  auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
  if (RB == &AArch64::FPRRegBank)
    return true;
  if (RB == &AArch64::GPRRegBank)
    return false;

  // We don't know anything. If we have a phi, we may be able to infer that
  // it will be assigned a fp type based off of its inputs.
  if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
    return false;

  return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
    return Op.isReg() &&
           onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
  });
}
bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI,
                                         unsigned Depth) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_FPTOSI_SAT:
  case TargetOpcode::G_FPTOUI_SAT:
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND:
    return true;
  case TargetOpcode::G_INTRINSIC:
    switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
      // Force FPR register bank for half types, as those types otherwise
      // don't get legalized correctly resulting in fp16 <-> gpr32 COPY's.
      return MRI.getType(MI.getOperand(2).getReg()) == LLT::float16();
    default:
      break;
    }
    break;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI, Depth);
}
bool AArch64RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI,
                                            const TargetRegisterInfo &TRI,
                                            unsigned Depth) const {
  switch (MI.getOpcode()) {
  case AArch64::G_DUP:
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
  case TargetOpcode::G_INSERT_VECTOR_ELT:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_BUILD_VECTOR_TRUNC:
    return true;
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld4lane:
    case Intrinsic::aarch64_neon_ld4r:
      return true;
    default:
      break;
    }
    break;
  default:
    break;
  }
  return hasFPConstraints(MI, MRI, TRI, Depth);
}
bool AArch64RegisterBankInfo::prefersFPUse(const MachineInstr &MI,
                                           const MachineRegisterInfo &MRI,
                                           const TargetRegisterInfo &TRI,
                                           unsigned Depth) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
    return MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() ==
           MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
  ...
}
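// Rationale: an equal-width G_SITOFP/G_UITOFP can select the FPR-to-FPR form
// of SCVTF/UCVTF, so keeping the source on FPR saves a GPR->FPR copy.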
bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
  // GMemOperation because we also want to match indexed loads.
  auto *MemOp = cast<GMemOperation>(&MI);
  const Value *LdVal = MemOp->getMMO().getValue();
  if (!LdVal)
    return false;

  Type *EltTy = nullptr;
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(LdVal)) {
    EltTy = GV->getValueType();
    // Look at the first element of the struct to determine the type we are
    // loading.
    while (StructType *StructEltTy = dyn_cast<StructType>(EltTy)) {
      if (StructEltTy->getNumElements() == 0)
        break;
      EltTy = StructEltTy->getTypeAtIndex(0U);
    }
    // Look at the first element of the array to determine its type.
    if (isa<ArrayType>(EltTy))
      EltTy = EltTy->getArrayElementType();
  } else if (!isa<Constant>(LdVal)) {
    // FIXME: grubbing around uses is pretty ugly, but with no more
    // `getPointerElementType` there's not much else we can do.
    for (const auto *LdUser : LdVal->users()) {
      if (isa<LoadInst>(LdUser)) {
        EltTy = LdUser->getType();
        break;
      }
      if (isa<StoreInst>(LdUser) && LdUser->getOperand(1) == LdVal) {
        EltTy = LdUser->getOperand(0)->getType();
        break;
      }
    }
  }
  return EltTy && EltTy->isFPOrFPVectorTy();
}
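// The MMO's IR Value is only a hint: with opaque pointers the pointee type
// cannot be queried directly, so the loop above guesses it from a
// neighbouring load or store of the same pointer.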
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const unsigned Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if ((Opc != TargetOpcode::COPY && !isPreISelGenericOpcode(Opc)) ||
      Opc == TargetOpcode::G_PHI) {
    const RegisterBankInfo::InstructionMapping &Mapping =
        getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();

  switch (Opc) {
    // G_{F|S|U}REM are not listed because they are not legal.
    // Arithmetic ops.
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
    // Bitwise ops.
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:
    // Floating point ops.
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FMINIMUM:
    return getSameKindOfOperandsMapping(MI);
  case TargetOpcode::G_FPEXT: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    return getInstructionMapping(
        DefaultMappingID, /*Cost*/ 1,
        getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
        /*NumOperands*/ 2);
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
      return getInstructionMapping(DefaultMappingID, 1,
                                   &ValMappings[Shift64Imm], 3);
    return getSameKindOfOperandsMapping(MI);
  }
  case TargetOpcode::COPY: {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    // Check if one of the registers is not a generic register.
    if ((DstReg.isPhysical() || !MRI.getType(DstReg).isValid()) ||
        (SrcReg.isPhysical() || !MRI.getType(SrcReg).isValid())) {
      const RegisterBank *DstRB = getRegBank(DstReg, MRI, TRI);
      const RegisterBank *SrcRB = getRegBank(SrcReg, MRI, TRI);
      if (!DstRB)
        DstRB = SrcRB;
      else if (!SrcRB)
        SrcRB = DstRB;
      // If both are null, both registers are generic; we shouldn't be here.
      assert(DstRB && SrcRB && "Both RegBank were nullptr");
      ...
    }
    // Both registers are generic, use G_BITCAST.
    [[fallthrough]];
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    TypeSize Size = DstTy.getSizeInBits();
    bool DstIsGPR = !DstTy.isVector() && DstTy.getSizeInBits() <= 64;
    bool SrcIsGPR = !SrcTy.isVector() && SrcTy.getSizeInBits() <= 64;
    const RegisterBank &DstRB =
        DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    const RegisterBank &SrcRB =
        SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
    return getInstructionMapping(
        DefaultMappingID, copyCost(DstRB, SrcRB, Size),
        getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
        /*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();
  unsigned MappingID = DefaultMappingID;

  // Track the size and bank of each register. We don't do partial mappings.
  SmallVector<unsigned, 4> OpSize(NumOperands);
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      continue;

    LLT Ty = MRI.getType(MO.getReg());
    if (!Ty.isValid())
      continue;
    OpSize[Idx] = Ty.getSizeInBits().getKnownMinValue();
    // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
    ...
  }

  unsigned Cost = 1;
  // Refine the operand banks for specific opcodes.
  switch (Opc) {
  case AArch64::G_DUP: {
    Register ScalarReg = MI.getOperand(1).getReg();
    LLT ScalarTy = MRI.getType(ScalarReg);
    auto ScalarDef = MRI.getVRegDef(ScalarReg);
    // We want to select dup(load) into LD1R.
    if (ScalarDef->getOpcode() == TargetOpcode::G_LOAD)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    // s8 is an exception for G_DUP, which we always want on gpr.
    else if (ScalarTy.getSizeInBits() != 8 &&
             (getRegBank(ScalarReg, MRI, TRI) == &AArch64::FPRRegBank ||
              onlyDefinesFP(*ScalarDef, MRI, TRI)))
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    else
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  }
  case TargetOpcode::G_TRUNC: {
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (!SrcTy.isVector() && SrcTy.getSizeInBits() == 128)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    // Integer to FP conversions don't necessarily happen between GPR -> FPR
    // regbanks. They can also be done within an FPR register.
    Register SrcReg = MI.getOperand(1).getReg();
    if (getRegBank(SrcReg, MRI, TRI) == &AArch64::FPRRegBank &&
        MRI.getType(SrcReg).getSizeInBits() ==
            MRI.getType(MI.getOperand(0).getReg()).getSizeInBits())
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    else
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR};
    break;
  }
  case TargetOpcode::G_FPTOSI_SAT:
  case TargetOpcode::G_FPTOUI_SAT: {
    LLT DstType = MRI.getType(MI.getOperand(0).getReg());
    ...
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI:
  case TargetOpcode::G_INTRINSIC_LRINT:
  case TargetOpcode::G_INTRINSIC_LLRINT:
    if (MRI.getType(MI.getOperand(0).getReg()).isVector())
      break;
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_FCMP: {
    // If the result is a vector, it must use the FPR bank.
    PartialMappingIdx Idx0 =
        MRI.getType(MI.getOperand(0).getReg()).isVector() ? PMI_FirstFPR
                                                          : PMI_FirstGPR;
    OpRegBankIdx = {Idx0,
                    /* Predicate */ PMI_None,
                    /* Other */ PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_BITCAST:
    // This is going to be a cross register bank copy and this is expensive.
    if (OpRegBankIdx[0] != OpRegBankIdx[1])
      Cost = copyCost(...);
    break;
  case TargetOpcode::G_LOAD: {
    // Loading in the vector unit is slightly more expensive.
    ...
    // Atomics always use GPR destinations.
    if (cast<GLoad>(MI).isAtomic()) {
      OpRegBankIdx[0] = PMI_FirstGPR;
      break;
    }
    // Try to guess the type of the load from the MMO.
    if (isLoadFromFPType(MI)) {
      OpRegBankIdx[0] = PMI_FirstFPR;
      break;
    }
    // Check if that load feeds fp instructions; if so, prefer the FPR
    // mapping instead of blindly mapping every scalar to GPR.
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](const MachineInstr &UseMI) {
                 if (isPHIWithFPConstraints(UseMI, MRI, TRI))
                   return true;
                 return onlyUsesFP(UseMI, MRI, TRI) ||
                        prefersFPUse(UseMI, MRI, TRI);
               }))
      OpRegBankIdx[0] = PMI_FirstFPR;
    break;
  }
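  // (G_LOAD above: the use-scan treats "feeds an FP instruction, possibly
  // through a PHI" as evidence that the IR value was floating point — had it
  // been integer, a bitcast would appear before the FP user.)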
  case TargetOpcode::G_STORE:
    // Check if that store is fed by fp instructions.
    ...
  case TargetOpcode::G_INDEXED_STORE:
    // Likewise for the written value of an indexed store.
    ...
  case TargetOpcode::G_INDEXED_SEXTLOAD:
  case TargetOpcode::G_INDEXED_ZEXTLOAD:
    // The extended integer result always goes to a GPR.
    OpRegBankIdx[0] = PMI_FirstGPR;
    break;
  case TargetOpcode::G_INDEXED_LOAD: {
    if (isLoadFromFPType(MI))
      OpRegBankIdx[0] = PMI_FirstFPR;
    break;
  }
  case TargetOpcode::G_SELECT: {
    // If we're taking in vectors, we have no choice but to put everything on
    // FPRs, except for the condition, which must always be on a GPR.
    LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
    if (SrcTy.isVector()) {
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR, PMI_FirstFPR, PMI_FirstFPR};
      break;
    }
    // Try to minimize the number of copies: count the FP constraints on the
    // uses of the result and on the defs of the two source values.
    unsigned NumFP = 0;
    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](const MachineInstr &UseMI) {
                 return onlyUsesFP(UseMI, MRI, TRI);
               }))
      ++NumFP;
    for (unsigned Idx = 2; Idx < 4; ++Idx) {
      Register VReg = MI.getOperand(Idx).getReg();
      if (getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank ||
          onlyDefinesFP(*MRI.getVRegDef(VReg), MRI, TRI))
        ++NumFP;
    }
    // With more FP constraints than not, move everything over to FPR.
    if (NumFP >= 2)
      OpRegBankIdx = {PMI_FirstFPR, PMI_FirstGPR, PMI_FirstFPR, PMI_FirstFPR};
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    // If the first operand belongs to a FPR register bank, then make sure
    // that we preserve that.
    if (OpRegBankIdx[0] != PMI_FirstGPR)
      break;
    LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands() - 1).getReg());
    // Unmerging scalars out of a vector should always use FPR; likewise if
    // any of the uses are FP instructions.
    if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
        any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
               [&](MachineInstr &UseMI) {
                 return onlyUsesFP(UseMI, MRI, TRI);
               })) {
      // Set the register bank of every operand to FPR.
      for (unsigned Idx = 0, NumOperands = MI.getNumOperands();
           Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    // Destination and source need to be FPRs; the index must be a GPR.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstGPR};
    break;
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    // The vector operands go on FPR; the inserted element may be on either
    // bank, and an i8/i16 element headed for GPR is widened to 32 bits in
    // applyMappingImpl (see above). The index is a GPR.
    LLT Ty = MRI.getType(MI.getOperand(2).getReg());
    ...
    break;
  }
  case TargetOpcode::G_EXTRACT: {
    // For s128 sources we have to use fpr unless we know otherwise.
    auto Src = MI.getOperand(1).getReg();
    LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
    if (SrcTy.getSizeInBits() != 128)
      break;
    auto Idx = MRI.getRegClassOrNull(Src) == &AArch64::XSeqPairsClassRegClass
                   ? PMI_FirstGPR
                   : PMI_FirstFPR;
    OpRegBankIdx[0] = Idx;
    OpRegBankIdx[1] = Idx;
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // If the first source operand belongs to a FPR register bank, then make
    // sure that we preserve that.
    if (OpRegBankIdx[1] != PMI_FirstGPR)
      break;
    Register VReg = MI.getOperand(1).getReg();
    if (!VReg)
      break;
    const LLT SrcTy = MRI.getType(VReg);
    // Keep the default mapping when every source is a G_CONSTANT.
    if (all_of(MI.operands(), [&](const MachineOperand &Op) {
          return Op.isDef() || MRI.getVRegDef(Op.getReg())->getOpcode() ==
                                   TargetOpcode::G_CONSTANT;
        }))
      break;
    // A floating-point def, a sub-32-bit scalar type, or a source already on
    // FPR moves the whole build_vector onto FPR.
    if (isPreISelGenericFloatingPointOpcode(
            MRI.getVRegDef(VReg)->getOpcode()) ||
        SrcTy.getSizeInBits() < 32 ||
        getRegBank(VReg, MRI, TRI) == &AArch64::FPRRegBank) {
      unsigned NumOperands = MI.getNumOperands();
      for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
        OpRegBankIdx[Idx] = PMI_FirstFPR;
    }
    break;
  }
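  // (G_BUILD_VECTOR above, worked example: in
  //   %v:_(<4 x s16>) = G_BUILD_VECTOR %a, %b, %c, %d
  // the s16 elements have no legal scalar GPR form, so every operand is
  // forced onto FPR.)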
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN:
    // Reductions produce a scalar value from a vector; the scalar should be
    // on FPR, as should the vector source.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
    // Sequential reductions also take a scalar accumulator operand.
    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
    break;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
    // Check if the intrinsic has any constraints on its register banks.
    switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
    case Intrinsic::aarch64_neon_vcvtfxs2fp:
    case Intrinsic::aarch64_neon_vcvtfxu2fp:
    case Intrinsic::aarch64_neon_vcvtfp2fxs:
    case Intrinsic::aarch64_neon_vcvtfp2fxu:
      // Fixed-point <-> FP conversions keep their data operands on FPR.
      ...
      break;
    default: {
      if (!isFPIntrinsic(MRI, MI))
        break;
      // For FP intrinsics, put every register def and explicit use on FPR.
      unsigned Idx = 0;
      for (const auto &Op : MI.defs()) {
        ...
      }
      Idx += MI.getNumExplicitDefs();
      for (const auto &Op : MI.explicit_uses()) {
        ...
      }
      break;
    }
    }
    break;
  }
  case TargetOpcode::G_LROUND:
  case TargetOpcode::G_LLROUND: {
    // Source is always floating point and destination is always integer.
    OpRegBankIdx = {PMI_FirstGPR, PMI_FirstFPR};
    break;
  }
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
      LLT Ty = MRI.getType(MI.getOperand(Idx).getReg());
      if (!Ty.isValid())
        continue;
      auto Mapping =
          getValueMapping(OpRegBankIdx[Idx], TypeSize::getFixed(OpSize[Idx]));
      if (!Mapping->isValid())
        return getInvalidInstructionMapping();

      OpdsMapping[Idx] = Mapping;
    }
  }
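  // The function closes by interning the computed per-operand mapping; a
  // sketch, following the upstream pattern:
  //   return getInstructionMapping(MappingID, Cost,
  //                                getOperandsMapping(OpdsMapping),
  //                                NumOperands);
}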