89 enum BaseKind { RegBase, FrameIndexBase };
92 BaseKind Kind = RegBase;
104 void setKind(BaseKind K) { Kind =
K; }
105 BaseKind getKind()
const {
return Kind; }
106 bool isRegBase()
const {
return Kind == RegBase; }
107 bool isFIBase()
const {
return Kind == FrameIndexBase; }
110 assert(isRegBase() &&
"Invalid base register access!");
115 assert(isRegBase() &&
"Invalid base register access!");
120 assert(isFIBase() &&
"Invalid base frame index access!");
125 assert(isFIBase() &&
"Invalid base frame index access!");
129 void setOffset(
int O) { Offset =
O; }
133class ARMFastISel final :
public FastISel {
136 const ARMSubtarget *Subtarget;
138 const ARMBaseInstrInfo &TII;
139 const ARMTargetLowering &TLI;
140 const ARMBaseTargetMachine &TM;
141 ARMFunctionInfo *AFI;
145 LLVMContext *Context;
148 explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
149 const TargetLibraryInfo *libInfo,
150 const LibcallLoweringInfo *libcallLowering)
151 : FastISel(funcInfo, libInfo, libcallLowering),
152 Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
154 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()),
156 AFI = funcInfo.
MF->
getInfo<ARMFunctionInfo>();
157 isThumb2 = AFI->isThumbFunction();
164 Register fastEmitInst_r(
unsigned MachineInstOpcode,
165 const TargetRegisterClass *RC,
Register Op0);
166 Register fastEmitInst_rr(
unsigned MachineInstOpcode,
167 const TargetRegisterClass *RC,
Register Op0,
169 Register fastEmitInst_ri(
unsigned MachineInstOpcode,
170 const TargetRegisterClass *RC,
Register Op0,
172 Register fastEmitInst_i(
unsigned MachineInstOpcode,
173 const TargetRegisterClass *RC, uint64_t Imm);
177 bool fastSelectInstruction(
const Instruction *
I)
override;
178 Register fastMaterializeConstant(
const Constant *
C)
override;
179 Register fastMaterializeAlloca(
const AllocaInst *AI)
override;
180 bool tryToFoldLoadIntoMI(MachineInstr *
MI,
unsigned OpNo,
181 const LoadInst *LI)
override;
182 bool fastLowerArguments()
override;
184#include "ARMGenFastISel.inc"
188 bool SelectLoad(
const Instruction *
I);
189 bool SelectStore(
const Instruction *
I);
190 bool SelectBranch(
const Instruction *
I);
191 bool SelectIndirectBr(
const Instruction *
I);
192 bool SelectCmp(
const Instruction *
I);
193 bool SelectFPExt(
const Instruction *
I);
194 bool SelectFPTrunc(
const Instruction *
I);
195 bool SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode);
196 bool SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode);
197 bool SelectIToFP(
const Instruction *
I,
bool isSigned);
198 bool SelectFPToI(
const Instruction *
I,
bool isSigned);
199 bool SelectDiv(
const Instruction *
I,
bool isSigned);
200 bool SelectRem(
const Instruction *
I,
bool isSigned);
201 bool SelectCall(
const Instruction *
I,
const char *IntrMemName);
202 bool SelectIntrinsicCall(
const IntrinsicInst &
I);
203 bool SelectSelect(
const Instruction *
I);
204 bool SelectRet(
const Instruction *
I);
205 bool SelectTrunc(
const Instruction *
I);
206 bool SelectIntExt(
const Instruction *
I);
211 bool isPositionIndependent()
const;
212 bool isTypeLegal(
Type *Ty, MVT &VT);
213 bool isLoadTypeLegal(
Type *Ty, MVT &VT);
214 bool ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
217 MaybeAlign Alignment = std::nullopt,
bool isZExt =
true,
218 bool allocReg =
true);
220 MaybeAlign Alignment = std::nullopt);
221 bool ARMComputeAddress(
const Value *Obj,
Address &Addr);
222 void ARMSimplifyAddress(
Address &Addr, MVT VT,
bool useAM3);
223 bool ARMIsMemCpySmall(uint64_t Len);
224 bool ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
225 MaybeAlign Alignment);
226 Register ARMEmitIntExt(MVT SrcVT,
Register SrcReg, MVT DestVT,
bool isZExt);
227 Register ARMMaterializeFP(
const ConstantFP *CFP, MVT VT);
228 Register ARMMaterializeInt(
const Constant *
C, MVT VT);
229 Register ARMMaterializeGV(
const GlobalValue *GV, MVT VT);
232 unsigned ARMSelectCallOp(
bool UseReg);
233 Register ARMLowerPICELF(
const GlobalValue *GV, MVT VT);
235 const TargetLowering *getTargetLowering() {
return &TLI; }
239 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
242 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
243 SmallVectorImpl<Register> &ArgRegs,
244 SmallVectorImpl<MVT> &ArgVTs,
245 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
246 SmallVectorImpl<Register> &RegArgs,
250 Register getLibcallReg(
const Twine &Name);
251 bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
252 const Instruction *
I, CallingConv::ID CC,
253 unsigned &NumBytes,
bool isVarArg);
254 bool ARMEmitLibcall(
const Instruction *
I, RTLIB::Libcall
Call);
258 bool isARMNEONPred(
const MachineInstr *
MI);
259 bool DefinesOptionalPredicate(MachineInstr *
MI,
bool *CPSR);
260 const MachineInstrBuilder &AddOptionalDefs(
const MachineInstrBuilder &MIB);
261 void AddLoadStoreOperands(MVT VT,
Address &Addr,
262 const MachineInstrBuilder &MIB,
// Decide whether MI carries an optional definition of the condition flags.
// Walks MI's register-def operands looking for ARM::CPSR; the visible guard
// bails out early when the instruction has no optional def at all.
// NOTE(review): this extract is missing interior lines of the function
// (original lines 273-275 and 279-283); presumably *CPSR is set when a CPSR
// def is found and the result is returned -- confirm against the full file.
271bool ARMFastISel::DefinesOptionalPredicate(
MachineInstr *
MI,
bool *CPSR) {
// No optional def => nothing to report.
  272   if (!
MI->hasOptionalDef())
// Scan every operand; only register *definitions* are of interest.
  276   for (
const MachineOperand &MO :
MI->operands()) {
  277     if (!MO.isReg() || !MO.isDef())
continue;
// The flags register on ARM is CPSR.
  278     if (MO.getReg() == ARM::CPSR)
// Determine whether MI is a NEON instruction whose predicate operand must be
// treated specially.  The instruction description (MCInstrDesc) drives the
// check; one visible path simply defers to MI->isPredicable(), and the loop
// below inspects each declared operand for a predicate slot.
// NOTE(review): interior lines (original 286-291, 294-296) are missing from
// this extract; the exact conditions around the early return are not visible.
284bool ARMFastISel::isARMNEONPred(
const MachineInstr *
MI) {
  285   const MCInstrDesc &MCID =
MI->getDesc();
  290     return MI->isPredicable();
// Look for an explicitly declared predicate operand in the descriptor.
  292   for (
const MCOperandInfo &opInfo : MCID.
operands())
  293     if (opInfo.isPredicate())
// Append the optional predicate / optional-def operands that most ARM
// instructions expect to the freshly built instruction MIB, then hand the
// builder back so calls can be chained.  Uses isARMNEONPred() and
// DefinesOptionalPredicate() (both defined above) to decide what to add.
// NOTE(review): interior lines (original 307-316, 318-321) are missing from
// this extract -- the actual operand-adding statements are not visible here.
304const MachineInstrBuilder &
305ARMFastISel::AddOptionalDefs(
const MachineInstrBuilder &MIB) {
  306   MachineInstr *
MI = &*MIB;
// NEON instructions need their predicate handled differently.
  311   if (isARMNEONPred(
MI))
// CPSR is queried via the sibling helper; its use is in a missing line.
  317   if (DefinesOptionalPredicate(
MI, &CPSR))
322Register ARMFastISel::fastEmitInst_r(
unsigned MachineInstOpcode,
323 const TargetRegisterClass *RC,
325 Register ResultReg = createResultReg(RC);
326 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
331 if (
II.getNumDefs() >= 1) {
332 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II,
335 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
337 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
338 TII.get(TargetOpcode::COPY), ResultReg)
344Register ARMFastISel::fastEmitInst_rr(
unsigned MachineInstOpcode,
345 const TargetRegisterClass *RC,
347 Register ResultReg = createResultReg(RC);
348 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
355 if (
II.getNumDefs() >= 1) {
357 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II, ResultReg)
361 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
364 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
365 TII.get(TargetOpcode::COPY), ResultReg)
371Register ARMFastISel::fastEmitInst_ri(
unsigned MachineInstOpcode,
372 const TargetRegisterClass *RC,
374 Register ResultReg = createResultReg(RC);
375 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
380 if (
II.getNumDefs() >= 1) {
382 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II, ResultReg)
386 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
389 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
390 TII.get(TargetOpcode::COPY), ResultReg)
396Register ARMFastISel::fastEmitInst_i(
unsigned MachineInstOpcode,
397 const TargetRegisterClass *RC,
399 Register ResultReg = createResultReg(RC);
400 const MCInstrDesc &
II =
TII.get(MachineInstOpcode);
402 if (
II.getNumDefs() >= 1) {
403 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II,
406 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
408 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
409 TII.get(TargetOpcode::COPY), ResultReg)
422 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
423 TII.get(ARM::VMOVSR), MoveReg)
433 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
434 TII.get(ARM::VMOVRS), MoveReg)
442Register ARMFastISel::ARMMaterializeFP(
const ConstantFP *CFP, MVT VT) {
443 if (VT != MVT::f32 && VT != MVT::f64)
447 bool is64bit = VT == MVT::f64;
462 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
468 if (!Subtarget->hasVFP2Base())
return false;
472 unsigned Idx = MCP.getConstantPoolIndex(
cast<Constant>(CFP), Alignment);
474 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
478 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), DestReg)
484Register ARMFastISel::ARMMaterializeInt(
const Constant *
C, MVT VT) {
485 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
492 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
493 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
495 Register ImmReg = createResultReg(RC);
496 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
503 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->
isNegative()) {
508 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
509 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
511 Register ImmReg = createResultReg(RC);
512 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
520 if (Subtarget->useMovt())
531 Align Alignment =
DL.getPrefTypeAlign(
C->getType());
532 unsigned Idx = MCP.getConstantPoolIndex(
C, Alignment);
535 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
536 TII.get(ARM::t2LDRpci), ResultReg)
541 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
542 TII.get(ARM::LDRcp), ResultReg)
549bool ARMFastISel::isPositionIndependent()
const {
553Register ARMFastISel::ARMMaterializeGV(
const GlobalValue *GV, MVT VT) {
559 if (Subtarget->isROPI() || Subtarget->isRWPI())
562 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
563 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
565 Register DestReg = createResultReg(RC);
570 if (!Subtarget->isTargetMachO() && IsThreadLocal)
573 bool IsPositionIndependent = isPositionIndependent();
576 if (Subtarget->useMovt() &&
577 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
579 unsigned char TF = 0;
580 if (Subtarget->isTargetMachO())
583 if (IsPositionIndependent)
584 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
586 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
587 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
593 if (Subtarget->isTargetELF() && IsPositionIndependent)
594 return ARMLowerPICELF(GV, VT);
597 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
602 unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);
605 MachineInstrBuilder MIB;
607 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
608 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc),
610 if (IsPositionIndependent)
612 AddOptionalDefs(MIB);
616 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
617 TII.get(ARM::LDRcp), DestReg)
620 AddOptionalDefs(MIB);
622 if (IsPositionIndependent) {
623 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
626 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
627 MIMD,
TII.get(
Opc), NewDestReg)
630 AddOptionalDefs(MIB);
636 if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
637 (Subtarget->isTargetMachO() && IsIndirect)) {
638 MachineInstrBuilder MIB;
641 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
642 TII.get(ARM::t2LDRi12), NewDestReg)
646 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
647 TII.get(ARM::LDRi12), NewDestReg)
650 DestReg = NewDestReg;
651 AddOptionalDefs(MIB);
657Register ARMFastISel::fastMaterializeConstant(
const Constant *
C) {
666 return ARMMaterializeFP(CFP, VT);
668 return ARMMaterializeGV(GV, VT);
670 return ARMMaterializeInt(
C, VT);
677Register ARMFastISel::fastMaterializeAlloca(
const AllocaInst *AI) {
679 if (!FuncInfo.StaticAllocaMap.count(AI))
683 if (!isLoadTypeLegal(AI->
getType(), VT))
686 DenseMap<const AllocaInst*, int>::iterator
SI =
687 FuncInfo.StaticAllocaMap.find(AI);
691 if (SI != FuncInfo.StaticAllocaMap.end()) {
692 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
694 Register ResultReg = createResultReg(RC);
697 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
// Check whether Ty maps to a simple machine value type the fast-isel path can
// handle; on success VT receives that MVT.  Rejects MVT::Other and any
// extended (non-simple) EVT.
// NOTE(review): the computation of 'evt' and the success path (original lines
// 708-710, 712-716) are missing from this extract.
707bool ARMFastISel::isTypeLegal(
Type *Ty, MVT &VT) {
// Non-simple / unknown types cannot be selected here.
  711   if (evt == MVT::Other || !evt.
isSimple())
return false;
// Like isTypeLegal(), but additionally accepts the narrow integer types
// (i1/i8/i16) that loads can produce, since ARM loads can zero-/sign-extend
// them to a legal width.
// NOTE(review): the lines following the i1/i8/i16 test (original 725 onward)
// are missing from this extract.
719bool ARMFastISel::isLoadTypeLegal(
Type *Ty, MVT &VT) {
// Fully legal types need no special casing.
  720   if (isTypeLegal(Ty, VT))
return true;
// Narrow integers are loadable via extending loads.
  724   if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
731bool ARMFastISel::ARMComputeAddress(
const Value *Obj,
Address &Addr) {
733 const User *
U =
nullptr;
734 unsigned Opcode = Instruction::UserOp1;
738 if (FuncInfo.StaticAllocaMap.count(
static_cast<const AllocaInst *
>(Obj)) ||
739 FuncInfo.getMBB(
I->getParent()) == FuncInfo.MBB) {
740 Opcode =
I->getOpcode();
744 Opcode =
C->getOpcode();
749 if (Ty->getAddressSpace() > 255)
757 case Instruction::BitCast:
759 return ARMComputeAddress(
U->getOperand(0), Addr);
760 case Instruction::IntToPtr:
764 return ARMComputeAddress(
U->getOperand(0), Addr);
766 case Instruction::PtrToInt:
769 return ARMComputeAddress(
U->getOperand(0), Addr);
771 case Instruction::GetElementPtr: {
773 int TmpOffset = Addr.getOffset();
779 i != e; ++i, ++GTI) {
782 const StructLayout *SL =
DL.getStructLayout(STy);
793 if (canFoldAddIntoGEP(U,
Op)) {
803 goto unsupported_gep;
809 Addr.setOffset(TmpOffset);
810 if (ARMComputeAddress(
U->getOperand(0), Addr))
return true;
818 case Instruction::Alloca: {
820 DenseMap<const AllocaInst*, int>::iterator
SI =
821 FuncInfo.StaticAllocaMap.find(AI);
822 if (SI != FuncInfo.StaticAllocaMap.end()) {
823 Addr.setKind(Address::FrameIndexBase);
824 Addr.setFI(
SI->second);
833 Addr.setReg(getRegForValue(Obj));
834 return Addr.getReg();
837void ARMFastISel::ARMSimplifyAddress(
Address &Addr, MVT VT,
bool useAM3) {
838 bool needsLowering =
false;
847 needsLowering = ((Addr.getOffset() & 0xfff) != Addr.getOffset());
849 if (needsLowering && isThumb2)
850 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.getOffset() < 0 &&
851 Addr.getOffset() > -256);
854 needsLowering = (Addr.getOffset() > 255 || Addr.getOffset() < -255);
860 needsLowering = ((Addr.getOffset() & 0xff) != Addr.getOffset());
867 if (needsLowering && Addr.isFIBase()) {
868 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
870 Register ResultReg = createResultReg(RC);
871 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
873 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), ResultReg)
876 Addr.setKind(Address::RegBase);
877 Addr.setReg(ResultReg);
883 Addr.setReg(fastEmit_ri_(MVT::i32,
ISD::ADD, Addr.getReg(),
884 Addr.getOffset(), MVT::i32));
889void ARMFastISel::AddLoadStoreOperands(MVT VT,
Address &Addr,
890 const MachineInstrBuilder &MIB,
896 Addr.setOffset(Addr.getOffset() / 4);
899 if (Addr.isFIBase()) {
900 int FI = Addr.getFI();
901 int Offset = Addr.getOffset();
902 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
904 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
911 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())
916 MIB.
addImm(Addr.getOffset());
921 MIB.
addReg(Addr.getReg());
926 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())
931 MIB.
addImm(Addr.getOffset());
934 AddOptionalDefs(MIB);
937bool ARMFastISel::ARMEmitLoad(MVT VT,
Register &ResultReg,
Address &Addr,
938 MaybeAlign Alignment,
bool isZExt,
942 bool needVMOV =
false;
943 const TargetRegisterClass *RC;
946 default:
return false;
950 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
951 Subtarget->hasV6T2Ops())
952 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
954 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
963 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
966 if (Alignment && *Alignment <
Align(2) &&
967 !Subtarget->allowsUnalignedMem())
971 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
972 Subtarget->hasV6T2Ops())
973 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
975 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
977 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
980 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
983 if (Alignment && *Alignment <
Align(4) &&
984 !Subtarget->allowsUnalignedMem())
988 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
989 Subtarget->hasV6T2Ops())
996 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
999 if (!Subtarget->hasVFP2Base())
return false;
1001 if (Alignment && *Alignment <
Align(4)) {
1004 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
1005 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
1013 if (!Subtarget->hasVFP2Base())
return false;
1016 if (Alignment && *Alignment <
Align(4))
1024 ARMSimplifyAddress(Addr, VT, useAM3);
1028 ResultReg = createResultReg(RC);
1029 assert(ResultReg.
isVirtual() &&
"Expected an allocated virtual register.");
1030 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1031 TII.get(
Opc), ResultReg);
1038 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1039 TII.get(ARM::VMOVSR), MoveReg)
1046bool ARMFastISel::SelectLoad(
const Instruction *
I) {
1051 const Value *SV =
I->getOperand(0);
1056 if (Arg->hasSwiftErrorAttr())
1061 if (Alloca->isSwiftError())
1068 if (!isLoadTypeLegal(
I->getType(), VT))
1073 if (!ARMComputeAddress(
I->getOperand(0), Addr))
return false;
1078 updateValueMap(
I, ResultReg);
1083 MaybeAlign Alignment) {
1085 bool useAM3 =
false;
1088 default:
return false;
1090 Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1091 : &ARM::GPRRegClass);
1092 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1094 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1102 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1103 Subtarget->hasV6T2Ops())
1104 StrOpc = ARM::t2STRBi8;
1106 StrOpc = ARM::t2STRBi12;
1108 StrOpc = ARM::STRBi12;
1112 if (Alignment && *Alignment <
Align(2) &&
1113 !Subtarget->allowsUnalignedMem())
1117 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1118 Subtarget->hasV6T2Ops())
1119 StrOpc = ARM::t2STRHi8;
1121 StrOpc = ARM::t2STRHi12;
1128 if (Alignment && *Alignment <
Align(4) &&
1129 !Subtarget->allowsUnalignedMem())
1133 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&
1134 Subtarget->hasV6T2Ops())
1135 StrOpc = ARM::t2STRi8;
1137 StrOpc = ARM::t2STRi12;
1139 StrOpc = ARM::STRi12;
1143 if (!Subtarget->hasVFP2Base())
return false;
1145 if (Alignment && *Alignment <
Align(4)) {
1147 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1148 TII.get(ARM::VMOVRS), MoveReg)
1152 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1154 StrOpc = ARM::VSTRS;
1159 if (!Subtarget->hasVFP2Base())
return false;
1162 if (Alignment && *Alignment <
Align(4))
1165 StrOpc = ARM::VSTRD;
1169 ARMSimplifyAddress(Addr, VT, useAM3);
1173 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1180bool ARMFastISel::SelectStore(
const Instruction *
I) {
1181 Value *Op0 =
I->getOperand(0);
1188 const Value *PtrV =
I->getOperand(1);
1193 if (Arg->hasSwiftErrorAttr())
1198 if (Alloca->isSwiftError())
1205 if (!isLoadTypeLegal(
I->getOperand(0)->getType(), VT))
1209 SrcReg = getRegForValue(Op0);
1215 if (!ARMComputeAddress(
I->getOperand(1), Addr))
1270bool ARMFastISel::SelectBranch(
const Instruction *
I) {
1273 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->
getSuccessor(1));
1280 if (CI->
hasOneUse() && (CI->getParent() ==
I->getParent())) {
1284 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1298 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1299 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1306 if (TI->hasOneUse() && TI->getParent() ==
I->getParent() &&
1307 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1308 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1309 Register OpReg = getRegForValue(TI->getOperand(0));
1311 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1316 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1321 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1322 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1328 }
else if (
const ConstantInt *CI =
1332 fastEmitBranch(Target, MIMD.getDL());
1347 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1350 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1355 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1360 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1361 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1367bool ARMFastISel::SelectIndirectBr(
const Instruction *
I) {
1368 Register AddrReg = getRegForValue(
I->getOperand(0));
1372 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1373 assert(isThumb2 || Subtarget->hasV4TOps());
1375 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1379 for (
const BasicBlock *SuccBB :
IB->successors())
1380 FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));
1385bool ARMFastISel::ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
1389 if (!SrcEVT.
isSimple())
return false;
1392 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1395 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1401 bool UseImm =
false;
1402 bool isNegativeImm =
false;
1406 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
1408 const APInt &CIVal = ConstInt->getValue();
1413 if (Imm < 0 && Imm != (
int)0x80000000) {
1414 isNegativeImm =
true;
1421 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
1422 if (ConstFP->isZero() && !ConstFP->isNegative())
1428 bool needsExt =
false;
1430 default:
return false;
1434 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1438 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
1448 CmpOpc = ARM::t2CMPrr;
1450 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1453 CmpOpc = ARM::CMPrr;
1455 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1460 Register SrcReg1 = getRegForValue(Src1Value);
1466 SrcReg2 = getRegForValue(Src2Value);
1473 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
1477 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
1483 const MCInstrDesc &
II =
TII.get(CmpOpc);
1487 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
1490 MachineInstrBuilder MIB;
1491 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
II)
1497 AddOptionalDefs(MIB);
1503 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1504 TII.get(ARM::FMSTAT)));
1508bool ARMFastISel::SelectCmp(
const Instruction *
I) {
1523 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1524 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
1525 : &ARM::GPRRegClass;
1526 Register DestReg = createResultReg(RC);
1528 Register ZeroReg = fastMaterializeConstant(Zero);
1530 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc), DestReg)
1534 updateValueMap(
I, DestReg);
1538bool ARMFastISel::SelectFPExt(
const Instruction *
I) {
1540 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
1543 if (!
I->getType()->isDoubleTy() ||
1544 !
V->getType()->isFloatTy())
return false;
1551 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1552 TII.get(ARM::VCVTDS), Result)
1554 updateValueMap(
I, Result);
1558bool ARMFastISel::SelectFPTrunc(
const Instruction *
I) {
1560 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
1563 if (!(
I->getType()->isFloatTy() &&
1564 V->getType()->isDoubleTy()))
return false;
1571 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1572 TII.get(ARM::VCVTSD), Result)
1574 updateValueMap(
I, Result);
1578bool ARMFastISel::SelectIToFP(
const Instruction *
I,
bool isSigned) {
1580 if (!Subtarget->hasVFP2Base())
return false;
1583 Type *Ty =
I->getType();
1584 if (!isTypeLegal(Ty, DstVT))
1587 Value *Src =
I->getOperand(0);
1592 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1595 Register SrcReg = getRegForValue(Src);
1600 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
1601 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
1609 Register FP = ARMMoveToFPReg(MVT::f32, SrcReg);
1614 if (Ty->
isFloatTy())
Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1615 else if (Ty->
isDoubleTy() && Subtarget->hasFP64())
1616 Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1620 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1622 updateValueMap(
I, ResultReg);
1626bool ARMFastISel::SelectFPToI(
const Instruction *
I,
bool isSigned) {
1628 if (!Subtarget->hasVFP2Base())
return false;
1631 Type *RetTy =
I->getType();
1632 if (!isTypeLegal(RetTy, DstVT))
1640 Type *OpTy =
I->getOperand(0)->getType();
1641 if (OpTy->
isFloatTy())
Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1642 else if (OpTy->
isDoubleTy() && Subtarget->hasFP64())
1643 Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1648 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1653 Register IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1657 updateValueMap(
I, IntReg);
1661bool ARMFastISel::SelectSelect(
const Instruction *
I) {
1663 if (!isTypeLegal(
I->getType(), VT))
1667 if (VT != MVT::i32)
return false;
1669 Register CondReg = getRegForValue(
I->getOperand(0));
1672 Register Op1Reg = getRegForValue(
I->getOperand(1));
1678 bool UseImm =
false;
1679 bool isNegativeImm =
false;
1681 assert(VT == MVT::i32 &&
"Expecting an i32.");
1682 Imm = (int)ConstInt->getValue().getZExtValue();
1684 isNegativeImm =
true;
1693 Op2Reg = getRegForValue(
I->getOperand(2));
1698 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1701 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1706 const TargetRegisterClass *RC;
1708 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1709 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1711 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1713 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1715 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1717 Register ResultReg = createResultReg(RC);
1721 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1729 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1736 updateValueMap(
I, ResultReg);
1740bool ARMFastISel::SelectDiv(
const Instruction *
I,
bool isSigned) {
1742 Type *Ty =
I->getType();
1743 if (!isTypeLegal(Ty, VT))
1749 if (Subtarget->hasDivideInThumbMode())
1753 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1755 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1756 else if (VT == MVT::i16)
1757 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1758 else if (VT == MVT::i32)
1759 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1760 else if (VT == MVT::i64)
1761 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1762 else if (VT == MVT::i128)
1763 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1764 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SDIV!");
1766 return ARMEmitLibcall(
I, LC);
1769bool ARMFastISel::SelectRem(
const Instruction *
I,
bool isSigned) {
1771 Type *Ty =
I->getType();
1772 if (!isTypeLegal(Ty, VT))
1782 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
1784 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1785 else if (VT == MVT::i16)
1786 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1787 else if (VT == MVT::i32)
1788 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1789 else if (VT == MVT::i64)
1790 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1791 else if (VT == MVT::i128)
1792 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1793 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SREM!");
1795 return ARMEmitLibcall(
I, LC);
1798bool ARMFastISel::SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode) {
1803 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1807 switch (ISDOpcode) {
1808 default:
return false;
1810 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1813 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1816 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1820 Register SrcReg1 = getRegForValue(
I->getOperand(0));
1826 Register SrcReg2 = getRegForValue(
I->getOperand(1));
1830 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1833 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1836 updateValueMap(
I, ResultReg);
1840bool ARMFastISel::SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode) {
1842 if (!FPVT.
isSimple())
return false;
1853 Type *Ty =
I->getType();
1854 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1856 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1860 bool is64bit = VT == MVT::f64 || VT == MVT::i64;
1861 switch (ISDOpcode) {
1862 default:
return false;
1864 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1867 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1870 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1873 Register Op1 = getRegForValue(
I->getOperand(0));
1877 Register Op2 = getRegForValue(
I->getOperand(1));
1882 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1885 updateValueMap(
I, ResultReg);
1893CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
1899 case CallingConv::Fast:
1900 if (Subtarget->hasVFP2Base() && !isVarArg) {
1907 case CallingConv::C:
1908 case CallingConv::CXX_FAST_TLS:
1911 if (Subtarget->hasFPRegs() &&
1919 case CallingConv::ARM_AAPCS_VFP:
1920 case CallingConv::Swift:
1921 case CallingConv::SwiftTail:
1927 case CallingConv::ARM_AAPCS:
1929 case CallingConv::ARM_APCS:
1931 case CallingConv::GHC:
1936 case CallingConv::CFGuard_Check:
1941bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
1942 SmallVectorImpl<Register> &ArgRegs,
1943 SmallVectorImpl<MVT> &ArgVTs,
1944 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
1945 SmallVectorImpl<Register> &RegArgs,
1951 for (
Value *Arg : Args)
1953 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *
Context);
1954 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, OrigTys,
1955 CCAssignFnForCall(CC,
false, isVarArg));
1959 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
1960 CCValAssign &VA = ArgLocs[i];
1974 !VA.
isRegLoc() || !ArgLocs[++i].isRegLoc())
1986 if (!Subtarget->hasVFP2Base())
1990 if (!Subtarget->hasVFP2Base())
2000 NumBytes = CCInfo.getStackSize();
2003 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
2004 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2005 TII.get(AdjStackDown))
2009 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
2010 CCValAssign &VA = ArgLocs[i];
2016 "We don't handle NEON/vector parameters yet.");
2023 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
false);
2024 assert(Arg &&
"Failed to emit a sext");
2032 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
true);
2033 assert(Arg &&
"Failed to emit a zext");
2039 assert(BC &&
"Failed to emit a bitcast!");
2049 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2055 "Custom lowering for v2f64 args not available");
2058 CCValAssign &NextVA = ArgLocs[++i];
2061 "We only handle register args!");
2063 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2078 Addr.setKind(Address::RegBase);
2079 Addr.setReg(ARM::SP);
2082 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2083 assert(EmitRet &&
"Could not emit a store for argument!");
2090bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
2091 const Instruction *
I, CallingConv::ID CC,
2092 unsigned &NumBytes,
bool isVarArg) {
2094 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
2095 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2096 TII.get(AdjStackUp))
2100 if (RetVT != MVT::isVoid) {
2102 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *
Context);
2103 CCInfo.AnalyzeCallResult(RetVT,
I->getType(),
2104 CCAssignFnForCall(CC,
true, isVarArg));
2107 if (RVLocs.
size() == 2 && RetVT == MVT::f64) {
2110 MVT DestVT = RVLocs[0].getValVT();
2112 Register ResultReg = createResultReg(DstRC);
2113 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2114 TII.get(ARM::VMOVDRR), ResultReg)
2115 .
addReg(RVLocs[0].getLocReg())
2116 .
addReg(RVLocs[1].getLocReg()));
2118 UsedRegs.
push_back(RVLocs[0].getLocReg());
2119 UsedRegs.
push_back(RVLocs[1].getLocReg());
2122 updateValueMap(
I, ResultReg);
2124 assert(RVLocs.
size() == 1 &&
"Can't handle non-double multi-reg retvals!");
2125 MVT CopyVT = RVLocs[0].getValVT();
2128 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2133 Register ResultReg = createResultReg(DstRC);
2134 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2135 TII.get(TargetOpcode::COPY),
2136 ResultReg).
addReg(RVLocs[0].getLocReg());
2137 UsedRegs.
push_back(RVLocs[0].getLocReg());
2140 updateValueMap(
I, ResultReg);
2147bool ARMFastISel::SelectRet(
const Instruction *
I) {
2149 const Function &
F = *
I->getParent()->getParent();
2150 const bool IsCmseNSEntry =
F.hasFnAttribute(
"cmse_nonsecure_entry");
2152 if (!FuncInfo.CanLowerReturn)
2156 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2165 CallingConv::ID CC =
F.getCallingConv();
2172 CCState CCInfo(CC,
F.isVarArg(), *FuncInfo.MF, ValLocs,
I->getContext());
2173 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC,
true ,
2182 if (ValLocs.
size() != 1)
2185 CCValAssign &VA = ValLocs[0];
2196 if (!RVEVT.
isSimple())
return false;
2200 if (RVVT != DestVT) {
2201 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2204 assert(DestVT == MVT::i32 &&
"ARM should always ext to i32");
2208 if (Outs[0].
Flags.isZExt() || Outs[0].Flags.isSExt()) {
2209 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].
Flags.isZExt());
2217 const TargetRegisterClass* SrcRC =
MRI.getRegClass(SrcReg);
2221 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2222 TII.get(TargetOpcode::COPY), DstReg).
addReg(SrcReg);
2231 RetOpc = ARM::tBXNS_RET;
2235 RetOpc = Subtarget->getReturnOpcode();
2237 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2239 AddOptionalDefs(MIB);
2241 MIB.
addReg(R, RegState::Implicit);
2245unsigned ARMFastISel::ARMSelectCallOp(
bool UseReg) {
2249 return isThumb2 ? ARM::tBL : ARM::BL;
2252Register ARMFastISel::getLibcallReg(
const Twine &Name) {
2259 GlobalValue *GV =
M.getNamedGlobal(
Name.str());
2261 GV =
new GlobalVariable(M, Type::getInt32Ty(*
Context),
false,
2264 return ARMMaterializeGV(GV, LCREVT.
getSimpleVT());
2274bool ARMFastISel::ARMEmitLibcall(
const Instruction *
I, RTLIB::Libcall
Call) {
2275 RTLIB::LibcallImpl LCImpl = LibcallLowering->getLibcallImpl(
Call);
2276 if (LCImpl == RTLIB::Unsupported)
2280 Type *RetTy =
I->getType();
2283 RetVT = MVT::isVoid;
2284 else if (!isTypeLegal(RetTy, RetVT))
2287 CallingConv::ID CC = LibcallLowering->getLibcallImplCallingConv(LCImpl);
2290 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
2292 CCState CCInfo(CC,
false, *FuncInfo.MF, RVLocs, *
Context);
2293 CCInfo.AnalyzeCallResult(RetVT, RetTy, CCAssignFnForCall(CC,
true,
false));
2294 if (RVLocs.
size() >= 2 && RetVT != MVT::f64)
2299 SmallVector<Value*, 8>
Args;
2303 Args.reserve(
I->getNumOperands());
2304 ArgRegs.
reserve(
I->getNumOperands());
2305 ArgVTs.
reserve(
I->getNumOperands());
2306 ArgFlags.
reserve(
I->getNumOperands());
2312 Type *ArgTy =
Op->getType();
2314 if (!isTypeLegal(ArgTy, ArgVT))
return false;
2316 ISD::ArgFlagsTy
Flags;
2317 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2328 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2329 RegArgs, CC, NumBytes,
false))
2335 if (Subtarget->genLongCalls()) {
2336 CalleeReg = getLibcallReg(FuncName);
2342 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2343 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2344 MIMD,
TII.get(CallOpc));
2348 if (Subtarget->genLongCalls()) {
2357 MIB.
addReg(R, RegState::Implicit);
2365 if (!FinishCall(RetVT, UsedRegs,
I, CC, NumBytes,
false))
return false;
2368 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2373bool ARMFastISel::SelectCall(
const Instruction *
I,
2374 const char *IntrMemName =
nullptr) {
2390 bool isVarArg = FTy->isVarArg();
2393 Type *RetTy =
I->getType();
2396 RetVT = MVT::isVoid;
2397 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
2398 RetVT != MVT::i8 && RetVT != MVT::i1)
2402 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
2403 RetVT != MVT::i16 && RetVT != MVT::i32) {
2405 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *
Context);
2406 CCInfo.AnalyzeCallResult(RetVT, RetTy,
2407 CCAssignFnForCall(CC,
true, isVarArg));
2408 if (RVLocs.
size() >= 2 && RetVT != MVT::f64)
2413 SmallVector<Value*, 8>
Args;
2417 unsigned arg_size = CI->
arg_size();
2418 Args.reserve(arg_size);
2422 for (
auto ArgI = CI->
arg_begin(), ArgE = CI->
arg_end(); ArgI != ArgE; ++ArgI) {
2425 if (IntrMemName && ArgE - ArgI <= 1)
2428 ISD::ArgFlagsTy
Flags;
2429 unsigned ArgIdx = ArgI - CI->
arg_begin();
2444 Type *ArgTy = (*ArgI)->getType();
2446 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
2450 Register Arg = getRegForValue(*ArgI);
2454 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2456 Args.push_back(*ArgI);
2465 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2466 RegArgs, CC, NumBytes, isVarArg))
2471 if (!GV || Subtarget->genLongCalls())
UseReg =
true;
2476 CalleeReg = getLibcallReg(IntrMemName);
2478 CalleeReg = getRegForValue(Callee);
2485 unsigned CallOpc = ARMSelectCallOp(
UseReg);
2486 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2487 MIMD,
TII.get(CallOpc));
2496 }
else if (!IntrMemName)
2503 MIB.
addReg(R, RegState::Implicit);
2511 if (!FinishCall(RetVT, UsedRegs,
I, CC, NumBytes, isVarArg))
2515 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2521bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2525bool ARMFastISel::ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
2526 MaybeAlign Alignment) {
2528 if (!ARMIsMemCpySmall(Len))
2533 if (!Alignment || *Alignment >= 4) {
2539 assert(Len == 1 &&
"Expected a length of 1!");
2543 assert(Alignment &&
"Alignment is set in this branch");
2545 if (Len >= 2 && *Alignment == 2)
2554 RV = ARMEmitLoad(VT, ResultReg, Src);
2555 assert(RV &&
"Should be able to handle this load.");
2556 RV = ARMEmitStore(VT, ResultReg, Dest);
2557 assert(RV &&
"Should be able to handle this store.");
2562 Dest.setOffset(Dest.getOffset() +
Size);
2563 Src.setOffset(Src.getOffset() +
Size);
2569bool ARMFastISel::SelectIntrinsicCall(
const IntrinsicInst &
I) {
2571 switch (
I.getIntrinsicID()) {
2572 default:
return false;
2573 case Intrinsic::frameaddress: {
2574 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
2577 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2578 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
2579 : &ARM::GPRRegClass;
2581 const ARMBaseRegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2593 DestReg = createResultReg(RC);
2594 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2595 TII.get(LdrOpc), DestReg)
2599 updateValueMap(&
I, SrcReg);
2602 case Intrinsic::memcpy:
2603 case Intrinsic::memmove: {
2611 bool isMemCpy = (
I.getIntrinsicID() == Intrinsic::memcpy);
2616 if (ARMIsMemCpySmall(Len)) {
2618 if (!ARMComputeAddress(MTI.
getRawDest(), Dest) ||
2621 MaybeAlign Alignment;
2625 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2637 return SelectCall(&
I, IntrMemName);
2639 case Intrinsic::memset: {
2651 return SelectCall(&
I,
"memset");
2653 case Intrinsic::trap: {
2654 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2655 TII.get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP));
2661bool ARMFastISel::SelectTrunc(
const Instruction *
I) {
2670 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
2672 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
2676 if (!SrcReg)
return false;
2680 updateValueMap(
I, SrcReg);
2686 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
2688 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
2693 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2697 { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2698 { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2699 { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2706 static const TargetRegisterClass *RCTbl[2][2] = {
2708 { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2709 { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2713 static const struct InstructionTable {
2718 }
IT[2][2][3][2] = {
2760 assert((SrcBits < DestBits) &&
"can only extend to larger types");
2761 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2762 "other sizes unimplemented");
2763 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2764 "other sizes unimplemented");
2766 bool hasV6Ops = Subtarget->hasV6Ops();
2767 unsigned Bitness = SrcBits / 8;
2768 assert((Bitness < 3) &&
"sanity-check table bounds");
2770 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2771 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
2772 const InstructionTable *ITP = &
IT[isSingleInstr][isThumb2][Bitness][isZExt];
2773 unsigned Opc = ITP->Opc;
2774 assert(ARM::KILL !=
Opc &&
"Invalid table entry");
2775 unsigned hasS = ITP->hasS;
2778 "only MOVsi has shift operand addressing mode");
2779 unsigned Imm = ITP->Imm;
2782 bool setsCPSR = &ARM::tGPRRegClass == RC;
2783 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2798 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2799 for (
unsigned Instr = 0;
Instr != NumInstrsEmitted; ++
Instr) {
2800 ResultReg = createResultReg(RC);
2801 bool isLsl = (0 ==
Instr) && !isSingleInstr;
2802 unsigned Opcode = isLsl ? LSLOpc :
Opc;
2805 bool isKill = 1 ==
Instr;
2806 MachineInstrBuilder MIB =
BuildMI(
2807 *FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opcode), ResultReg);
2809 MIB.
addReg(ARM::CPSR, RegState::Define);
2823bool ARMFastISel::SelectIntExt(
const Instruction *
I) {
2826 Type *DestTy =
I->getType();
2827 Value *Src =
I->getOperand(0);
2828 Type *SrcTy = Src->getType();
2831 Register SrcReg = getRegForValue(Src);
2832 if (!SrcReg)
return false;
2834 EVT SrcEVT, DestEVT;
2837 if (!SrcEVT.
isSimple())
return false;
2838 if (!DestEVT.
isSimple())
return false;
2842 Register ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2845 updateValueMap(
I, ResultReg);
2849bool ARMFastISel::SelectShift(
const Instruction *
I,
2858 if (DestVT != MVT::i32)
2861 unsigned Opc = ARM::MOVsr;
2863 Value *Src2Value =
I->getOperand(1);
2865 ShiftImm = CI->getZExtValue();
2869 if (ShiftImm == 0 || ShiftImm >=32)
2875 Value *Src1Value =
I->getOperand(0);
2876 Register Reg1 = getRegForValue(Src1Value);
2881 if (
Opc == ARM::MOVsr) {
2882 Reg2 = getRegForValue(Src2Value);
2887 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2891 MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2895 if (
Opc == ARM::MOVsi)
2897 else if (
Opc == ARM::MOVsr) {
2902 AddOptionalDefs(MIB);
2903 updateValueMap(
I, ResultReg);
2908bool ARMFastISel::fastSelectInstruction(
const Instruction *
I) {
2909 switch (
I->getOpcode()) {
2910 case Instruction::Load:
2911 return SelectLoad(
I);
2912 case Instruction::Store:
2913 return SelectStore(
I);
2914 case Instruction::Br:
2915 return SelectBranch(
I);
2916 case Instruction::IndirectBr:
2917 return SelectIndirectBr(
I);
2918 case Instruction::ICmp:
2919 case Instruction::FCmp:
2920 return SelectCmp(
I);
2921 case Instruction::FPExt:
2922 return SelectFPExt(
I);
2923 case Instruction::FPTrunc:
2924 return SelectFPTrunc(
I);
2925 case Instruction::SIToFP:
2926 return SelectIToFP(
I,
true);
2927 case Instruction::UIToFP:
2928 return SelectIToFP(
I,
false);
2929 case Instruction::FPToSI:
2930 return SelectFPToI(
I,
true);
2931 case Instruction::FPToUI:
2932 return SelectFPToI(
I,
false);
2933 case Instruction::Add:
2935 case Instruction::Or:
2936 return SelectBinaryIntOp(
I,
ISD::OR);
2937 case Instruction::Sub:
2939 case Instruction::FAdd:
2941 case Instruction::FSub:
2943 case Instruction::FMul:
2945 case Instruction::SDiv:
2946 return SelectDiv(
I,
true);
2947 case Instruction::UDiv:
2948 return SelectDiv(
I,
false);
2949 case Instruction::SRem:
2950 return SelectRem(
I,
true);
2951 case Instruction::URem:
2952 return SelectRem(
I,
false);
2953 case Instruction::Call:
2955 return SelectIntrinsicCall(*
II);
2956 return SelectCall(
I);
2957 case Instruction::Select:
2958 return SelectSelect(
I);
2959 case Instruction::Ret:
2960 return SelectRet(
I);
2961 case Instruction::Trunc:
2962 return SelectTrunc(
I);
2963 case Instruction::ZExt:
2964 case Instruction::SExt:
2965 return SelectIntExt(
I);
2966 case Instruction::Shl:
2968 case Instruction::LShr:
2970 case Instruction::AShr:
2987 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },
2988 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },
2989 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
2990 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
2991 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
2998bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *
MI,
unsigned OpNo,
2999 const LoadInst *LI) {
3002 if (!isLoadTypeLegal(LI->
getType(), VT))
3009 if (
MI->getNumOperands() < 3 || !
MI->getOperand(2).isImm())
3011 const uint64_t
Imm =
MI->getOperand(2).getImm();
3016 if (FLE.Opc[isThumb2] ==
MI->getOpcode() &&
3017 (uint64_t)FLE.ExpectedImm ==
Imm &&
3020 isZExt = FLE.isZExt;
3023 if (!Found)
return false;
3027 if (!ARMComputeAddress(LI->
getOperand(0), Addr))
return false;
3029 Register ResultReg =
MI->getOperand(0).getReg();
3030 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->
getAlign(), isZExt,
false))
3033 removeDeadCode(
I, std::next(
I));
3037Register ARMFastISel::ARMLowerPICELF(
const GlobalValue *GV, MVT VT) {
3039 LLVMContext *
Context = &MF->getFunction().getContext();
3041 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3048 MF->getDataLayout().getPrefTypeAlign(PointerType::get(*
Context, 0));
3049 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
3050 MachineMemOperand *CPMMO =
3054 Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
3055 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
3056 MachineInstrBuilder MIB =
3057 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), TempReg)
3060 if (
Opc == ARM::LDRcp)
3066 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
3069 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(
Opc), DestReg)
3071 .
addImm(ARMPCLabelIndex);
3073 if (!Subtarget->isThumb())
3076 if (UseGOT_PREL && Subtarget->isThumb()) {
3078 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3079 TII.get(ARM::t2LDRi12), NewDestReg)
3082 DestReg = NewDestReg;
3083 AddOptionalDefs(MIB);
3088bool ARMFastISel::fastLowerArguments() {
3089 if (!FuncInfo.CanLowerReturn)
3096 CallingConv::ID CC =
F->getCallingConv();
3100 case CallingConv::Fast:
3101 case CallingConv::C:
3102 case CallingConv::ARM_AAPCS_VFP:
3103 case CallingConv::ARM_AAPCS:
3104 case CallingConv::ARM_APCS:
3105 case CallingConv::Swift:
3106 case CallingConv::SwiftTail:
3112 for (
const Argument &Arg :
F->args()) {
3113 if (Arg.getArgNo() >= 4)
3116 if (Arg.hasAttribute(Attribute::InReg) ||
3117 Arg.hasAttribute(Attribute::StructRet) ||
3118 Arg.hasAttribute(Attribute::SwiftSelf) ||
3119 Arg.hasAttribute(Attribute::SwiftError) ||
3120 Arg.hasAttribute(Attribute::ByVal))
3123 Type *ArgTy = Arg.getType();
3128 if (!ArgVT.
isSimple())
return false;
3140 ARM::R0, ARM::R1, ARM::R2, ARM::R3
3143 const TargetRegisterClass *RC = &ARM::rGPRRegClass;
3144 for (
const Argument &Arg :
F->args()) {
3145 unsigned ArgNo = Arg.getArgNo();
3147 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3151 Register ResultReg = createResultReg(RC);
3152 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3153 TII.get(TargetOpcode::COPY),
3155 updateValueMap(&Arg, ResultReg);
3167 return new ARMFastISel(funcInfo, libInfo, libcallLowering);
unsigned const MachineRegisterInfo * MRI
static const MCPhysReg GPRArgRegs[]
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred)
static const struct FoldableLoadExtendsStruct FoldableLoadExtends[]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the FastISel class.
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
static MaybeAlign getAlign(Value *Ptr)
Module.h This file contains the declarations for the Module class.
Machine Check Debug Module
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const GCNTargetMachine & getTM(const GCNSubtarget *STI)
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
static const unsigned FramePtr
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
Register getFrameRegister(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
bool isThumb2Function() const
unsigned createPICLabelUId()
bool useFastISel() const
True if fast-isel is used.
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
bool hasStandaloneRem(EVT VT) const override
Return true if the target can handle a standalone remainder operation.
PointerType * getType() const
Overload to return most specific pointer type.
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
unsigned getValNo() const
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Value * getCalledOperand() const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
unsigned arg_size() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
const APFloat & getValueAPF() const
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
PointerType * getType() const
Global values are always pointers.
@ ExternalLinkage
Externally visible function.
Tracks which library functions to use for a particular subtarget.
Align getAlign() const
Return the alignment of the access that is being performed.
ArrayRef< MCOperandInfo > operands() const
bool isVector() const
Return true if this is a vector value type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MachineInstrBundleIterator< MachineInstr > iterator
void setFrameAddressIsTaken(bool T)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Value * getLength() const
Value * getRawDest() const
MaybeAlign getDestAlign() const
unsigned getDestAddressSpace() const
Value * getRawSource() const
Return the arguments to the instruction.
unsigned getSourceAddressSpace() const
MaybeAlign getSourceAlign() const
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr unsigned id() const
void reserve(size_type N)
void push_back(const T &Elt)
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Provides information about what library functions are available for the current target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isPositionIndependent() const
FloatABI::ABIType FloatABIType
FloatABIType - This setting is set by -float-abi=xxx option is specfied on the command line.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isStructTy() const
True if this is an instance of StructType.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
const Use * const_op_iterator
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
StructType * getStructTypeOrNull() const
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ GOT_PREL
Thread Local Storage (General Dynamic Mode)
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
@ C
The default llvm calling convention, compatible with C.
@ ADD
Simple integer binary arithmetic operators.
@ FADD
Simple binary floating point operators.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
@ User
could "use" a pointer
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
FunctionAddr VTableAddr Value
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr RegState getKillRegState(bool B)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
unsigned getBLXOpcode(const MachineFunction &MF)
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.