class ARMFastISel final : public FastISel {
  // ...
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
        : FastISel(funcInfo, libInfo),
          // ...
          TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
          TLI(*Subtarget->getTargetLowering()) {
      // ...
    }

    // Code from FastISel.cpp.
    // ...
    unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, unsigned Op1);
    // ...

  // Backend specific FastISel code.
#include "ARMGenFastISel.inc"
    // Instruction selection routines.
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    // ...
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    // Utility routines.
    bool isPositionIndependent() const;
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                     MaybeAlign Alignment = std::nullopt, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      MaybeAlign Alignment = std::nullopt);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               MaybeAlign Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                           bool isZExt);
    // ...
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);

    // Call handling routines.
    unsigned ARMSelectCallOp(bool UseReg);
    // ...
    unsigned getLibcallReg(const Twine &Name);
    bool ProcessCallArgs(SmallVectorImpl<Value *> &Args,
                         SmallVectorImpl<Register> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<Register> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    // ...

    // OptionalDef handling routines.
    // ...
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              MachineMemOperand::Flags Flags, bool useAM3);
};
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR.
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

// From isARMNEONPred(MI): Thumb2 and non-NEON instructions defer to the
// generic predicability check; otherwise the operand info is scanned.
    return MI->isPredicable();
  // ...
    if (opInfo.isPredicate())
      return true;
  // ...

// From AddOptionalDefs(MIB): append the default predicate and, when the
// instruction optionally defines CPSR, the condition-code operand.
  if (isARMNEONPred(MI))
    MIB.addImm(ARMCC::AL).addReg(0);

  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR))
    MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
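
// Note on the helpers above: almost every ARM instruction is predicable, so
// instructions built here must carry an explicit condition-code operand (and,
// for flag-setting encodings, an optional CPSR def). A minimal sketch of what
// AddOptionalDefs appends for the common "always executed" case, using the
// predOps helper:
//   MIB.add(predOps(ARMCC::AL));   // condition = AL, predicate reg = noreg
// The CPSR/CCR operand is only added when the MCInstrDesc asks for it.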
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                            ResultReg).addReg(Op0));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        .addReg(Op0));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}
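
// fastEmitInst_r and its _rr/_ri/_i siblings below share one pattern: if the
// instruction description declares an explicit def, the result register is
// placed directly on the new instruction; otherwise the instruction is
// emitted as-is and its first implicit def is COPY'd into the virtual result
// register, so callers always receive the value in ResultReg.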
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, unsigned Op1) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Constrain the operands to be legal for this instruction.
  // ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addReg(Op1));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        .addReg(Op0)
                        .addReg(Op1));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        .addReg(Op0)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64)
    return 0;

  Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVSR), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64)
    return 0;

  Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}
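
// VMOVSR/VMOVRS are the 32-bit core-register <-> single-precision VFP
// transfer instructions, which is why these two helpers deliberately bail out
// (return 0) for f64/i64: a 64-bit move needs a register pair
// (VMOVDRR/VMOVRRD), which callers such as FinishCall handle separately.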
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // If the constant can be encoded directly as a VFP immediate, emit a single
  // FCONST; otherwise go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    // ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2Base())
    return false;

  // MachineConstantPool wants an explicit alignment.
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
  Register DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0));
  return DestReg;
}
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                             : &ARM::GPRRegClass;
    Register ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ImmReg)
                        .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                               : &ARM::GPRRegClass;
      Register ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(Opc), ImmReg)
                          .addImm(Imm));
      return ImmReg;
    }
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt())
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  if (ResultReg)
    return ResultReg;

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  Align Alignment = DL.getPrefTypeAlign(C->getType());
  unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);
  ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::t2LDRpci), ResultReg)
                        .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::LDRcp), ResultReg)
                        .addConstantPoolIndex(Idx)
                        .addImm(0));
  }
  return ResultReg;
}
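
// The function above tries progressively more expensive materializations.
// Roughly, for a few sample constants on an ARMv6T2+ target this produces
// (illustrative assembly only; registers and pool labels depend on
// allocation):
//   movw r0, #4660          ; 0x1234 fits the unsigned 16-bit MOVi16
//   mvn  r0, #15            ; -16: the complement is a valid modified-imm
//   ldr  r0, .LCPI0_0       ; anything else: 32-bit constant-pool load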
bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32 || GV->isThreadLocal()) return 0;

  // ROPI/RWPI not currently supported.
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  Register DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal)
    return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, it avoids constant pool entries.
  if (Subtarget->useMovt() &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg)
                        .addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    Align Alignment = DL.getPrefTypeAlign(GV->getType());

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    // ...
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
        MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                      MIMD, TII.get(Opc), NewDestReg)
                  .addReg(DestReg)
                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
      (Subtarget->isTargetMachO() && IsIndirect)) {
    MachineInstrBuilder MIB;
    Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}
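
// Summary of the cases above: non-PIC (or MachO) targets with movt support
// prefer a movw/movt pair on the global's address; ELF PIC defers to
// ARMLowerPICELF; everything else goes through a constant-pool entry fixed up
// with PICADD/PICLDR; and GOT-resident or dyld-indirect globals take one
// extra ldr through the address just computed.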
unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI))
    return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT))
    return 0;

  DenseMap<const AllocaInst *, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    Register ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(SI->second)
                        .addImm(0));
    return ResultReg;
  }

  return 0;
}
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple())
    return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))
    return true;

  // If this is a type than can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}
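
// i1/i8/i16 are accepted above even though they are not legal register types:
// ARM has extending loads (LDRB/LDRSB, LDRH/LDRSH), so such values can be
// widened to i32 for free at the load itself rather than with a separate
// extend instruction afterwards.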
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      // ...
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      // ...
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          // ...
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
                  cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported.
            goto unsupported_gep;
          // ...
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr))
        return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0)
    Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
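
// Example of the GEP folding performed above (illustrative IR, not from this
// file): for
//   %p = getelementptr { i32, [4 x i16] }, ptr %base, i32 0, i32 1, i32 2
// the struct field offset (4 bytes) and the scaled array index (2 * 2 bytes)
// are folded into Addr.Offset = 8, leaving %base as the single base register.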
void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    Register ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(Addr.Base.FI)
                        .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}
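
// The offset limits checked above correspond to the ARM addressing modes:
// 12-bit offsets for the plain integer LDR/STR encodings, +/-255 for the AM3
// halfword/signed-byte forms, and an 8-bit word-scaled offset for the VFP
// (addrmode5) loads and stores. Anything outside those ranges is folded into
// the base register with an explicit ADD first.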
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       MachineMemOperand::Flags Flags,
                                       bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);
    // ...
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);
    // ...
  }
  AddOptionalDefs(MIB);
}
bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                              MaybeAlign Alignment, bool isZExt,
                              bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        // ...
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i16:
      if (Alignment && *Alignment < Align(2) &&
          !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::i32:
      if (Alignment && *Alignment < Align(4) &&
          !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2Base()) return false;
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && *Alignment < Align(4)) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2Base()) return false;
      // FIXME: Unaligned loads need special handling. Doublewords require
      // word-alignment.
      if (Alignment && *Alignment < Align(4))
        return false;
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load.  Now we must move from the GPR to the FP register.
  if (needVMOV) {
    Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::VMOVSR), MoveReg)
                        .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}
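
// Worth noting in the f32 path above: when the access is under-aligned and
// the target doesn't permit the aligned VFP access, the value is loaded as an
// i32 through a GPR and then transferred to an S-register with VMOVSR,
// trading one extra instruction for correctness.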
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr))
    return false;

  Register ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlign()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               MaybeAlign Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                              : &ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(Opc), Res)
                          .addReg(SrcReg).addImm(1));
      SrcReg = Res;
      [[fallthrough]];
    }
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && *Alignment < Align(2) &&
          !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && *Alignment < Align(4) &&
          !Subtarget->allowsUnalignedMem())
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2Base()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && *Alignment < Align(4)) {
        Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                TII.get(ARM::VMOVRS), MoveReg)
                            .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2Base()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && *Alignment < Align(4))
        return false;
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);
  // ...
  return true;
}
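
// The MVT::i1 case above normalizes the source with an `and rN, rM, #1`
// before falling through to the byte-store path, since an i1 held in a GPR
// is only guaranteed in bit 0. The f32 path mirrors the load: an
// under-aligned float is moved to a GPR with VMOVRS and stored with a plain
// integer STR.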
bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlign()))
    return false;
  return true;
}
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));
  MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);
      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      Register OpReg = getRegForValue(TI->getOperand(0));
      OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(TstOpc))
                          .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
                 dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    fastEmitBranch(Target, MIMD.getDL());
    return true;
  }

  Register CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // The compare lives in a predecessor block; its result was left for us in a
  // virtual register, so re-test its low bit here.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(TstOpc))
                      .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  Register AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  assert(isThumb2 || Subtarget->hasV4TOps());

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));

  return true;
}
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
    return false;

  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN (0x80000000) we need to keep the cmp, because there is no
      // way to represent 2147483648 as a signed 32-bit int for the cmn.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
    default: return false;
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
      [[fallthrough]];
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  Register SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
  if (!UseImm) {
    SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
              .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::FMSTAT)));
  return true;
}
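
// Sketch of what the routine above emits (illustrative assembly; registers
// are still virtual at this point):
//   cmp  r0, r1             ; register/register integer compare
//   cmp  r0, #42            ; immediate fits the modified-immediate encoding
//   cmn  r0, #42            ; operand was -42: compare via the negation
//   vcmp.f32 s0, s1         ; FP compare ...
//   vmrs APSR_nzcv, fpscr   ; ... then FMSTAT copies the flags to CPSR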
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  // ...
  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  Register DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = fastMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg)
      .addImm(1)
      .addImm(ARMPred)
      .addReg(ARM::CPSR);

  updateValueMap(I, DestReg);
  return true;
}
bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  Register Op = getRegForValue(V);
  if (Op == 0) return false;

  Register Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VCVTDS), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  Register Op = getRegForValue(V);
  if (Op == 0) return false;

  Register Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VCVTSD), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2Base()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  Register SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/ !isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy() && Subtarget->hasFP64())
    Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg).addReg(FP));
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2Base()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  Register Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy() && Subtarget->hasFP64())
    Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 and f32->s32/u32 both need an intermediate f32 reg.
  Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  return true;
}
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  Register CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  Register Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
          .addReg(CondReg)
          .addImm(1));

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  Register ResultReg = createResultReg(RC);
  if (!UseImm) {
    Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg)
        .addReg(Op1Reg)
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
  } else {
    Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg)
        .addImm(Imm)
        .addImm(ARMCC::EQ)
        .addReg(ARM::CPSR);
  }
  updateValueMap(I, ResultReg);
  return true;
}
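
// The select is therefore lowered without branches: `tst cond, #1` sets CPSR,
// then a predicated MOVCC (or MVNCC) picks between the two inputs. MVNCC
// covers negative immediates by encoding the complemented value computed
// above.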
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivideInThumbMode())
    return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}
bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // ...
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  Register SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Erase the implicit trunc that got us here? (same for SrcReg2)
  Register SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
  SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg)
                      .addReg(SrcReg1).addReg(SrcReg2));
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT FPVT = TLI.getValueType(DL, I->getType(), true);
  if (!FPVT.isSimple()) return false;
  MVT VT = FPVT.getSimpleVT();

  // FIXME: Support vector types where possible.
  if (VT.isVector())
    return false;

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to do it. Just use the vfp
  // instructions if we have them.
  Type *Ty = I->getType();
  if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
    return false;
  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  Register Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  Register Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg)
                      .addReg(Op1).addReg(Op2));
  updateValueMap(I, ResultReg);
  return true;
}

// Call handling routines.
// CCAssignFnForCall - Selects the correct CCAssignFn for a given CallingConv.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return,
                                           bool isVarArg) {
  switch (CC) {
  // ...
  case CallingConv::Fast:
    if (Subtarget->hasVFP2Base() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    [[fallthrough]];
  case CallingConv::C:
  case CallingConv::CXX_FAST_TLS:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasFPRegs() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    }
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  // ...
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value *> &Args,
                                  SmallVectorImpl<Register> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<Register> &RegArgs,
                                  CallingConv::ID CC, unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];
    // ...
      // f64 is passed in pairs of GPRs; both halves must land in registers.
      if (!VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    // ...
      if (!Subtarget->hasVFP2Base())
        return false;
    // ...
  }

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getStackSize();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(AdjStackDown))
                      .addImm(NumBytes).addImm(0));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = Args[VA.getValNo()];
    Register Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/ false);
      assert(Arg != 0 && "Failed to emit a sext");
      ArgVT = DestVT;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/ true);
      assert(Arg != 0 && "Failed to emit a zext");
      ArgVT = DestVT;
      break;
    }
    case CCValAssign::BCvt: {
      unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg);
      assert(BC != 0 && "Failed to emit a bitcast!");
      Arg = BC;
      ArgVT = VA.getLocVT();
      break;
    }
    default: break;
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");
      // ...
      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                          .addReg(NextVA.getLocReg(), RegState::Define)
                          .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.

      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }
  return true;
}
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(AdjStackUp))
                      .addImm(NumBytes).addImm(-1ULL));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      MVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass *DstRC = TLI.getRegClassFor(DestVT);
      Register ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(ARM::VMOVDRR), ResultReg)
                          .addReg(RVLocs[0].getLocReg())
                          .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass *DstRC = TLI.getRegClassFor(CopyVT);

      Register ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    }
  }

  return true;
}
bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();
  const bool IsCmseNSEntry = F.hasFnAttribute("cmse_nonsecure_entry");

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    Register Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;
    // ...
    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple()) return false;
    MVT RVVT = RVEVT.getSimpleVT();
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext.  Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    // ...
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  unsigned RetOpc;
  if (IsCmseNSEntry)
    if (isThumb2)
      RetOpc = ARM::tBXNS_RET;
    else
      llvm_unreachable("CMSE not valid for non-Thumb targets");
  else
    RetOpc = Subtarget->getReturnOpcode();

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(RetOpc));
  AddOptionalDefs(MIB);
  for (unsigned R : RetRegs)
    MIB.addReg(R, RegState::Implicit);
  return true;
}
unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? gettBLXrOpcode(*MF) : getBLXOpcode(*MF);
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  // Manually compute the global's type to avoid building it when unnecessary.
  Type *GVTy = PointerType::get(*Context, /*AS=*/0);
  EVT LCREVT = TLI.getValueType(DL, GVTy);
  if (!LCREVT.isSimple()) return 0;

  GlobalValue *GV = M.getNamedValue(Name.str());
  if (!GV)
    GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
                            GlobalValue::ExternalLinkage, nullptr, Name);

  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value *, 8> Args;
  SmallVector<Register, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (Value *Op : I->operands()) {
    Register Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<Register, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  Register CalleeReg;
  if (Subtarget->genLongCalls()) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    MIMD, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    MIB.add(predOps(ARMCC::AL));
  if (Subtarget->genLongCalls()) {
    CalleeReg = constrainOperandRegClass(TII.get(CallOpc), CalleeReg, 0);
    MIB.addReg(CalleeReg);
  } else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (Register R : RegArgs)
    MIB.addReg(R, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  SmallVector<Register, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledOperand();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // ...
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  // ...
  unsigned arg_size = CI->arg_size();
  Args.reserve(arg_size);
  // ...
  for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end();
       ArgI != ArgE; ++ArgI) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last argument, which shouldn't be passed to the underlying function.
    if (IntrMemName && ArgE - ArgI <= 1)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned ArgIdx = ArgI - CI->arg_begin();
    if (CI->paramHasAttr(ArgIdx, Attribute::SExt))
      Flags.setSExt();
    // ...

    Type *ArgTy = (*ArgI)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    Register Arg = getRegForValue(*ArgI);
    if (!Arg.isValid())
      return false;

    Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));

    Args.push_back(*ArgI);
    // ...
  }

  // Handle the arguments now that we've gotten them.
  // ...
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->genLongCalls())
    UseReg = true;

  Register CalleeReg;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    MIMD, TII.get(CallOpc));

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    MIB.add(predOps(ARMCC::AL));
  if (UseReg) {
    CalleeReg = constrainOperandRegClass(TII.get(CallOpc), CalleeReg, 0);
    MIB.addReg(CalleeReg);
  } else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // ...
  // Finish off the call including any return values.
  SmallVector<Register, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, MaybeAlign Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  while (Len) {
    MVT VT;
    if (!Alignment || *Alignment >= 4) {
      if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else {
        assert(Len == 1 && "Expected a length of 1!");
        VT = MVT::i8;
      }
    } else {
      assert(Alignment && "Alignment is set in this branch");
      // Bound based on alignment.
      if (Len >= 2 && *Alignment == 2)
        VT = MVT::i16;
      else {
        VT = MVT::i8;
      }
    }

    bool RV;
    Register ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits() / 8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}
bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
    MFI.setFrameAddressIsTaken(true);

    unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    // ...
    unsigned SrcReg = FramePtr;

    // Recursively load frame address:
    //   ldr r0 [fp] / ldr r0 [r0] / ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(LdrOpc), DestReg)
                          .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    updateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        MaybeAlign Alignment;
        if (MTI.getDestAlign() || MTI.getSourceAlign())
          Alignment = std::min(MTI.getDestAlign().valueOrOne(),
                               MTI.getSourceAlign().valueOrOne());
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }
    // ...
    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    // ...
    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    unsigned Opcode;
    if (Subtarget->isThumb())
      Opcode = ARM::tTRAP;
    else
      Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode));
    return true;
  }
  }
}
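
// llvm.memcpy with a small constant length (<= 16 bytes, per ARMIsMemCpySmall)
// is expanded inline above as a sequence of load/store pairs whose widths
// follow the known alignment; everything else falls back to SelectCall, which
// emits a plain call to the corresponding C library routine.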
bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(DL, Op->getType(), true);
  DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  Register SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  updateValueMap(I, SrcReg);
  return true;
}
unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
    return 0;

  // Table of which combinations can be emitted as a single instruction,
  // and which will require two.
  static const uint8_t isSingleInstrTbl[3][2][2][2] = {
    //            ARM                     Thumb
    //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
    //    ext:     s  z      s  z          s  z      s  z
    /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
    /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
    /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
  };

  // Target registers for:
  //  - For ARM can never be PC.
  //  - For 16-bit Thumb are restricted to lower 8 registers.
  //  - For 32-bit Thumb are restricted to non-SP and non-PC.
  static const TargetRegisterClass *RCTbl[2][2] = {
    // Instructions: Two                     Single
    /* ARM   */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    /* Thumb */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
  };

  // Table governing the instruction(s) to be emitted.
  static const struct InstructionTable {
    uint32_t Opc   : 16;
    uint32_t hasS  :  1; // Some instructions have an S bit, always set it to 0.
    uint32_t Shift :  7; // For shift operand addressing mode, used by MOVsi.
    uint32_t Imm   :  8; // All instructions have either a shift or a mask.
  } IT[2][2][3][2] = {
    // ... (opcode table elided in this excerpt)
  };

  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DestVT.getSizeInBits();
  (void)DestBits;
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
  const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");
  unsigned hasS = ITP->hasS;
  ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc)ITP->Shift;
  assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;
  bool ImmIsSO = (Shift != ARM_AM::no_shift);

  // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
  unsigned ResultReg;

  // Either one or two instructions are emitted, always of the form
  // dst = in OP imm. When two are emitted the first's result feeds the
  // second's input, and that intermediate value is then dead.
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
    ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
    unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
    bool isKill = 1 == Instr;
    MachineInstrBuilder MIB = BuildMI(
        *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg);
    if (setsCPSR)
      MIB.addReg(ARM::CPSR, RegState::Define);
    SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
    MIB.addReg(SrcReg, isKill * RegState::Kill)
        .addImm(ImmEnc)
        .add(predOps(ARMCC::AL));
    if (hasS)
      MIB.add(condCodeOp());
    // Second instruction consumes the first's result.
    SrcReg = ResultReg;
  }

  return ResultReg;
}
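
// Example of the two-instruction fallback selected by the tables above: an i8
// sign-extension on a pre-ARMv6 target (no SXTB available) comes out as a
// shift pair (illustrative assembly):
//   lsl r1, r0, #24
//   asr r2, r1, #24
// while the same extension on ARMv6+ is the single `sxtb r1, r0`.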
bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  Register SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // We handle thumb2 mode by target independent selector
  // or SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  Register Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(Opc), ResultReg)
          .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}
// This table describes sign- and zero-extend instructions which can be
// folded into a preceding load. All of these extends have an immediate
// (sometimes a mask and sometimes a shift) that's applied after extension.
static const struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
} FoldableLoadExtends[] = {
  { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
  { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
  { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
  { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
};
/// The specified machine instr operand is a vreg, and that vreg is being
/// provided by the specified load instruction.  If possible, try to fold the
/// load as an operand to the instruction, returning true if successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
    return false;
  const uint64_t Imm = MI->getOperand(2).getImm();

  bool Found = false;
  bool isZExt;
  for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
    if (FLE.Opc[isThumb2] == MI->getOpcode() &&
        (uint64_t)FLE.ExpectedImm == Imm &&
        MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
      Found = true;
      isZExt = FLE.isZExt;
    }
  }
  if (!Found) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  Register ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlign(), isZExt, false))
    return false;
  MachineBasicBlock::iterator I(MI);
  removeDeadCode(I, std::next(I));
  return true;
}
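
// In effect this rewrites the pattern in the comment above: when a plain byte
// or halfword load is followed by a matching UXTB/SXTB/UXTH/SXTH (or the
// AND #255 idiom), the extend is deleted and the load is re-emitted as the
// corresponding extending load (LDRSB/LDRSH/...), saving an instruction.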
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
  bool UseGOT_PREL = !GV->isDSOLocal();
  LLVMContext *Context = &MF->getFunction().getContext();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
      GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
      UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
      /*AddCurrentAddress=*/UseGOT_PREL);

  Align ConstAlign =
      MF->getDataLayout().getPrefTypeAlign(PointerType::get(*Context, 0));
  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, Align(4));

  Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg)
          .addConstantPoolIndex(Idx)
          .addMemOperand(CPMMO);
  if (Opc == ARM::LDRcp)
    MIB.addImm(0);
  MIB.add(predOps(ARMCC::AL));

  // Fix the address by adding pc.
  Register DestReg = createResultReg(TLI.getRegClassFor(VT));
  Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
                                                          : ARM::PICADD;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
            .addReg(TempReg)
            .addImm(ARMPCLabelIndex);

  if (!Subtarget->isThumb())
    MIB.add(predOps(ARMCC::AL));

  if (UseGOT_PREL && Subtarget->isThumb()) {
    Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                  TII.get(ARM::t2LDRi12), NewDestReg)
              .addReg(DestReg)
              .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }
  return DestReg;
}
bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  // ...

  // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments
  // passed in r0 - r3.
  for (const Argument &Arg : F->args()) {
    if (Arg.getArgNo() >= 4)
      return false;

    if (Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::ByVal))
      return false;

    Type *ArgTy = Arg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
      return false;

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple())
      return false;
    // ...
  }

  static const MCPhysReg GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  const TargetRegisterClass *RC = &ARM::rGPRRegClass;
  for (const Argument &Arg : F->args()) {
    unsigned ArgNo = Arg.getArgNo();
    unsigned SrcReg = GPRArgRegs[ArgNo];
    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // Without an extra copy, EmitLiveInCopies may eliminate the livein if its
    // only use is a bitcast (which isn't turned into an instruction).
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(DstReg, getKillRegState(true));
    updateValueMap(&Arg, ResultReg);
  }

  return true;
}

FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                              const TargetLibraryInfo *libInfo) {
  if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
    return new ARMFastISel(funcInfo, libInfo);
  return nullptr;
}