107 class ARMFastISel final :
public FastISel {
127 TM(funcInfo.MF->getTarget()),
TII(*Subtarget->getInstrInfo()),
128 TLI(*Subtarget->getTargetLowering()) {
137 unsigned fastEmitInst_r(
unsigned MachineInstOpcode,
139 unsigned fastEmitInst_rr(
unsigned MachineInstOpcode,
141 unsigned Op0,
unsigned Op1);
142 unsigned fastEmitInst_ri(
unsigned MachineInstOpcode,
145 unsigned fastEmitInst_i(
unsigned MachineInstOpcode,
151 bool fastSelectInstruction(
const Instruction *
I)
override;
152 unsigned fastMaterializeConstant(
const Constant *
C)
override;
153 unsigned fastMaterializeAlloca(
const AllocaInst *AI)
override;
156 bool fastLowerArguments()
override;
158 #include "ARMGenFastISel.inc"
169 bool SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode);
170 bool SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode);
175 bool SelectCall(
const Instruction *
I,
const char *IntrMemName);
185 bool isPositionIndependent()
const;
186 bool isTypeLegal(
Type *Ty,
MVT &VT);
187 bool isLoadTypeLegal(
Type *Ty,
MVT &VT);
188 bool ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
191 MaybeAlign Alignment = std::nullopt,
bool isZExt =
true,
192 bool allocReg =
true);
193 bool ARMEmitStore(
MVT VT,
unsigned SrcReg, Address &
Addr,
195 bool ARMComputeAddress(
const Value *Obj, Address &
Addr);
196 void ARMSimplifyAddress(Address &
Addr,
MVT VT,
bool useAM3);
197 bool ARMIsMemCpySmall(
uint64_t Len);
198 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src,
uint64_t Len,
200 unsigned ARMEmitIntExt(
MVT SrcVT,
unsigned SrcReg,
MVT DestVT,
bool isZExt);
204 unsigned ARMMoveToFPReg(
MVT VT,
unsigned SrcReg);
205 unsigned ARMMoveToIntReg(
MVT VT,
unsigned SrcReg);
206 unsigned ARMSelectCallOp(
bool UseReg);
224 unsigned getLibcallReg(
const Twine &Name);
227 unsigned &NumBytes,
bool isVarArg);
235 void AddLoadStoreOperands(
MVT VT, Address &
Addr,
245 bool ARMFastISel::DefinesOptionalPredicate(
MachineInstr *
MI,
bool *CPSR) {
246 if (!
MI->hasOptionalDef())
251 if (!MO.isReg() || !MO.isDef())
continue;
252 if (MO.getReg() == ARM::CPSR)
264 return MI->isPredicable();
267 if (opInfo.isPredicate())
285 if (isARMNEONPred(
MI))
291 if (DefinesOptionalPredicate(
MI, &CPSR))
296 unsigned ARMFastISel::fastEmitInst_r(
unsigned MachineInstOpcode,
299 Register ResultReg = createResultReg(RC);
306 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
309 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
311 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
312 TII.get(TargetOpcode::COPY), ResultReg)
318 unsigned ARMFastISel::fastEmitInst_rr(
unsigned MachineInstOpcode,
320 unsigned Op0,
unsigned Op1) {
321 Register ResultReg = createResultReg(RC);
331 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
335 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
338 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
339 TII.get(TargetOpcode::COPY), ResultReg)
345 unsigned ARMFastISel::fastEmitInst_ri(
unsigned MachineInstOpcode,
348 Register ResultReg = createResultReg(RC);
356 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
360 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
363 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
364 TII.get(TargetOpcode::COPY), ResultReg)
370 unsigned ARMFastISel::fastEmitInst_i(
unsigned MachineInstOpcode,
373 Register ResultReg = createResultReg(RC);
377 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
380 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
382 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
383 TII.get(TargetOpcode::COPY), ResultReg)
391 unsigned ARMFastISel::ARMMoveToFPReg(
MVT VT,
unsigned SrcReg) {
395 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
401 unsigned ARMFastISel::ARMMoveToIntReg(
MVT VT,
unsigned SrcReg) {
405 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
414 unsigned ARMFastISel::ARMMaterializeFP(
const ConstantFP *CFP,
MVT VT) {
431 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
437 if (!Subtarget->hasVFP2Base())
return false;
441 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
443 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
447 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opc), DestReg)
453 unsigned ARMFastISel::ARMMaterializeInt(
const Constant *
C,
MVT VT) {
460 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->
getZExtValue())) {
461 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
464 Register ImmReg = createResultReg(RC);
465 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
466 TII.get(Opc), ImmReg)
477 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
480 Register ImmReg = createResultReg(RC);
481 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
482 TII.get(Opc), ImmReg)
488 unsigned ResultReg = 0;
489 if (Subtarget->useMovt())
500 Align Alignment =
DL.getPrefTypeAlign(
C->getType());
501 unsigned Idx = MCP.getConstantPoolIndex(
C, Alignment);
504 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
505 TII.get(ARM::t2LDRpci), ResultReg)
510 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
511 TII.get(ARM::LDRcp), ResultReg)
518 bool ARMFastISel::isPositionIndependent()
const {
522 unsigned ARMFastISel::ARMMaterializeGV(
const GlobalValue *GV,
MVT VT) {
527 if (Subtarget->isROPI() || Subtarget->isRWPI())
530 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
533 Register DestReg = createResultReg(RC);
538 if (!Subtarget->isTargetMachO() && IsThreadLocal)
return 0;
540 bool IsPositionIndependent = isPositionIndependent();
543 if (Subtarget->useMovt() &&
544 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
546 unsigned char TF = 0;
547 if (Subtarget->isTargetMachO())
550 if (IsPositionIndependent)
551 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
553 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
554 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
560 if (Subtarget->isTargetELF() && IsPositionIndependent)
561 return ARMLowerPICELF(GV, VT);
564 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
569 unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);
574 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
575 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opc),
577 if (IsPositionIndependent)
579 AddOptionalDefs(MIB);
583 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
584 TII.get(ARM::LDRcp), DestReg)
587 AddOptionalDefs(MIB);
589 if (IsPositionIndependent) {
590 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
594 MIMD,
TII.get(Opc), NewDestReg)
597 AddOptionalDefs(MIB);
603 if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
604 (Subtarget->isTargetMachO() && IsIndirect)) {
608 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
609 TII.get(ARM::t2LDRi12), NewDestReg)
613 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
614 TII.get(ARM::LDRi12), NewDestReg)
617 DestReg = NewDestReg;
618 AddOptionalDefs(MIB);
624 unsigned ARMFastISel::fastMaterializeConstant(
const Constant *
C) {
631 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
632 return ARMMaterializeFP(CFP, VT);
633 else if (
const GlobalValue *GV = dyn_cast<GlobalValue>(
C))
634 return ARMMaterializeGV(GV, VT);
635 else if (isa<ConstantInt>(
C))
636 return ARMMaterializeInt(
C, VT);
643 unsigned ARMFastISel::fastMaterializeAlloca(
const AllocaInst *AI) {
645 if (!FuncInfo.StaticAllocaMap.count(AI))
return 0;
648 if (!isLoadTypeLegal(AI->
getType(), VT))
return 0;
651 FuncInfo.StaticAllocaMap.find(AI);
655 if (
SI != FuncInfo.StaticAllocaMap.end()) {
656 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
658 Register ResultReg = createResultReg(RC);
661 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
662 TII.get(Opc), ResultReg)
671 bool ARMFastISel::isTypeLegal(
Type *Ty,
MVT &VT) {
683 bool ARMFastISel::isLoadTypeLegal(
Type *Ty,
MVT &VT) {
684 if (isTypeLegal(Ty, VT))
return true;
695 bool ARMFastISel::ARMComputeAddress(
const Value *Obj, Address &
Addr) {
697 const User *U =
nullptr;
698 unsigned Opcode = Instruction::UserOp1;
699 if (
const Instruction *
I = dyn_cast<Instruction>(Obj)) {
702 if (FuncInfo.StaticAllocaMap.count(
static_cast<const AllocaInst *
>(Obj)) ||
703 FuncInfo.MBBMap[
I->getParent()] == FuncInfo.MBB) {
704 Opcode =
I->getOpcode();
707 }
else if (
const ConstantExpr *
C = dyn_cast<ConstantExpr>(Obj)) {
708 Opcode =
C->getOpcode();
713 if (Ty->getAddressSpace() > 255)
721 case Instruction::BitCast:
724 case Instruction::IntToPtr:
730 case Instruction::PtrToInt:
735 case Instruction::GetElementPtr: {
737 int TmpOffset =
Addr.Offset;
743 i !=
e; ++
i, ++GTI) {
747 unsigned Idx = cast<ConstantInt>(
Op)->getZExtValue();
757 if (canFoldAddIntoGEP(U,
Op)) {
760 cast<ConstantInt>(cast<AddOperator>(
Op)->getOperand(1));
763 Op = cast<AddOperator>(
Op)->getOperand(0);
767 goto unsupported_gep;
773 Addr.Offset = TmpOffset;
782 case Instruction::Alloca: {
785 FuncInfo.StaticAllocaMap.find(AI);
786 if (
SI != FuncInfo.StaticAllocaMap.end()) {
787 Addr.BaseType = Address::FrameIndexBase;
788 Addr.Base.FI =
SI->second;
796 if (
Addr.Base.Reg == 0)
Addr.Base.Reg = getRegForValue(Obj);
797 return Addr.Base.Reg != 0;
800 void ARMFastISel::ARMSimplifyAddress(Address &
Addr,
MVT VT,
bool useAM3) {
801 bool needsLowering =
false;
810 needsLowering = ((
Addr.Offset & 0xfff) !=
Addr.Offset);
812 if (needsLowering && isThumb2)
813 needsLowering = !(Subtarget->hasV6T2Ops() &&
Addr.Offset < 0 &&
817 needsLowering = (
Addr.Offset > 255 ||
Addr.Offset < -255);
823 needsLowering = ((
Addr.Offset & 0xff) !=
Addr.Offset);
830 if (needsLowering &&
Addr.BaseType == Address::FrameIndexBase) {
833 Register ResultReg = createResultReg(RC);
834 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
835 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
836 TII.get(Opc), ResultReg)
839 Addr.Base.Reg = ResultReg;
840 Addr.BaseType = Address::RegBase;
852 void ARMFastISel::AddLoadStoreOperands(
MVT VT, Address &
Addr,
862 if (
Addr.BaseType == Address::FrameIndexBase) {
863 int FI =
Addr.Base.FI;
867 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
895 AddOptionalDefs(MIB);
898 bool ARMFastISel::ARMEmitLoad(
MVT VT,
Register &ResultReg, Address &
Addr,
903 bool needVMOV =
false;
907 default:
return false;
911 if (
Addr.Offset < 0 &&
Addr.Offset > -256 && Subtarget->hasV6T2Ops())
912 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
914 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
923 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
926 if (Alignment && *Alignment <
Align(2) &&
927 !Subtarget->allowsUnalignedMem())
931 if (
Addr.Offset < 0 &&
Addr.Offset > -256 && Subtarget->hasV6T2Ops())
932 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
934 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
936 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
939 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
942 if (Alignment && *Alignment <
Align(4) &&
943 !Subtarget->allowsUnalignedMem())
947 if (
Addr.Offset < 0 &&
Addr.Offset > -256 && Subtarget->hasV6T2Ops())
954 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
957 if (!Subtarget->hasVFP2Base())
return false;
959 if (Alignment && *Alignment <
Align(4)) {
962 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
963 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
971 if (!Subtarget->hasVFP2Base())
return false;
974 if (Alignment && *Alignment <
Align(4))
982 ARMSimplifyAddress(
Addr, VT, useAM3);
986 ResultReg = createResultReg(RC);
987 assert(ResultReg > 255 &&
"Expected an allocated virtual register.");
989 TII.get(Opc), ResultReg);
996 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1006 if (cast<LoadInst>(
I)->isAtomic())
1009 const Value *SV =
I->getOperand(0);
1013 if (
const Argument *
Arg = dyn_cast<Argument>(SV)) {
1014 if (
Arg->hasSwiftErrorAttr())
1018 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1019 if (Alloca->isSwiftError())
1026 if (!isLoadTypeLegal(
I->getType(), VT))
1031 if (!ARMComputeAddress(
I->getOperand(0),
Addr))
return false;
1034 if (!ARMEmitLoad(VT, ResultReg,
Addr, cast<LoadInst>(
I)->
getAlign()))
1036 updateValueMap(
I, ResultReg);
1040 bool ARMFastISel::ARMEmitStore(
MVT VT,
unsigned SrcReg, Address &
Addr,
1043 bool useAM3 =
false;
1046 default:
return false;
1048 Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1049 : &ARM::GPRRegClass);
1050 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1052 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1060 if (
Addr.Offset < 0 &&
Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1061 StrOpc = ARM::t2STRBi8;
1063 StrOpc = ARM::t2STRBi12;
1065 StrOpc = ARM::STRBi12;
1069 if (Alignment && *Alignment <
Align(2) &&
1070 !Subtarget->allowsUnalignedMem())
1074 if (
Addr.Offset < 0 &&
Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1075 StrOpc = ARM::t2STRHi8;
1077 StrOpc = ARM::t2STRHi12;
1084 if (Alignment && *Alignment <
Align(4) &&
1085 !Subtarget->allowsUnalignedMem())
1089 if (
Addr.Offset < 0 &&
Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1090 StrOpc = ARM::t2STRi8;
1092 StrOpc = ARM::t2STRi12;
1094 StrOpc = ARM::STRi12;
1098 if (!Subtarget->hasVFP2Base())
return false;
1100 if (Alignment && *Alignment <
Align(4)) {
1102 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1107 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1109 StrOpc = ARM::VSTRS;
1114 if (!Subtarget->hasVFP2Base())
return false;
1117 if (Alignment && *Alignment <
Align(4))
1120 StrOpc = ARM::VSTRD;
1124 ARMSimplifyAddress(
Addr, VT, useAM3);
1136 Value *Op0 =
I->getOperand(0);
1137 unsigned SrcReg = 0;
1140 if (cast<StoreInst>(
I)->isAtomic())
1143 const Value *PtrV =
I->getOperand(1);
1147 if (
const Argument *
Arg = dyn_cast<Argument>(PtrV)) {
1148 if (
Arg->hasSwiftErrorAttr())
1152 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1153 if (Alloca->isSwiftError())
1160 if (!isLoadTypeLegal(
I->getOperand(0)->getType(), VT))
1164 SrcReg = getRegForValue(Op0);
1165 if (SrcReg == 0)
return false;
1169 if (!ARMComputeAddress(
I->getOperand(1),
Addr))
1172 if (!ARMEmitStore(VT, SrcReg,
Addr, cast<StoreInst>(
I)->
getAlign()))
1234 if (CI->
hasOneUse() && (CI->getParent() ==
I->getParent())) {
1238 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1252 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1253 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1260 if (TI->hasOneUse() && TI->getParent() ==
I->getParent() &&
1261 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1262 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1263 Register OpReg = getRegForValue(TI->getOperand(0));
1265 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1270 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1275 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1276 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1286 fastEmitBranch(
Target, MIMD.getDL());
1291 if (CmpReg == 0)
return false;
1300 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1303 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1308 if (FuncInfo.MBB->isLayoutSuccessor(
TBB)) {
1313 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1314 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(BrOpc))
1320 bool ARMFastISel::SelectIndirectBr(
const Instruction *
I) {
1321 Register AddrReg = getRegForValue(
I->getOperand(0));
1322 if (AddrReg == 0)
return false;
1324 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1325 assert(isThumb2 || Subtarget->hasV4TOps());
1327 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1332 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);
1337 bool ARMFastISel::ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
1341 if (!SrcEVT.
isSimple())
return false;
1344 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1347 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1353 bool UseImm =
false;
1354 bool isNegativeImm =
false;
1357 if (
const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1360 const APInt &CIVal = ConstInt->getValue();
1365 if (
Imm < 0 &&
Imm != (
int)0x80000000) {
1366 isNegativeImm =
true;
1372 }
else if (
const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1374 if (ConstFP->isZero() && !ConstFP->isNegative())
1380 bool needsExt =
false;
1382 default:
return false;
1386 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
1390 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
1400 CmpOpc = ARM::t2CMPrr;
1402 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1405 CmpOpc = ARM::CMPrr;
1407 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1412 Register SrcReg1 = getRegForValue(Src1Value);
1413 if (SrcReg1 == 0)
return false;
1415 unsigned SrcReg2 = 0;
1417 SrcReg2 = getRegForValue(Src2Value);
1418 if (SrcReg2 == 0)
return false;
1423 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1,
MVT::i32, isZExt);
1424 if (SrcReg1 == 0)
return false;
1426 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2,
MVT::i32, isZExt);
1427 if (SrcReg2 == 0)
return false;
1435 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1439 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
1445 AddOptionalDefs(MIB);
1451 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1457 const CmpInst *CI = cast<CmpInst>(
I);
1471 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1473 : &ARM::GPRRegClass;
1474 Register DestReg = createResultReg(RC);
1476 unsigned ZeroReg = fastMaterializeConstant(Zero);
1478 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc), DestReg)
1482 updateValueMap(
I, DestReg);
1488 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
1490 Value *V =
I->getOperand(0);
1491 if (!
I->getType()->isDoubleTy() ||
1495 if (
Op == 0)
return false;
1498 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1499 TII.get(ARM::VCVTDS), Result)
1501 updateValueMap(
I, Result);
1505 bool ARMFastISel::SelectFPTrunc(
const Instruction *
I) {
1507 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())
return false;
1509 Value *V =
I->getOperand(0);
1510 if (!(
I->getType()->isFloatTy() &&
1514 if (
Op == 0)
return false;
1517 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1518 TII.get(ARM::VCVTSD), Result)
1520 updateValueMap(
I, Result);
1526 if (!Subtarget->hasVFP2Base())
return false;
1529 Type *Ty =
I->getType();
1530 if (!isTypeLegal(Ty, DstVT))
1533 Value *Src =
I->getOperand(0);
1541 Register SrcReg = getRegForValue(Src);
1542 if (SrcReg == 0)
return false;
1546 SrcReg = ARMEmitIntExt(SrcVT, SrcReg,
MVT::i32,
1548 if (SrcReg == 0)
return false;
1553 unsigned FP = ARMMoveToFPReg(
MVT::f32, SrcReg);
1554 if (FP == 0)
return false;
1558 else if (Ty->
isDoubleTy() && Subtarget->hasFP64())
1559 Opc =
isSigned ? ARM::VSITOD : ARM::VUITOD;
1563 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1565 updateValueMap(
I, ResultReg);
1571 if (!Subtarget->hasVFP2Base())
return false;
1574 Type *RetTy =
I->getType();
1575 if (!isTypeLegal(RetTy, DstVT))
1579 if (
Op == 0)
return false;
1582 Type *OpTy =
I->getOperand(0)->getType();
1584 else if (OpTy->
isDoubleTy() && Subtarget->hasFP64())
1585 Opc =
isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1590 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1595 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1596 if (IntReg == 0)
return false;
1598 updateValueMap(
I, IntReg);
1604 if (!isTypeLegal(
I->getType(), VT))
1610 Register CondReg = getRegForValue(
I->getOperand(0));
1611 if (CondReg == 0)
return false;
1612 Register Op1Reg = getRegForValue(
I->getOperand(1));
1613 if (Op1Reg == 0)
return false;
1617 bool UseImm =
false;
1618 bool isNegativeImm =
false;
1619 if (
const ConstantInt *ConstInt = dyn_cast<ConstantInt>(
I->getOperand(2))) {
1621 Imm = (
int)ConstInt->getValue().getZExtValue();
1623 isNegativeImm =
true;
1630 unsigned Op2Reg = 0;
1632 Op2Reg = getRegForValue(
I->getOperand(2));
1633 if (Op2Reg == 0)
return false;
1636 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1639 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(TstOpc))
1646 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1647 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1649 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1651 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1653 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1655 Register ResultReg = createResultReg(RC);
1659 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1667 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(MovCCOpc),
1674 updateValueMap(
I, ResultReg);
1680 Type *Ty =
I->getType();
1681 if (!isTypeLegal(Ty, VT))
1687 if (Subtarget->hasDivideInThumbMode())
1693 LC =
isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
1695 LC =
isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
1697 LC =
isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
1699 LC =
isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
1701 LC =
isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
1702 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SDIV!");
1704 return ARMEmitLibcall(
I, LC);
1709 Type *Ty =
I->getType();
1710 if (!isTypeLegal(Ty, VT))
1722 LC =
isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
1724 LC =
isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
1726 LC =
isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
1728 LC =
isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
1730 LC =
isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
1731 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Unsupported SREM!");
1733 return ARMEmitLibcall(
I, LC);
1736 bool ARMFastISel::SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode) {
1745 switch (ISDOpcode) {
1746 default:
return false;
1748 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1751 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1754 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1758 Register SrcReg1 = getRegForValue(
I->getOperand(0));
1759 if (SrcReg1 == 0)
return false;
1763 Register SrcReg2 = getRegForValue(
I->getOperand(1));
1764 if (SrcReg2 == 0)
return false;
1766 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1769 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1770 TII.get(Opc), ResultReg)
1772 updateValueMap(
I, ResultReg);
1776 bool ARMFastISel::SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode) {
1778 if (!FPVT.
isSimple())
return false;
1789 Type *Ty =
I->getType();
1790 if (Ty->
isFloatTy() && !Subtarget->hasVFP2Base())
1792 if (Ty->
isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
1797 switch (ISDOpcode) {
1798 default:
return false;
1800 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1803 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1806 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1809 Register Op1 = getRegForValue(
I->getOperand(0));
1810 if (Op1 == 0)
return false;
1812 Register Op2 = getRegForValue(
I->getOperand(1));
1813 if (Op2 == 0)
return false;
1816 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1817 TII.get(Opc), ResultReg)
1819 updateValueMap(
I, ResultReg);
1834 if (Subtarget->hasVFP2Base() && !isVarArg) {
1835 if (!Subtarget->isAAPCS_ABI())
1844 if (Subtarget->isAAPCS_ABI()) {
1845 if (Subtarget->hasVFP2Base() &&
1885 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1886 CCAssignFnForCall(
CC,
false, isVarArg));
1890 for (
unsigned i = 0,
e = ArgLocs.size();
i !=
e; ++
i) {
1905 !VA.
isRegLoc() || !ArgLocs[++
i].isRegLoc())
1917 if (!Subtarget->hasVFP2Base())
1921 if (!Subtarget->hasVFP2Base())
1931 NumBytes = CCInfo.getNextStackOffset();
1934 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
1935 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1936 TII.get(AdjStackDown))
1940 for (
unsigned i = 0,
e = ArgLocs.size();
i !=
e; ++
i) {
1947 "We don't handle NEON/vector parameters yet.");
1954 Arg = ARMEmitIntExt(ArgVT,
Arg, DestVT,
false);
1955 assert(
Arg != 0 &&
"Failed to emit a sext");
1963 Arg = ARMEmitIntExt(ArgVT,
Arg, DestVT,
true);
1964 assert(
Arg != 0 &&
"Failed to emit a zext");
1970 assert(BC != 0 &&
"Failed to emit a bitcast!");
1980 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1986 "Custom lowering for v2f64 args not available");
1992 "We only handle register args!");
1994 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2005 if (isa<UndefValue>(ArgVal))
2009 Addr.BaseType = Address::RegBase;
2010 Addr.Base.Reg = ARM::SP;
2013 bool EmitRet = ARMEmitStore(ArgVT,
Arg,
Addr); (void)EmitRet;
2014 assert(EmitRet &&
"Could not emit a store for argument!");
2023 unsigned &NumBytes,
bool isVarArg) {
2025 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
2026 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2027 TII.get(AdjStackUp))
2034 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(
CC,
true, isVarArg));
2037 if (RVLocs.size() == 2 && RetVT ==
MVT::f64) {
2040 MVT DestVT = RVLocs[0].getValVT();
2042 Register ResultReg = createResultReg(DstRC);
2043 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2045 .
addReg(RVLocs[0].getLocReg())
2046 .
addReg(RVLocs[1].getLocReg()));
2048 UsedRegs.push_back(RVLocs[0].getLocReg());
2049 UsedRegs.push_back(RVLocs[1].getLocReg());
2052 updateValueMap(
I, ResultReg);
2054 assert(RVLocs.size() == 1 &&
"Can't handle non-double multi-reg retvals!");
2055 MVT CopyVT = RVLocs[0].getValVT();
2063 Register ResultReg = createResultReg(DstRC);
2064 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2065 TII.get(TargetOpcode::COPY),
2066 ResultReg).
addReg(RVLocs[0].getLocReg());
2067 UsedRegs.push_back(RVLocs[0].getLocReg());
2070 updateValueMap(
I, ResultReg);
2079 const Function &
F = *
I->getParent()->getParent();
2080 const bool IsCmseNSEntry =
F.hasFnAttribute(
"cmse_nonsecure_entry");
2082 if (!FuncInfo.CanLowerReturn)
2086 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2096 if (
Ret->getNumOperands() > 0) {
2102 CCState CCInfo(
CC,
F.isVarArg(), *FuncInfo.MF, ValLocs,
I->getContext());
2103 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(
CC,
true ,
2106 const Value *RV =
Ret->getOperand(0);
2112 if (ValLocs.size() != 1)
2126 if (!RVEVT.
isSimple())
return false;
2130 if (RVVT != DestVT) {
2138 if (Outs[0].
Flags.isZExt() || Outs[0].Flags.isSExt()) {
2139 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].
Flags.isZExt());
2140 if (SrcReg == 0)
return false;
2150 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2151 TII.get(TargetOpcode::COPY), DstReg).
addReg(SrcReg);
2160 RetOpc = ARM::tBXNS_RET;
2164 RetOpc = Subtarget->getReturnOpcode();
2168 AddOptionalDefs(MIB);
2169 for (
unsigned R : RetRegs)
2174 unsigned ARMFastISel::ARMSelectCallOp(
bool UseReg) {
2178 return isThumb2 ? ARM::tBL :
ARM::BL;
2181 unsigned ARMFastISel::getLibcallReg(
const Twine &Name) {
2192 return ARMMaterializeGV(GV, LCREVT.
getSimpleVT());
2206 Type *RetTy =
I->getType();
2210 else if (!isTypeLegal(RetTy, RetVT))
2217 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(
CC,
true,
false));
2218 if (RVLocs.size() >= 2 && RetVT !=
MVT::f64)
2227 Args.reserve(
I->getNumOperands());
2228 ArgRegs.
reserve(
I->getNumOperands());
2229 ArgVTs.
reserve(
I->getNumOperands());
2230 ArgFlags.
reserve(
I->getNumOperands());
2233 if (
Arg == 0)
return false;
2235 Type *ArgTy =
Op->getType();
2237 if (!isTypeLegal(ArgTy, ArgVT))
return false;
2240 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2243 ArgRegs.push_back(
Arg);
2244 ArgVTs.push_back(ArgVT);
2245 ArgFlags.push_back(Flags);
2251 if (!ProcessCallArgs(
Args, ArgRegs, ArgVTs, ArgFlags,
2252 RegArgs,
CC, NumBytes,
false))
2256 if (Subtarget->genLongCalls()) {
2258 if (CalleeReg == 0)
return false;
2262 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2264 MIMD,
TII.get(CallOpc));
2268 if (Subtarget->genLongCalls()) {
2285 if (!FinishCall(RetVT, UsedRegs,
I,
CC, NumBytes,
false))
return false;
2288 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2294 const char *IntrMemName =
nullptr) {
2299 if (isa<InlineAsm>(Callee))
return false;
2313 Type *RetTy =
I->getType();
2317 else if (!isTypeLegal(RetTy, RetVT) && RetVT !=
MVT::i16 &&
2326 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(
CC,
true, isVarArg));
2327 if (RVLocs.size() >= 2 && RetVT !=
MVT::f64)
2336 unsigned arg_size = CI->
arg_size();
2337 Args.reserve(arg_size);
2341 for (
auto ArgI = CI->
arg_begin(), ArgE = CI->
arg_end(); ArgI != ArgE; ++ArgI) {
2344 if (IntrMemName && ArgE - ArgI <= 1)
2348 unsigned ArgIdx = ArgI - CI->
arg_begin();
2363 Type *ArgTy = (*ArgI)->getType();
2365 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT !=
MVT::i16 && ArgVT !=
MVT::i8 &&
2373 Flags.setOrigAlign(
DL.getABITypeAlign(ArgTy));
2375 Args.push_back(*ArgI);
2376 ArgRegs.push_back(
Arg);
2377 ArgVTs.push_back(ArgVT);
2378 ArgFlags.push_back(Flags);
2384 if (!ProcessCallArgs(
Args, ArgRegs, ArgVTs, ArgFlags,
2385 RegArgs,
CC, NumBytes, isVarArg))
2389 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
2390 if (!GV || Subtarget->genLongCalls())
UseReg =
true;
2395 CalleeReg = getLibcallReg(IntrMemName);
2397 CalleeReg = getRegForValue(Callee);
2399 if (CalleeReg == 0)
return false;
2403 unsigned CallOpc = ARMSelectCallOp(
UseReg);
2405 MIMD,
TII.get(CallOpc));
2414 }
else if (!IntrMemName)
2429 if (!FinishCall(RetVT, UsedRegs,
I,
CC, NumBytes, isVarArg))
2433 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs,
TRI);
2438 bool ARMFastISel::ARMIsMemCpySmall(
uint64_t Len) {
2442 bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
uint64_t Len,
2445 if (!ARMIsMemCpySmall(Len))
2450 if (!Alignment || *Alignment >= 4) {
2456 assert(Len == 1 &&
"Expected a length of 1!");
2460 assert(Alignment &&
"Alignment is set in this branch");
2462 if (Len >= 2 && *Alignment == 2)
2471 RV = ARMEmitLoad(VT, ResultReg, Src);
2472 assert(RV &&
"Should be able to handle this load.");
2473 RV = ARMEmitStore(VT, ResultReg, Dest);
2474 assert(RV &&
"Should be able to handle this store.");
2479 Dest.Offset +=
Size;
2488 switch (
I.getIntrinsicID()) {
2489 default:
return false;
2490 case Intrinsic::frameaddress: {
2494 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2496 : &ARM::GPRRegClass;
2509 unsigned Depth = cast<ConstantInt>(
I.getOperand(0))->getZExtValue();
2511 DestReg = createResultReg(RC);
2512 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2513 TII.get(LdrOpc), DestReg)
2517 updateValueMap(&
I, SrcReg);
2521 case Intrinsic::memmove: {
2530 if (isa<ConstantInt>(MTI.
getLength()) && isMemCpy) {
2534 if (ARMIsMemCpySmall(Len)) {
2536 if (!ARMComputeAddress(MTI.
getRawDest(), Dest) ||
2543 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2554 const char *IntrMemName = isa<MemCpyInst>(
I) ?
"memcpy" :
"memmove";
2555 return SelectCall(&
I, IntrMemName);
2557 case Intrinsic::memset: {
2569 return SelectCall(&
I,
"memset");
2571 case Intrinsic::trap: {
2573 if (Subtarget->isThumb())
2574 Opcode = ARM::tTRAP;
2576 Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl :
ARM::TRAP;
2577 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opcode));
2598 if (!SrcReg)
return false;
2602 updateValueMap(
I, SrcReg);
2606 unsigned ARMFastISel::ARMEmitIntExt(
MVT SrcVT,
unsigned SrcReg,
MVT DestVT,
2615 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2619 { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2620 { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2621 { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2630 { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2631 { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2635 static const struct InstructionTable {
2640 }
IT[2][2][3][2] = {
2682 assert((SrcBits < DestBits) &&
"can only extend to larger types");
2683 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2684 "other sizes unimplemented");
2685 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2686 "other sizes unimplemented");
2688 bool hasV6Ops = Subtarget->hasV6Ops();
2689 unsigned Bitness = SrcBits / 8;
2690 assert((Bitness < 3) &&
"sanity-check table bounds");
2692 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2694 const InstructionTable *ITP = &
IT[isSingleInstr][isThumb2][Bitness][isZExt];
2695 unsigned Opc = ITP->Opc;
2696 assert(ARM::KILL != Opc &&
"Invalid table entry");
2697 unsigned hasS = ITP->hasS;
2700 "only MOVsi has shift operand addressing mode");
2701 unsigned Imm = ITP->Imm;
2704 bool setsCPSR = &ARM::tGPRRegClass == RC;
2705 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2720 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2721 for (
unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
2722 ResultReg = createResultReg(RC);
2723 bool isLsl = (0 == Instr) && !isSingleInstr;
2724 unsigned Opcode = isLsl ? LSLOpc : Opc;
2727 bool isKill = 1 == Instr;
2729 *FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opcode), ResultReg);
2748 Type *DestTy =
I->getType();
2749 Value *Src =
I->getOperand(0);
2750 Type *SrcTy = Src->getType();
2752 bool isZExt = isa<ZExtInst>(
I);
2753 Register SrcReg = getRegForValue(Src);
2754 if (!SrcReg)
return false;
2756 EVT SrcEVT, DestEVT;
2759 if (!SrcEVT.
isSimple())
return false;
2760 if (!DestEVT.
isSimple())
return false;
2764 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2765 if (ResultReg == 0)
return false;
2766 updateValueMap(
I, ResultReg);
2782 unsigned Opc = ARM::MOVsr;
2784 Value *Src2Value =
I->getOperand(1);
2785 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2786 ShiftImm = CI->getZExtValue();
2790 if (ShiftImm == 0 || ShiftImm >=32)
2796 Value *Src1Value =
I->getOperand(0);
2797 Register Reg1 = getRegForValue(Src1Value);
2798 if (Reg1 == 0)
return false;
2801 if (Opc == ARM::MOVsr) {
2802 Reg2 = getRegForValue(Src2Value);
2803 if (Reg2 == 0)
return false;
2806 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2807 if(ResultReg == 0)
return false;
2810 TII.get(Opc), ResultReg)
2813 if (Opc == ARM::MOVsi)
2815 else if (Opc == ARM::MOVsr) {
2820 AddOptionalDefs(MIB);
2821 updateValueMap(
I, ResultReg);
2826 bool ARMFastISel::fastSelectInstruction(
const Instruction *
I) {
2827 switch (
I->getOpcode()) {
2829 return SelectLoad(
I);
2831 return SelectStore(
I);
2832 case Instruction::Br:
2833 return SelectBranch(
I);
2834 case Instruction::IndirectBr:
2835 return SelectIndirectBr(
I);
2836 case Instruction::ICmp:
2837 case Instruction::FCmp:
2838 return SelectCmp(
I);
2839 case Instruction::FPExt:
2840 return SelectFPExt(
I);
2841 case Instruction::FPTrunc:
2842 return SelectFPTrunc(
I);
2843 case Instruction::SIToFP:
2844 return SelectIToFP(
I,
true);
2845 case Instruction::UIToFP:
2846 return SelectIToFP(
I,
false);
2847 case Instruction::FPToSI:
2848 return SelectFPToI(
I,
true);
2849 case Instruction::FPToUI:
2850 return SelectFPToI(
I,
false);
2853 case Instruction::Or:
2854 return SelectBinaryIntOp(
I,
ISD::OR);
2855 case Instruction::Sub:
2857 case Instruction::FAdd:
2859 case Instruction::FSub:
2861 case Instruction::FMul:
2863 case Instruction::SDiv:
2864 return SelectDiv(
I,
true);
2865 case Instruction::UDiv:
2866 return SelectDiv(
I,
false);
2867 case Instruction::SRem:
2868 return SelectRem(
I,
true);
2869 case Instruction::URem:
2870 return SelectRem(
I,
false);
2873 return SelectIntrinsicCall(*II);
2874 return SelectCall(
I);
2876 return SelectSelect(
I);
2878 return SelectRet(
I);
2879 case Instruction::Trunc:
2880 return SelectTrunc(
I);
2881 case Instruction::ZExt:
2882 case Instruction::SExt:
2883 return SelectIntExt(
I);
2884 case Instruction::Shl:
2886 case Instruction::LShr:
2888 case Instruction::AShr:
2907 { { ARM::ANDri, ARM::t2ANDri }, 255, 1,
MVT::i8 },
2916 bool ARMFastISel::tryToFoldLoadIntoMI(
MachineInstr *
MI,
unsigned OpNo,
2920 if (!isLoadTypeLegal(LI->
getType(), VT))
2927 if (
MI->getNumOperands() < 3 || !
MI->getOperand(2).isImm())
2934 if (FLE.Opc[isThumb2] ==
MI->getOpcode() &&
2938 isZExt = FLE.isZExt;
2941 if (!Found)
return false;
2947 Register ResultReg =
MI->getOperand(0).getReg();
2948 if (!ARMEmitLoad(VT, ResultReg,
Addr, LI->
getAlign(), isZExt,
false))
2951 removeDeadCode(
I, std::next(
I));
2955 unsigned ARMFastISel::ARMLowerPICELF(
const GlobalValue *GV,
MVT VT) {
2956 bool UseGOT_PREL = !
TM.shouldAssumeDSOLocal(*GV->
getParent(), GV);
2960 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2968 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
2973 Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
2974 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
2976 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opc), TempReg)
2979 if (Opc == ARM::LDRcp)
2985 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
2988 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opc), DestReg)
2990 .
addImm(ARMPCLabelIndex);
2992 if (!Subtarget->isThumb())
2995 if (UseGOT_PREL && Subtarget->isThumb()) {
2997 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2998 TII.get(ARM::t2LDRi12), NewDestReg)
3001 DestReg = NewDestReg;
3002 AddOptionalDefs(MIB);
3007 bool ARMFastISel::fastLowerArguments() {
3008 if (!FuncInfo.CanLowerReturn)
3032 if (
Arg.getArgNo() >= 4)
3035 if (
Arg.hasAttribute(Attribute::InReg) ||
3036 Arg.hasAttribute(Attribute::StructRet) ||
3037 Arg.hasAttribute(Attribute::SwiftSelf) ||
3038 Arg.hasAttribute(Attribute::SwiftError) ||
3039 Arg.hasAttribute(Attribute::ByVal))
3047 if (!ArgVT.
isSimple())
return false;
3059 ARM::R0, ARM::R1,
ARM::R2, ARM::R3
3064 unsigned ArgNo =
Arg.getArgNo();
3066 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3070 Register ResultReg = createResultReg(RC);
3071 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3072 TII.get(TargetOpcode::COPY),
3074 updateValueMap(&
Arg, ResultReg);
3085 return new ARMFastISel(funcInfo, libInfo);