class ARMFastISel final : public FastISel {

        TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
        TLI(*Subtarget->getTargetLowering()) {
  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, unsigned Op0);
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, unsigned Op1);
  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, uint64_t Imm);
  unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, uint64_t Imm);

  bool fastSelectInstruction(const Instruction *I) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  bool fastLowerArguments() override;

#include "ARMGenFastISel.inc"
  bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectCall(const Instruction *I, const char *IntrMemName);

  bool isPositionIndependent() const;
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt);
  bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                   unsigned Alignment = 0, bool isZExt = true,
                   bool allocReg = true);
  bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                    unsigned Alignment = 0);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
  bool ARMIsMemCpySmall(uint64_t Len);
  bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                             unsigned Alignment);
  unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);

  unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(bool UseReg);

  unsigned getLibcallReg(const Twine &Name);

                       unsigned &NumBytes, bool isVarArg);

  void AddLoadStoreOperands(MVT VT, Address &Addr,
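// DefinesOptionalPredicate - Check whether MI's optional def writes CPSR and
// report it through *CPSR.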
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())

    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)

  return MI->isPredicable();

    if (opInfo.isPredicate())

  if (isARMNEONPred(MI))

  if (DefinesOptionalPredicate(MI, &CPSR))
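// The fastEmitInst_* helpers emit a single machine instruction with the
// result constrained to the requested register class; when the opcode cannot
// define that class directly, the value is produced in a scratch register and
// moved with TargetOpcode::COPY. AddOptionalDefs predicates optional CPSR defs.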
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,

  Register ResultReg = createResultReg(RC);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,

                                      unsigned Op0, unsigned Op1) {
  Register ResultReg = createResultReg(RC);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,

  Register ResultReg = createResultReg(RC);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,

  Register ResultReg = createResultReg(RC);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
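// Materialize a floating-point constant, falling back to a constant-pool
// load (VLDRS/VLDRD) when the value cannot be encoded as an FP immediate.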
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

  if (!Subtarget->hasVFP2Base()) return false;

  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);

  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
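// Materialize an integer constant: prefer a 16-bit MOV or an MVN of the
// complement when the value fits, then movw/movt, else a constant-pool load.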
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;

    Register ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)

      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;

      Register ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(Opc), ImmReg)

  unsigned ResultReg = 0;
  if (Subtarget->useMovt())

  unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
bool ARMFastISel::isPositionIndependent() const {
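// Materialize the address of a global value, choosing between movw/movt
// (MOV_ga_pcrel for PIC) and a constant-pool load, with an extra load for
// indirect or GOT-based references.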
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {

  if (Subtarget->isROPI() || Subtarget->isRWPI())

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);

  Register DestReg = createResultReg(RC);

  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();

  if (Subtarget->useMovt() &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {

    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, VT);

    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;

    unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);

      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),

      if (IsPositionIndependent)

      AddOptionalDefs(MIB);

      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)

      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;

                                          DbgLoc, TII.get(Opc), NewDestReg)

        AddOptionalDefs(MIB);

  if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
      (Subtarget->isTargetMachO() && IsIndirect) ||
      Subtarget->genLongCalls()) {

      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)

      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)

    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {

  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;

    Register ResultReg = createResultReg(RC);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;
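// Walk the address expression (casts, GEPs, static allocas) and fold as much
// as possible into Addr as a base register or frame index plus offset.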
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {

  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {

    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();

  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();

    if (Ty->getAddressSpace() > 255)

  case Instruction::BitCast:

  case Instruction::IntToPtr:

  case Instruction::PtrToInt:

  case Instruction::GetElementPtr: {

    int TmpOffset = Addr.Offset;

         i != e; ++i, ++GTI) {

        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();

          if (canFoldAddIntoGEP(U, Op)) {

                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));

            Op = cast<AddOperator>(Op)->getOperand(0);

            goto unsupported_gep;

    Addr.Offset = TmpOffset;

  case Instruction::Alloca: {

        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;

  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
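// If the accumulated offset does not fit the addressing mode for this access
// width, fold the frame index or offset into a register with an ADD so the
// memory operation itself can use a simple base.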
void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;

      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);

      if (needsLowering && isThumb2)
        needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&

        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);

      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);

  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {

    Register ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)

    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,

  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;

        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  AddOptionalDefs(MIB);
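// Emit a load of VT from Addr: the opcode and register class are chosen per
// type, negative offsets use the i8/AM3 forms, and an under-aligned f32 is
// loaded as an i32 and transferred to an S-register with a VMOV.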
bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {

  bool needVMOV = false;

  default: return false;

    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
    else
      Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;

    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

    if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())

    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
    else
      Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;

      Opc = isZExt ? ARM::LDRH : ARM::LDRSH;

    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())

    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())

    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

    if (!Subtarget->hasVFP2Base()) return false;

    if (Alignment && Alignment < 4) {

      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

    if (!Subtarget->hasVFP2Base()) return false;

    if (Alignment && Alignment < 4)

  ARMSimplifyAddress(Addr, VT, useAM3);

  ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
                                    TII.get(Opc), ResultReg);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
  if (cast<LoadInst>(I)->isAtomic())

  const Value *SV = I->getOperand(0);

    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())

  if (!isLoadTypeLegal(I->getType(), VT))

  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  updateValueMap(I, ResultReg);
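// Emit a store of SrcReg to Addr. Mirrors ARMEmitLoad: an i1 value is first
// masked down to one bit, and an under-aligned f32 is moved to a GPR so a
// plain integer STR can be used.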
bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {

  bool useAM3 = false;

  default: return false;

    Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                            : &ARM::GPRRegClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRBi8;
      else
        StrOpc = ARM::t2STRBi12;

      StrOpc = ARM::STRBi12;

    if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())

      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRHi8;
      else
        StrOpc = ARM::t2STRHi12;

    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())

      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRi8;
      else
        StrOpc = ARM::t2STRi12;

      StrOpc = ARM::STRi12;

    if (!Subtarget->hasVFP2Base()) return false;

    if (Alignment && Alignment < 4) {

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;

      StrOpc = ARM::VSTRS;

    if (!Subtarget->hasVFP2Base()) return false;

    if (Alignment && Alignment < 4)

    StrOpc = ARM::VSTRD;

  ARMSimplifyAddress(Addr, VT, useAM3);
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  if (cast<StoreInst>(I)->isAtomic())

  const Value *PtrV = I->getOperand(1);

    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())

  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))

  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  if (!ARMComputeAddress(I->getOperand(1), Addr))
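// SelectBranch folds a single-use compare or truncate in the same block into
// the branch (flag-setting compare or TST against 1 followed by Bcc); failing
// that, the materialized condition register is tested explicitly.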
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))

      finishCondBranch(BI->getParent(), TBB, FBB);

    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      Register OpReg = getRegForValue(TI->getOperand(0));

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))

      finishCondBranch(BI->getParent(), TBB, FBB);

    fastEmitBranch(Target, DbgLoc);

  if (CmpReg == 0) return false;

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))

  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))

  finishCondBranch(BI->getParent(), TBB, FBB);
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  Register AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  assert(isThumb2 || Subtarget->hasV4TOps());

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);
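// Emit an integer or FP compare: constant RHS values that fit are folded into
// CMP/CMN (or the VCMPZ forms for +0.0), and sub-word operands are extended
// to i32 first.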
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,

  if (!SrcEVT.isSimple()) return false;

  if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())

  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))

  bool UseImm = false;
  bool isNegativeImm = false;

  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {

      const APInt &CIVal = ConstInt->getValue();

      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;

  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {

    if (ConstFP->isZero() && !ConstFP->isNegative())

  bool needsExt = false;

  default: return false;

    CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;

    CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;

        CmpOpc = ARM::t2CMPrr;

        CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;

        CmpOpc = ARM::CMPrr;

        CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;

  Register SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;

    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;

    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;

      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)

    AddOptionalDefs(MIB);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
  const CmpInst *CI = cast<CmpInst>(I);

  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
                                           : &ARM::GPRRegClass;
  Register DestReg = createResultReg(RC);

  unsigned ZeroReg = fastMaterializeConstant(Zero);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)

  updateValueMap(I, DestReg);
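// FP extend/truncate are single VCVTDS/VCVTSD instructions; both require a
// VFP2 base and 64-bit FP support.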
  if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||

  if (Op == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)

  updateValueMap(I, Result);
bool ARMFastISel::SelectFPTrunc(const Instruction *I) {

  if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&

  if (Op == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)

  updateValueMap(I, Result);
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {

  if (!Subtarget->hasVFP2Base()) return false;

  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))

  Value *Src = I->getOperand(0);

  Register SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,

    if (SrcReg == 0) return false;

  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy() && Subtarget->hasFP64())
    Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {

  if (!Subtarget->hasVFP2Base()) return false;

  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))

  if (Op == 0) return false;

  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy() && Subtarget->hasFP64())
    Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  if (!isTypeLegal(I->getType(), VT))

  Register CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  Register Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {

    Imm = (int)ConstInt->getValue().getZExtValue();

      isNegativeImm = true;

  unsigned Op2Reg = 0;

    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))

    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;

    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;

  Register ResultReg = createResultReg(RC);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),

  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {

  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))

  if (Subtarget->hasDivideInThumbMode())

    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;

    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;

    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;

    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;

    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {

  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))

    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;

    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;

    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;

    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;

    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {

  switch (ISDOpcode) {
  default: return false;

    Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;

    Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;

    Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;

  Register SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  Register SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)

  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {

  if (!FPVT.isSimple()) return false;

  Type *Ty = I->getType();
  if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())

  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))

  switch (ISDOpcode) {
  default: return false;

    Opc = is64bit ? ARM::VADDD : ARM::VADDS;

    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;

    Opc = is64bit ? ARM::VMULD : ARM::VMULS;

  Register Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  Register Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)

  updateValueMap(I, ResultReg);
  if (Subtarget->hasVFP2Base() && !isVarArg) {
    if (!Subtarget->isAAPCS_ABI())

  if (Subtarget->isAAPCS_ABI()) {
    if (Subtarget->hasVFP2Base() &&

  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

        !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())

      if (!Subtarget->hasVFP2Base())

      if (!Subtarget->hasVFP2Base())

  NumBytes = CCInfo.getNextStackOffset();

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackDown))

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

           "We don't handle NEON/vector parameters yet.");

      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, false);
      assert(Arg != 0 && "Failed to emit a sext");

      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, true);
      assert(Arg != 0 && "Failed to emit a zext");

      assert(BC != 0 && "Failed to emit a bitcast!");

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

             "Custom lowering for v2f64 args not available");

             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,

      if (isa<UndefValue>(ArgVal))

      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
// FinishCall: tear down the call frame and copy the return value out of the
// physical registers assigned by the calling convention; a two-register f64
// result is merged with VMOVDRR.
                              unsigned &NumBytes, bool isVarArg) {

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackUp))

    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    if (RVLocs.size() == 2 && RetVT == MVT::f64) {

      MVT DestVT = RVLocs[0].getValVT();

      Register ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVDRR), ResultReg)
                          .addReg(RVLocs[0].getLocReg())
                          .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      updateValueMap(I, ResultReg);

      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();

      Register ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      updateValueMap(I, ResultReg);
  const Function &F = *I->getParent()->getParent();
  const bool IsCmseNSEntry = F.hasFnAttribute("cmse_nonsecure_entry");

  if (!FuncInfo.CanLowerReturn)

      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))

  if (Ret->getNumOperands() > 0) {

    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true,

    const Value *RV = Ret->getOperand(0);

    if (ValLocs.size() != 1)

    if (!RVEVT.isSimple()) return false;

    if (RVVT != DestVT) {

      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    RetOpc = ARM::tBXNS_RET;

    RetOpc = Subtarget->getReturnOpcode();

  AddOptionalDefs(MIB);
  for (unsigned R : RetRegs)
unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {

  return isThumb2 ? ARM::tBL : ARM::BL;

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {

  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
  Type *RetTy = I->getType();

  else if (!isTypeLegal(RetTy, RetVT))

    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)

  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());

    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();

    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);

  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))

  if (Subtarget->genLongCalls()) {

    if (CalleeReg == 0) return false;

  unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());

                                    DbgLoc, TII.get(CallOpc));

  if (Subtarget->genLongCalls()) {

  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
                              const char *IntrMemName = nullptr) {

  if (isa<InlineAsm>(Callee)) return false;

  Type *RetTy = I->getType();

  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&

    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)

  unsigned arg_size = CI->arg_size();
  Args.reserve(arg_size);

  for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end(); ArgI != ArgE;
       ++ArgI) {

    if (IntrMemName && ArgE - ArgI <= 1)

    unsigned ArgIdx = ArgI - CI->arg_begin();

    Type *ArgTy = (*ArgI)->getType();

    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&

    Args.push_back(*ArgI);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);

  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))

  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->genLongCalls())
    UseReg = true;

    CalleeReg = getLibcallReg(IntrMemName);

    CalleeReg = getRegForValue(Callee);

  if (CalleeReg == 0) return false;

  unsigned CallOpc = ARMSelectCallOp(UseReg);

                                    DbgLoc, TII.get(CallOpc));

  } else if (!IntrMemName)

  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))

  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
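// Small memcpys are emitted inline: ARMIsMemCpySmall bounds the length, and
// ARMTryEmitSmallMemCpy copies in the widest chunks the alignment allows via
// paired ARMEmitLoad/ARMEmitStore calls.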
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, unsigned Alignment) {

  if (!ARMIsMemCpySmall(Len))

    if (!Alignment || Alignment >= 4) {

        assert(Len == 1 && "Expected a length of 1!");

      if (Len >= 2 && Alignment == 2)

    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");

    Dest.Offset += Size;
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {

    unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
                                             : &ARM::GPRRegClass;

    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();

      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(LdrOpc), DestReg)

    updateValueMap(&I, SrcReg);

  case Intrinsic::memmove: {

    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {

      if (ARMIsMemCpySmall(Len)) {

        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||

        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);

  case Intrinsic::memset: {

    return SelectCall(&I, "memset");

  case Intrinsic::trap: {

    if (Subtarget->isThumb())
      Opcode = ARM::tTRAP;
    else
      Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode));
  if (!SrcReg) return false;

  updateValueMap(I, SrcReg);
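// Emit a zero/sign extension of SrcReg. The tables below pick either a single
// extend instruction or a two-instruction shift-left/shift-right sequence,
// indexed by source width, ARM vs. Thumb2, and availability of v6 ops.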
unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {

  static const uint8_t isSingleInstrTbl[3][2][2][2] = {

    { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
    { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
    { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }

    { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    { &ARM::tGPRRegClass, &ARM::rGPRRegClass }

  static const struct InstructionTable {

  } IT[2][2][3][2] = {

  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];

  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");
  unsigned hasS = ITP->hasS;

         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;

  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;

  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;

    bool isKill = 1 == Instr;

        *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  Register SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;

  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
  unsigned Opc = ARM::MOVsr;

  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    if (ShiftImm == 0 || ShiftImm >= 32)

  Value *Src1Value = I->getOperand(0);
  Register Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;

  Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

                            TII.get(Opc), ResultReg)

  if (Opc == ARM::MOVsi)

  else if (Opc == ARM::MOVsr) {

  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
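// Top-level FastISel dispatch: route each IR opcode to the matching Select*
// routine; returning false makes the instruction fall back to SelectionDAG.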
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::IndirectBr:
    return SelectIndirectBr(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return SelectCmp(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::SIToFP:
    return SelectIToFP(I, true);
  case Instruction::UIToFP:
    return SelectIToFP(I, false);
  case Instruction::FPToSI:
    return SelectFPToI(I, true);
  case Instruction::FPToUI:
    return SelectFPToI(I, false);

  case Instruction::Or:
    return SelectBinaryIntOp(I, ISD::OR);
  case Instruction::Sub:

  case Instruction::FAdd:

  case Instruction::FSub:

  case Instruction::FMul:

  case Instruction::SDiv:
    return SelectDiv(I, true);
  case Instruction::UDiv:
    return SelectDiv(I, false);
  case Instruction::SRem:
    return SelectRem(I, true);
  case Instruction::URem:
    return SelectRem(I, false);

      return SelectIntrinsicCall(*II);
    return SelectCall(I);

    return SelectSelect(I);

    return SelectRet(I);
  case Instruction::Trunc:
    return SelectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return SelectIntExt(I);
  case Instruction::Shl:

  case Instruction::LShr:

  case Instruction::AShr:
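// tryToFoldLoadIntoMI folds a small load into the user instruction that
// performs the zero/sign extension (e.g. an AND with 255), replacing both
// with one extending load; the table lists the recognized patterns.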
    { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },

bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,

  if (!isLoadTypeLegal(LI->getType(), VT))

  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())

    if (FLE.Opc[isThumb2] == MI->getOpcode() &&

      isZExt = FLE.isZExt;

  if (!Found) return false;

  Register ResultReg = MI->getOperand(0).getReg();

  removeDeadCode(I, std::next(I));
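// ELF PIC lowering: form a pc-relative constant-pool address and resolve it
// with PICADD/PICLDR (tPICADD on Thumb), loading through the GOT when the
// global is not known to be DSO-local.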
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
  bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);

  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;

  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);

  Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)

  if (Opc == ARM::LDRcp)

  Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR

  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                DestReg)
            .addImm(ARMPCLabelIndex);

  if (!Subtarget->isThumb())

  if (UseGOT_PREL && Subtarget->isThumb()) {

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(ARM::t2LDRi12), NewDestReg)

    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
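// fastLowerArguments handles the simple case of small integer arguments
// passed in r0-r3: each live-in register is copied into a virtual register
// and bound to its formal argument.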
bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)

    if (Arg.getArgNo() >= 4)

    if (Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::ByVal))

    if (!ArgVT.isSimple()) return false;

    ARM::R0, ARM::R1, ARM::R2, ARM::R3

    unsigned ArgNo = Arg.getArgNo();

    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);

    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),

    updateValueMap(&Arg, ResultReg);
3079 return new ARMFastISel(funcInfo, libInfo);