class ARMFastISel final : public FastISel {
// ...
        TM(funcInfo.MF->getTarget()),
        TII(*Subtarget->getInstrInfo()),
        TLI(*Subtarget->getTargetLowering()) {
// ...
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, unsigned Op1);
// ...
#include "ARMGenFastISel.inc"
  bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
// ...
  bool SelectCall(const Instruction *I, const char *IntrMemName);
  bool isPositionIndependent() const;
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt);
  bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                   MaybeAlign Alignment = std::nullopt, bool isZExt = true,
                   bool allocReg = true);
  bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                    MaybeAlign Alignment = std::nullopt);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
  bool ARMIsMemCpySmall(uint64_t Len);
  bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                             MaybeAlign Alignment);
  unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
// ...
  unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(bool UseReg);
// ...
  unsigned getLibcallReg(const Twine &Name);
// ...
                       unsigned &NumBytes, bool isVarArg);
// ...
  void AddLoadStoreOperands(MVT VT, Address &Addr,
                            const MachineInstrBuilder &MIB,
                            MachineMemOperand::Flags Flags, bool useAM3);
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;
// ...
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
// ...
  return MI->isPredicable();
// ...
    if (opInfo.isPredicate())
      return true;
// ...
  if (isARMNEONPred(MI))
    MIB.add(predOps(ARMCC::AL));
// ...
  if (DefinesOptionalPredicate(MI, &CPSR))
    MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0) {
  Register ResultReg = createResultReg(RC);
// ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                            ResultReg).addReg(Op0));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        .addReg(Op0));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        /* copy from the instruction's implicit def */);
  }
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, unsigned Op1) {
  Register ResultReg = createResultReg(RC);
// ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addReg(Op1));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        /* ... */);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        /* ... */);
  }
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
// ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        /* ... */);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        /* ... */);
  }
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
// ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        /* ... */);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        /* ... */);
  }
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVSR), MoveReg)
                      .addReg(SrcReg));
// ...
unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
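// VMOVSR/VMOVRS are the single-precision GPR<->SPR transfer instructions;
// f64 (resp. i64) is rejected up front because a 64-bit value would need a
// register pair (VMOVDRR/VMOVRRD, used in FinishCall below) instead.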
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  bool is64bit = VT == MVT::f64;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          /* ... */));
// ...
  if (!Subtarget->hasVFP2Base())
    return false;
// ...
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
// ...
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
// ...
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
          /* ... */
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;
// ...
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
// ...
    Register ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ImmReg)
                        .addImm(CI->getZExtValue()));
// ...
  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
// ...
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
// ...
      Register ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(Opc), ImmReg)
                          /* ... */);
// ...
  unsigned ResultReg = 0;
  if (Subtarget->useMovt())
// ...
  Align Alignment = DL.getPrefTypeAlign(C->getType());
  unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::t2LDRpci), ResultReg)
                        /* ... */);
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::LDRcp), ResultReg)
                        /* ... */);
bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
// ...
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;
// ...
  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
// ...
  Register DestReg = createResultReg(RC);
// ...
  if (!Subtarget->isTargetMachO() && IsThreadLocal)
    return 0;
// ...
  bool IsPositionIndependent = isPositionIndependent();
// ...
  if (Subtarget->useMovt() &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
// ...
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;
// ...
    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            /* ... */));
  } else {
// ...
    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, VT);
// ...
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
// ...
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);
// ...
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                    DestReg) /* ... */;
      if (IsPositionIndependent)
// ...
      AddOptionalDefs(MIB);
// ...
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::LDRcp), DestReg)
                /* ... */;
      AddOptionalDefs(MIB);
// ...
    if (IsPositionIndependent) {
      unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
// ...
                    MIMD, TII.get(Opc), NewDestReg)
                /* ... */;
      AddOptionalDefs(MIB);
// ...
  if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
      (Subtarget->isTargetMachO() && IsIndirect)) {
// ...
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                /* ... */;
// ...
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::LDRi12), NewDestReg)
                /* ... */;
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
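// Materialization strategy above, in order of preference: movw/movt
// (MOVi32imm / MOV_ga_pcrel) when the subtarget supports it, otherwise a
// constant-pool load (t2LDRpci / LDRcp) followed by a PICADD/PICLDR fixup
// for position-independent code, and a final GOT/non-lazy-pointer load when
// the global is accessed indirectly.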
unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
// ...
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
// ...
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
// ...
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;
// ...
      FuncInfo.StaticAllocaMap.find(AI);
// ...
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
// ...
    Register ResultReg = createResultReg(RC);
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                        /* ... */);
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
// ...
  if (evt == MVT::Other || !evt.isSimple()) return false;
// ...
bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;
// ...
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
// ...
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
// ...
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
      Opcode = I->getOpcode();
// ...
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
// ...
  if (Ty->getAddressSpace() > 255)
// ...
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
// ...
        return ARMComputeAddress(U->getOperand(0), Addr);
// ...
    case Instruction::PtrToInt:
// ...
        return ARMComputeAddress(U->getOperand(0), Addr);
// ...
    case Instruction::GetElementPtr: {
// ...
      int TmpOffset = Addr.Offset;
// ...
           i != e; ++i, ++GTI) {
// ...
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
// ...
          if (canFoldAddIntoGEP(U, Op)) {
// ...
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
// ...
            Op = cast<AddOperator>(Op)->getOperand(0);
// ...
            goto unsupported_gep;
// ...
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;
// ...
    case Instruction::Alloca: {
// ...
          FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
// ...
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
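// ARMComputeAddress() folds pointer arithmetic into the Address struct by
// walking through bitcasts, int<->ptr casts, constant GEP offsets, and
// static allocas (which become FrameIndexBase); anything unsupported falls
// back to materializing the pointer into a plain base register.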
void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
// ...
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
// ...
      if (needsLowering && isThumb2)
        needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                          Addr.Offset > -256);
// ...
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
// ...
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
// ...
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
// ...
    Register ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                        /* ... */);
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
// ...
                                       Addr.Offset, MVT::i32);
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       MachineMemOperand::Flags Flags,
                                       bool useAM3) {
// ...
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
// ...
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
// ...
  AddOptionalDefs(MIB);
bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                              MaybeAlign Alignment, bool isZExt,
                              bool allocReg) {
// ...
  bool needVMOV = false;
// ...
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
    else
      Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
// ...
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i16:
    if (Alignment && *Alignment < Align(2) &&
        !Subtarget->allowsUnalignedMem())
      return false;
// ...
    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
    else
      Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
// ...
      Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
// ...
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i32:
    if (Alignment && *Alignment < Align(4) &&
        !Subtarget->allowsUnalignedMem())
      return false;
// ...
    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
// ...
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2Base()) return false;
// ...
    if (Alignment && *Alignment < Align(4)) {
// ...
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
// ...
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2Base()) return false;
// ...
    if (Alignment && *Alignment < Align(4))
// ...
  ARMSimplifyAddress(Addr, VT, useAM3);
// ...
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
// ...
                          TII.get(Opc), ResultReg);
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::VMOVSR), MoveReg)
                        /* ... */);
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads are handled elsewhere.
  if (cast<LoadInst>(I)->isAtomic())
    return false;
// ...
  const Value *SV = I->getOperand(0);
// ...
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }
// ...
    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
// ...
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;
// ...
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
// ...
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlign()))
    return false;
  updateValueMap(I, ResultReg);
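// SelectLoad() bails out on atomics and swifterror values, then reuses
// ARMComputeAddress() + ARMEmitLoad() and publishes the loaded vreg through
// updateValueMap() so later instructions can refer to it; SelectStore()
// below mirrors the same structure for the store side.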
bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               MaybeAlign Alignment) {
// ...
  bool useAM3 = false;
// ...
  switch (VT.SimpleTy) {
  default: return false;
  case MVT::i1: {
    Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                            : &ARM::GPRRegClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            /* ... */));
// ...
  case MVT::i8:
    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      StrOpc = ARM::t2STRBi8;
    else
      StrOpc = ARM::t2STRBi12;
// ...
      StrOpc = ARM::STRBi12;
    break;
  case MVT::i16:
    if (Alignment && *Alignment < Align(2) &&
        !Subtarget->allowsUnalignedMem())
      return false;
// ...
    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      StrOpc = ARM::t2STRHi8;
    else
      StrOpc = ARM::t2STRHi12;
// ...
  case MVT::i32:
    if (Alignment && *Alignment < Align(4) &&
        !Subtarget->allowsUnalignedMem())
      return false;
// ...
    if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
      StrOpc = ARM::t2STRi8;
    else
      StrOpc = ARM::t2STRi12;
// ...
      StrOpc = ARM::STRi12;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2Base()) return false;
// ...
    if (Alignment && *Alignment < Align(4)) {
// ...
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(ARM::VMOVRS), MoveReg)
                          /* ... */);
// ...
      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
    } else {
      StrOpc = ARM::VSTRS;
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2Base()) return false;
// ...
    if (Alignment && *Alignment < Align(4))
// ...
    StrOpc = ARM::VSTRD;
    break;
  }
// ...
  ARMSimplifyAddress(Addr, VT, useAM3);
bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;
// ...
  if (cast<StoreInst>(I)->isAtomic())
    return false;
// ...
  const Value *PtrV = I->getOperand(1);
// ...
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }
// ...
    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
// ...
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;
// ...
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;
// ...
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;
// ...
  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlign()))
    return false;
bool ARMFastISel::SelectBranch(const Instruction *I) {
// ...
  if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
// ...
    if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
// ...
    unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
        /* ... */;
// ...
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      Register OpReg = getRegForValue(TI->getOperand(0));
// ...
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              /* ... */));
// ...
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
// ...
      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
          /* ... */;
// ...
    fastEmitBranch(Target, MIMD.getDL());
// ...
  if (CmpReg == 0) return false;
// ...
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
// ...
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
      /* ... */;
// ...
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
// ...
  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
      /* ... */;
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  Register AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;
// ...
  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  assert(isThumb2 || Subtarget->hasV4TOps());
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          /* ... */));
// ...
    FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
// ...
  if (!SrcEVT.isSimple()) return false;
// ...
  if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
    return false;
// ...
  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
    return false;
// ...
  bool UseImm = false;
  bool isNegativeImm = false;
// ...
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
// ...
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
// ...
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }
// ...
  bool needsExt = false;
// ...
  switch (SrcVT.SimpleTy) {
  default: return false;
  case MVT::f32:
// ...
    CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
    break;
  case MVT::f64:
// ...
    CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
    break;
// ...
      if (!UseImm)
        CmpOpc = ARM::t2CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
// ...
      if (!UseImm)
        CmpOpc = ARM::CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
// ...
  Register SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;
// ...
  unsigned SrcReg2 = 0;
// ...
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
// ...
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
// ...
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                        /* ... */);
// ...
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
              /* ... */;
// ...
    AddOptionalDefs(MIB);
// ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::FMSTAT)));
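// VFP compares only set the flags in FPSCR; the trailing FMSTAT copies them
// into CPSR so the ordinary conditional-move and conditional-branch opcodes
// used by the callers can test them.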
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
// ...
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  Register DestReg = createResultReg(RC);
// ...
  unsigned ZeroReg = fastMaterializeConstant(Zero);
// ...
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg)
      /* ... */;
// ...
  updateValueMap(I, DestReg);
bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
// ...
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;
// ...
  if (Op == 0) return false;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VCVTDS), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
// ...
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;
// ...
  if (Op == 0) return false;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VCVTSD), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2Base()) return false;
// ...
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;
// ...
  Value *Src = I->getOperand(0);
// ...
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
// ...
  Register SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;
// ...
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/ !isSigned);
    if (SrcReg == 0) return false;
  }
// ...
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;
// ...
  else if (Ty->isDoubleTy() && Subtarget->hasFP64())
    Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          /* ... */));
  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2Base()) return false;
// ...
  if (!isTypeLegal(RetTy, DstVT))
    return false;
// ...
  if (Op == 0) return false;
// ...
  Type *OpTy = I->getOperand(0)->getType();
// ...
  else if (OpTy->isDoubleTy() && Subtarget->hasFP64())
    Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          /* ... */));
// ...
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;
// ...
  updateValueMap(I, IntReg);
bool ARMFastISel::SelectSelect(const Instruction *I) {
// ...
  if (!isTypeLegal(I->getType(), VT))
    return false;
// ...
  if (VT != MVT::i32) return false;
// ...
  Register CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  Register Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
// ...
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
// ...
      isNegativeImm = true;
// ...
  unsigned Op2Reg = 0;
// ...
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
// ...
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
// ...
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
      /* ... */;
// ...
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
// ...
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
// ...
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
// ...
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
// ...
  Register ResultReg = createResultReg(RC);
// ...
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
            ResultReg)
        /* ... */;
// ...
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
            ResultReg)
        /* ... */;
// ...
  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;
// ...
  // If we have integer div support we should have selected this
  // automagically; in case of a miss, fall back to SelectionDAG.
  if (Subtarget->hasDivideInThumbMode())
    return false;
// ...
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;
// ...
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
// ...
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;
// ...
  switch (ISDOpcode) {
  default: return false;
  case ISD::ADD:
    Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
    break;
  case ISD::OR:
    Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
    break;
  case ISD::SUB:
    Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
    break;
  }

  Register SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;
// ...
  Register SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg)
                      .addReg(SrcReg1).addReg(SrcReg2));
  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
// ...
  if (!FPVT.isSimple()) return false;
// ...
  Type *Ty = I->getType();
  if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
    return false;
  if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
    return false;
// ...
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }

  Register Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  Register Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;
// ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(Opc), ResultReg)
                      .addReg(Op1).addReg(Op2));
  updateValueMap(I, ResultReg);
// --- ARMFastISel::CCAssignFnForCall ---
  if (Subtarget->hasVFP2Base() && !isVarArg) {
    if (!Subtarget->isAAPCS_ABI())
// ...
  if (Subtarget->isAAPCS_ABI()) {
    if (Subtarget->hasFPRegs() &&
// ...
// --- ARMFastISel::ProcessCallArgs ---
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));
// ...
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
// ...
        !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
      return false;
// ...
      if (!Subtarget->hasVFP2Base())
        return false;
// ...
      if (!Subtarget->hasVFP2Base())
        return false;
// ...
  NumBytes = CCInfo.getStackSize();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(AdjStackDown))
                      /* ... */);
// ...
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
// ...
           "We don't handle NEON/vector parameters yet.");
// ...
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, false);
      assert(Arg != 0 && "Failed to emit a sext");
// ...
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, true);
      assert(Arg != 0 && "Failed to emit a zext");
// ...
      assert(BC != 0 && "Failed to emit a bitcast!");
// ...
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              /* ... */
// ...
             "Custom lowering for v2f64 args not available");
// ...
             "We only handle register args!");
// ...
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              /* ... */));
// ...
      if (isa<UndefValue>(ArgVal))
        continue;
// ...
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
// ...
      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
2023 unsigned &NumBytes,
bool isVarArg) {
2025 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
2026 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2027 TII.get(AdjStackUp))
2031 if (RetVT != MVT::isVoid) {
2033 CCState CCInfo(
CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2034 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(
CC,
true, isVarArg));
2037 if (RVLocs.
size() == 2 && RetVT == MVT::f64) {
2040 MVT DestVT = RVLocs[0].getValVT();
2042 Register ResultReg = createResultReg(DstRC);
2043 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2044 TII.get(ARM::VMOVDRR), ResultReg)
2045 .
addReg(RVLocs[0].getLocReg())
2046 .
addReg(RVLocs[1].getLocReg()));
2048 UsedRegs.
push_back(RVLocs[0].getLocReg());
2049 UsedRegs.
push_back(RVLocs[1].getLocReg());
2052 updateValueMap(
I, ResultReg);
2054 assert(RVLocs.
size() == 1 &&
"Can't handle non-double multi-reg retvals!");
2055 MVT CopyVT = RVLocs[0].getValVT();
2058 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
2063 Register ResultReg = createResultReg(DstRC);
2064 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2065 TII.get(TargetOpcode::COPY),
2066 ResultReg).
addReg(RVLocs[0].getLocReg());
2067 UsedRegs.
push_back(RVLocs[0].getLocReg());
2070 updateValueMap(
I, ResultReg);
2079 const Function &
F = *
I->getParent()->getParent();
2080 const bool IsCmseNSEntry =
F.hasFnAttribute(
"cmse_nonsecure_entry");
2082 if (!FuncInfo.CanLowerReturn)
2086 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
2096 if (
Ret->getNumOperands() > 0) {
2102 CCState CCInfo(
CC,
F.isVarArg(), *FuncInfo.MF, ValLocs,
I->getContext());
2103 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(
CC,
true ,
2106 const Value *RV =
Ret->getOperand(0);
2112 if (ValLocs.
size() != 1)
2126 if (!RVEVT.
isSimple())
return false;
2130 if (RVVT != DestVT) {
2131 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
2134 assert(DestVT == MVT::i32 &&
"ARM should always ext to i32");
2138 if (Outs[0].
Flags.isZExt() || Outs[0].Flags.isSExt()) {
2139 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].
Flags.isZExt());
2140 if (SrcReg == 0)
return false;
2150 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2151 TII.get(TargetOpcode::COPY), DstReg).
addReg(SrcReg);
2160 RetOpc = ARM::tBXNS_RET;
2164 RetOpc = Subtarget->getReturnOpcode();
2168 AddOptionalDefs(MIB);
2169 for (
unsigned R : RetRegs)
unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
// ...
  return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
// ...
  Type *GVTy = PointerType::get(*Context, 0);
// ...
  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
// ...
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;
// ...
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
// ...
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
// ...
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
// ...
    if (Arg == 0) return false;
// ...
    Type *ArgTy = Op->getType();
// ...
    if (!isTypeLegal(ArgTy, ArgVT)) return false;
// ...
    Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));
// ...
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;
// ...
  if (Subtarget->genLongCalls()) {
// ...
    if (CalleeReg == 0) return false;
// ...
  unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
// ...
                                    MIMD, TII.get(CallOpc));
// ...
  if (Subtarget->genLongCalls()) {
// ...
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
// ...
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {
// ...
  if (isa<InlineAsm>(Callee)) return false;
// ...
  bool isVarArg = FTy->isVarArg();
// ...
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;
// ...
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
// ...
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
// ...
  unsigned arg_size = CI->arg_size();
  Args.reserve(arg_size);
// ...
  for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end(); ArgI != ArgE;
       ++ArgI) {
// ...
    if (IntrMemName && ArgE - ArgI <= 1)
      break;
// ...
    unsigned ArgIdx = ArgI - CI->arg_begin();
// ...
    Type *ArgTy = (*ArgI)->getType();
// ...
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;
// ...
    Register Arg = getRegForValue(*ArgI);
// ...
    Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));
// ...
    Args.push_back(*ArgI);
// ...
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;
// ...
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->genLongCalls()) UseReg = true;
// ...
    CalleeReg = getLibcallReg(IntrMemName);
// ...
    CalleeReg = getRegForValue(Callee);

  if (CalleeReg == 0) return false;
// ...
  unsigned CallOpc = ARMSelectCallOp(UseReg);
// ...
                                    MIMD, TII.get(CallOpc));
// ...
  } else if (!IntrMemName)
// ...
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;
// ...
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, MaybeAlign Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;
// ...
    if (!Alignment || *Alignment >= 4) {
// ...
        assert(Len == 1 && "Expected a length of 1!");
// ...
      assert(Alignment && "Alignment is set in this branch");
// ...
      if (Len >= 2 && *Alignment == 2)
        VT = MVT::i16;
// ...
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
// ...
    Dest.Offset += Size;
bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
// ...
    unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
// ...
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(LdrOpc), DestReg)
                          .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    updateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
// ...
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
// ...
      if (ARMIsMemCpySmall(Len)) {
// ...
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          break;
// ...
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
// ...
    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
// ...
    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    unsigned Opcode;
    if (Subtarget->isThumb())
      Opcode = ARM::tTRAP;
    else
      Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode));
    return true;
  }
bool ARMFastISel::SelectTrunc(const Instruction *I) {
// ...
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;
// ...
  if (!SrcReg) return false;
// ...
  // Because the high bits are undefined, a truncate doesn't generate code.
  updateValueMap(I, SrcReg);
unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;
  if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
    return 0;
// ...
  // Table of which combinations can be emitted as a single instruction,
  // and which will require two.
  static const uint8_t isSingleInstrTbl[3][2][2][2] = {
    //            ARM                     Thumb
    //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
    //    ext:     s  z      s  z          s  z      s  z
    /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
    /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
    /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
  };
// ...
    { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
// ...
  static const struct InstructionTable {
// ...
  } IT[2][2][3][2] = {
// ... (opcode/shift/mask table elided)
// ...
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
// ...
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  assert(ARM::KILL != Opc && "Invalid table entry");
  unsigned hasS = ITP->hasS;
// ...
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;
// ...
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
// ...
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
// ...
    bool isKill = 1 == Instr;
// ...
        *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg);
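// The extend is driven by two tables: isSingleInstrTbl, indexed as
// [SrcBits/8][isThumb2][hasV6Ops][isZExt], says whether one instruction
// suffices (e.g. UXTB/SXTH on v6+), and IT supplies the opcode plus a shift
// or mask immediate; when two instructions are needed, the loop emits the
// classic shift-left-then-shift-right (or AND-mask) sequence.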
bool ARMFastISel::SelectIntExt(const Instruction *I) {
// ...
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  Register SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
// ...
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;
// ...
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
// ...
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
// ...
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();
// ...
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;
// ...
  Value *Src1Value = I->getOperand(0);
  Register Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;
// ...
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;
// ...
                TII.get(Opc), ResultReg)
// ...
  if (Opc == ARM::MOVsi)
// ...
  else if (Opc == ARM::MOVsr) {
// ...
  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}
static const struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
} FoldableLoadExtends[] = {
  { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
  { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
  { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
  { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
};
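// Each row pairs the ARM and Thumb-2 extend opcode with the immediate it is
// expected to carry (a 255 mask for ANDri, a zero shift otherwise) and the
// load type it matches; tryToFoldLoadIntoMI() below compares a candidate
// extend against these rows and, on a hit, re-emits the load with the
// extension folded in and deletes the now-dead extend instruction.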
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
// ...
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;
// ...
  if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
    return false;
// ...
    if (FLE.Opc[isThumb2] == MI->getOpcode() &&
// ...
      isZExt = FLE.isZExt;
// ...
  if (!Found) return false;
// ...
  Register ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlign(), isZExt, false))
    return false;
// ...
  removeDeadCode(I, std::next(I));
  return true;
}
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
// ...
  LLVMContext *Context = &MF->getFunction().getContext();
// ...
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
// ...
  Align ConstAlign =
      MF->getDataLayout().getPrefTypeAlign(PointerType::get(*Context, 0));
  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
// ...
  Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
// ...
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg)
      /* ... */;
  if (Opc == ARM::LDRcp)
// ...
  Opc = Subtarget->isThumb() ? ARM::tPICADD
                             : UseGOT_PREL ? ARM::PICLDR : ARM::PICADD;
// ...
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
            /* ... */
            .addImm(ARMPCLabelIndex);
// ...
  if (!Subtarget->isThumb())
// ...
  if (UseGOT_PREL && Subtarget->isThumb()) {
// ...
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                  TII.get(ARM::t2LDRi12), NewDestReg)
              /* ... */;
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }
bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;
// ...
    if (Arg.getArgNo() >= 4)
      return false;
// ...
    if (Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::ByVal))
      return false;
// ...
    Type *ArgTy = Arg.getType();
// ...
    if (!ArgVT.isSimple()) return false;
// ...
  static const MCPhysReg GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };
// ...
    unsigned ArgNo = Arg.getArgNo();
// ...
    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// ...
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY),
            ResultReg) /* ... */;
    updateValueMap(&Arg, ResultReg);
FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                              const TargetLibraryInfo *libInfo) {
// ...
  return new ARMFastISel(funcInfo, libInfo);
}