70 : BaseType(RegBase), Offset(0) {
75 class ARMFastISel final :
public FastISel {
97 TM(funcInfo.MF->getTarget()),
TII(*Subtarget->getInstrInfo()),
98 TLI(*Subtarget->getTargetLowering()) {
106 unsigned fastEmitInst_r(
unsigned MachineInstOpcode,
108 unsigned Op0,
bool Op0IsKill);
109 unsigned fastEmitInst_rr(
unsigned MachineInstOpcode,
111 unsigned Op0,
bool Op0IsKill,
112 unsigned Op1,
bool Op1IsKill);
113 unsigned fastEmitInst_rrr(
unsigned MachineInstOpcode,
115 unsigned Op0,
bool Op0IsKill,
116 unsigned Op1,
bool Op1IsKill,
117 unsigned Op2,
bool Op2IsKill);
118 unsigned fastEmitInst_ri(
unsigned MachineInstOpcode,
120 unsigned Op0,
bool Op0IsKill,
122 unsigned fastEmitInst_rri(
unsigned MachineInstOpcode,
124 unsigned Op0,
bool Op0IsKill,
125 unsigned Op1,
bool Op1IsKill,
127 unsigned fastEmitInst_i(
unsigned MachineInstOpcode,
133 bool fastSelectInstruction(
const Instruction *
I)
override;
134 unsigned fastMaterializeConstant(
const Constant *
C)
override;
135 unsigned fastMaterializeAlloca(
const AllocaInst *AI)
override;
138 bool fastLowerArguments()
override;
140 #include "ARMGenFastISel.inc"
151 bool SelectBinaryIntOp(
const Instruction *
I,
unsigned ISDOpcode);
152 bool SelectBinaryFPOp(
const Instruction *
I,
unsigned ISDOpcode);
157 bool SelectCall(
const Instruction *
I,
const char *IntrMemName);
167 bool isTypeLegal(
Type *Ty,
MVT &VT);
168 bool isLoadTypeLegal(
Type *Ty,
MVT &VT);
169 bool ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
171 bool ARMEmitLoad(
MVT VT,
unsigned &ResultReg,
Address &Addr,
172 unsigned Alignment = 0,
bool isZExt =
true,
173 bool allocReg =
true);
174 bool ARMEmitStore(
MVT VT,
unsigned SrcReg,
Address &Addr,
175 unsigned Alignment = 0);
176 bool ARMComputeAddress(
const Value *Obj,
Address &Addr);
177 void ARMSimplifyAddress(
Address &Addr,
MVT VT,
bool useAM3);
178 bool ARMIsMemCpySmall(uint64_t Len);
179 bool ARMTryEmitSmallMemCpy(
Address Dest,
Address Src, uint64_t Len,
181 unsigned ARMEmitIntExt(
MVT SrcVT,
unsigned SrcReg,
MVT DestVT,
bool isZExt);
185 unsigned ARMMoveToFPReg(
MVT VT,
unsigned SrcReg);
186 unsigned ARMMoveToIntReg(
MVT VT,
unsigned SrcReg);
187 unsigned ARMSelectCallOp(
bool UseReg);
205 unsigned getLibcallReg(
const Twine &
Name);
208 unsigned &NumBytes,
bool isVarArg);
216 void AddLoadStoreOperands(
MVT VT,
Address &Addr,
218 unsigned Flags,
bool useAM3);
223 #include "ARMGenCallingConv.inc"
// Appears to report whether MI defines the flags register, with *CPSR as an
// out-parameter — TODO confirm: the loop over MI's operands and the function's
// return paths are missing from this extract.
228 bool ARMFastISel::DefinesOptionalPredicate(
MachineInstr *
MI,
bool *CPSR) {
// Visible check: an operand whose register is ARM::CPSR (the status register).
// NOTE(review): the declaration of MO and what happens on a match are not
// visible here.
236 if (MO.
getReg() == ARM::CPSR)
// Predicate query on a MachineInstr; body largely missing from this extract.
242 bool ARMFastISel::isARMNEONPred(
const MachineInstr *MI) {
// Visible: consults the per-function ARM info (AFI) for Thumb2-ness.
247 AFI->isThumb2Function())
// NOTE(review): the next two lines (original 269/275) belong to a later
// function not otherwise visible here; it consults isARMNEONPred and
// DefinesOptionalPredicate(MI, &CPSR) — presumably the helper that appends
// optional predicate / CC-out operands. Verify against the full file.
269 if (isARMNEONPred(MI))
275 if (DefinesOptionalPredicate(MI, &CPSR)) {
284 unsigned ARMFastISel::fastEmitInst_r(
unsigned MachineInstOpcode,
286 unsigned Op0,
bool Op0IsKill) {
287 unsigned ResultReg = createResultReg(RC);
292 Op0 = constrainOperandRegClass(II, Op0, 1);
294 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
297 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
299 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
306 unsigned ARMFastISel::fastEmitInst_rr(
unsigned MachineInstOpcode,
308 unsigned Op0,
bool Op0IsKill,
309 unsigned Op1,
bool Op1IsKill) {
310 unsigned ResultReg = createResultReg(RC);
315 Op0 = constrainOperandRegClass(II, Op0, 1);
316 Op1 = constrainOperandRegClass(II, Op1, 2);
320 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
321 .
addReg(Op0, Op0IsKill * RegState::Kill)
322 .
addReg(Op1, Op1IsKill * RegState::Kill));
324 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
325 .
addReg(Op0, Op0IsKill * RegState::Kill)
326 .
addReg(Op1, Op1IsKill * RegState::Kill));
327 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
// Emit an instruction taking three register operands and return the virtual
// register holding the result. Optional ARM predicate operands are appended
// via AddOptionalDefs. (This span is a lossy extract: the declaration of II,
// the enclosing control flow, and the return are not visible here; only the
// visible tokens were kept, except for the one-token fix below.)
334 unsigned ARMFastISel::fastEmitInst_rrr(
unsigned MachineInstOpcode,
336 unsigned Op0,
bool Op0IsKill,
337 unsigned Op1,
bool Op1IsKill,
338 unsigned Op2,
bool Op2IsKill) {
339 unsigned ResultReg = createResultReg(RC);
// Make sure each input operand is constrained to a register class that is
// legal for its operand index in this instruction.
344 Op0 = constrainOperandRegClass(II, Op0, 1);
345 Op1 = constrainOperandRegClass(II, Op1, 2);
// FIX: the original passed Op1 here (copy/paste typo), so Op2 was never
// constrained and could remain in an illegal register class for operand 3.
346 Op2 = constrainOperandRegClass(II, Op2, 3);
// 'IsKill * RegState::Kill' yields RegState::Kill when the bool is true,
// 0 otherwise — the usual FastISel idiom for conditional kill flags.
350 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
351 .
addReg(Op0, Op0IsKill * RegState::Kill)
352 .
addReg(Op1, Op1IsKill * RegState::Kill)
353 .
addReg(Op2, Op2IsKill * RegState::Kill));
// Variant without an explicit def on the BuildMI call (the def is implicit);
// the surrounding if/else that selects between the forms is not visible here.
355 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
356 .
addReg(Op0, Op0IsKill * RegState::Kill)
357 .
addReg(Op1, Op1IsKill * RegState::Kill)
358 .
addReg(Op2, Op2IsKill * RegState::Kill));
359 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
366 unsigned ARMFastISel::fastEmitInst_ri(
unsigned MachineInstOpcode,
368 unsigned Op0,
bool Op0IsKill,
370 unsigned ResultReg = createResultReg(RC);
375 Op0 = constrainOperandRegClass(II, Op0, 1);
378 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
379 .
addReg(Op0, Op0IsKill * RegState::Kill)
382 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
383 .
addReg(Op0, Op0IsKill * RegState::Kill)
385 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
392 unsigned ARMFastISel::fastEmitInst_rri(
unsigned MachineInstOpcode,
394 unsigned Op0,
bool Op0IsKill,
395 unsigned Op1,
bool Op1IsKill,
397 unsigned ResultReg = createResultReg(RC);
402 Op0 = constrainOperandRegClass(II, Op0, 1);
403 Op1 = constrainOperandRegClass(II, Op1, 2);
406 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
407 .
addReg(Op0, Op0IsKill * RegState::Kill)
408 .
addReg(Op1, Op1IsKill * RegState::Kill)
411 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
412 .
addReg(Op0, Op0IsKill * RegState::Kill)
413 .
addReg(Op1, Op1IsKill * RegState::Kill)
415 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
422 unsigned ARMFastISel::fastEmitInst_i(
unsigned MachineInstOpcode,
425 unsigned ResultReg = createResultReg(RC);
429 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
432 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
434 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
// Copy a value into a register of VT's register class via ARM::VMOVSR
// (core-register to single-precision VFP move per the ARM ARM — verify),
// returning the new virtual register. Lines before the move (original
// 444-445) are missing from this extract.
443 unsigned ARMFastISel::ARMMoveToFPReg(
MVT VT,
unsigned SrcReg) {
446 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
447 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
448 TII.get(ARM::VMOVSR), MoveReg)
// Mirror of ARMMoveToFPReg using ARM::VMOVRS (single-precision VFP to core
// register per the ARM ARM — verify). Returns the new virtual register.
// Lines before the move (original 454-455) are missing from this extract.
453 unsigned ARMFastISel::ARMMoveToIntReg(
MVT VT,
unsigned SrcReg) {
456 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
457 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
458 TII.get(ARM::VMOVRS), MoveReg)
466 unsigned ARMFastISel::ARMMaterializeFP(
const ConstantFP *CFP,
MVT VT) {
472 if (TLI.isFPImmLegal(Val, VT)) {
482 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
483 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
484 TII.get(Opc), DestReg).addImm(Imm));
489 if (!Subtarget->hasVFP2())
return false;
495 Align =
DL.getTypeAllocSize(CFP->
getType());
497 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
498 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
499 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
503 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), DestReg)
504 .addConstantPoolIndex(Idx)
509 unsigned ARMFastISel::ARMMaterializeInt(
const Constant *C,
MVT VT) {
518 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
521 unsigned ImmReg = createResultReg(RC);
522 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
523 TII.get(Opc), ImmReg)
534 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
537 unsigned ImmReg = createResultReg(RC);
538 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
539 TII.get(Opc), ImmReg)
545 unsigned ResultReg = 0;
546 if (Subtarget->useMovt(*FuncInfo.MF))
557 unsigned Align =
DL.getPrefTypeAlignment(C->
getType());
560 Align =
DL.getTypeAllocSize(C->
getType());
562 unsigned Idx = MCP.getConstantPoolIndex(C, Align);
563 ResultReg = createResultReg(TLI.getRegClassFor(VT));
565 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
566 TII.get(ARM::t2LDRpci), ResultReg)
567 .addConstantPoolIndex(Idx));
570 ResultReg = constrainOperandRegClass(
TII.get(ARM::LDRcp), ResultReg, 0);
571 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
572 TII.get(ARM::LDRcp), ResultReg)
573 .addConstantPoolIndex(Idx)
579 unsigned ARMFastISel::ARMMaterializeGV(
const GlobalValue *GV,
MVT VT) {
584 bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM);
587 unsigned DestReg = createResultReg(RC);
592 if (!Subtarget->isTargetMachO() && IsThreadLocal)
return 0;
596 if (Subtarget->useMovt(*FuncInfo.MF) &&
599 unsigned char TF = 0;
600 if (Subtarget->isTargetMachO())
605 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
608 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
611 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
612 TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
615 unsigned Align =
DL.getPrefTypeAlignment(GV->
getType());
618 Align =
DL.getTypeAllocSize(GV->
getType());
621 if (Subtarget->isTargetELF() && RelocM ==
Reloc::PIC_)
622 return ARMLowerPICELF(GV, Align, VT);
626 (Subtarget->isThumb() ? 4 : 8);
627 unsigned Id = AFI->createPICLabelUId();
631 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
636 unsigned Opc = (RelocM!=
Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
637 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc),
638 DestReg).addConstantPoolIndex(Idx);
641 AddOptionalDefs(MIB);
644 DestReg = constrainOperandRegClass(
TII.get(ARM::LDRcp), DestReg, 0);
645 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
646 TII.get(ARM::LDRcp), DestReg)
647 .addConstantPoolIndex(Idx)
649 AddOptionalDefs(MIB);
652 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
653 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
656 DbgLoc,
TII.get(Opc), NewDestReg)
659 AddOptionalDefs(MIB);
667 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
669 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
670 TII.get(ARM::t2LDRi12), NewDestReg)
674 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
675 TII.get(ARM::LDRi12), NewDestReg)
678 DestReg = NewDestReg;
679 AddOptionalDefs(MIB);
// FastISel hook: materialize a Constant into a virtual register by
// dispatching on the constant's kind to the ARM-specific helpers.
685 unsigned ARMFastISel::fastMaterializeConstant(
const Constant *C) {
// Value type of the constant; the trailing 'true' allows unknown types.
686 EVT CEVT = TLI.getValueType(DL, C->
getType(),
true);
// NOTE(review): the lines deriving the simple MVT 'VT' used below (and the
// final fallthrough return) are missing from this extract.
692 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
693 return ARMMaterializeFP(CFP, VT);
694 else if (
const GlobalValue *GV = dyn_cast<GlobalValue>(C))
695 return ARMMaterializeGV(GV, VT);
696 else if (isa<ConstantInt>(C))
697 return ARMMaterializeInt(C, VT);
// FastISel hook: materialize the address of a static alloca by emitting an
// add-immediate off its frame index. Returns 0 on failure.
704 unsigned ARMFastISel::fastMaterializeAlloca(
const AllocaInst *AI) {
// Only allocas with a fixed stack slot are handled here.
706 if (!FuncInfo.StaticAllocaMap.count(AI))
return 0;
709 if (!isLoadTypeLegal(AI->
getType(), VT))
return 0;
// Look up the alloca's frame index (the 'SI' iterator declaration is not
// visible in this extract).
712 FuncInfo.StaticAllocaMap.find(AI);
716 if (SI != FuncInfo.StaticAllocaMap.
end()) {
// Thumb2 vs ARM add-immediate opcode.
717 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
719 unsigned ResultReg = createResultReg(RC);
// Constrain the destination (operand 0) to the class the ADD requires.
720 ResultReg = constrainOperandRegClass(
TII.get(Opc), ResultReg, 0);
722 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
723 TII.get(Opc), ResultReg)
724 .addFrameIndex(SI->second)
// True if Ty maps to a legal simple value type; on success VT presumably
// receives the simple type (the assignment lines 734-740 are missing from
// this extract — TODO confirm).
732 bool ARMFastISel::isTypeLegal(
Type *Ty,
MVT &VT) {
733 EVT evt = TLI.getValueType(DL, Ty,
true);
741 return TLI.isTypeLegal(VT);
// Like isTypeLegal, but (per the missing tail of this extract) presumably
// also accepts types that loads can handle via extension — TODO confirm.
744 bool ARMFastISel::isLoadTypeLegal(
Type *Ty,
MVT &VT) {
// Fully legal types are trivially load-legal.
745 if (isTypeLegal(Ty, VT))
return true;
756 bool ARMFastISel::ARMComputeAddress(
const Value *Obj,
Address &Addr) {
758 const User *U =
nullptr;
759 unsigned Opcode = Instruction::UserOp1;
760 if (
const Instruction *
I = dyn_cast<Instruction>(Obj)) {
763 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
764 FuncInfo.MBBMap[
I->getParent()] == FuncInfo.MBB) {
765 Opcode =
I->getOpcode();
768 }
else if (
const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
769 Opcode = C->getOpcode();
774 if (Ty->getAddressSpace() > 255)
782 case Instruction::BitCast:
784 return ARMComputeAddress(U->
getOperand(0), Addr);
785 case Instruction::IntToPtr:
788 TLI.getPointerTy(DL))
789 return ARMComputeAddress(U->
getOperand(0), Addr);
791 case Instruction::PtrToInt:
793 if (TLI.getValueType(DL, U->
getType()) == TLI.getPointerTy(DL))
794 return ARMComputeAddress(U->
getOperand(0), Addr);
796 case Instruction::GetElementPtr: {
798 int TmpOffset = Addr.Offset;
804 i != e; ++i, ++GTI) {
805 const Value *Op = *i;
806 if (
StructType *STy = dyn_cast<StructType>(*GTI)) {
808 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
813 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
818 if (canFoldAddIntoGEP(U, Op)) {
821 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
824 Op = cast<AddOperator>(Op)->getOperand(0);
828 goto unsupported_gep;
834 Addr.Offset = TmpOffset;
835 if (ARMComputeAddress(U->
getOperand(0), Addr))
return true;
843 case Instruction::Alloca: {
846 FuncInfo.StaticAllocaMap.
find(AI);
847 if (SI != FuncInfo.StaticAllocaMap.end()) {
848 Addr.BaseType = Address::FrameIndexBase;
849 Addr.Base.FI = SI->second;
857 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
858 return Addr.Base.Reg != 0;
861 void ARMFastISel::ARMSimplifyAddress(
Address &Addr,
MVT VT,
bool useAM3) {
862 bool needsLowering =
false;
871 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
873 if (needsLowering && isThumb2)
874 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
878 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
884 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
891 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
894 unsigned ResultReg = createResultReg(RC);
895 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
896 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
897 TII.get(Opc), ResultReg)
898 .addFrameIndex(Addr.Base.FI)
900 Addr.Base.Reg = ResultReg;
901 Addr.BaseType = Address::RegBase;
913 void ARMFastISel::AddLoadStoreOperands(
MVT VT,
Address &Addr,
915 unsigned Flags,
bool useAM3) {
922 if (Addr.BaseType == Address::FrameIndexBase) {
923 int FI = Addr.Base.FI;
924 int Offset = Addr.Offset;
926 FuncInfo.MF->getMachineMemOperand(
929 MFI.getObjectSize(FI),
930 MFI.getObjectAlignment(FI));
937 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
946 MIB.
addReg(Addr.Base.Reg);
951 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
958 AddOptionalDefs(MIB);
961 bool ARMFastISel::ARMEmitLoad(
MVT VT,
unsigned &ResultReg,
Address &Addr,
962 unsigned Alignment,
bool isZExt,
bool allocReg) {
965 bool needVMOV =
false;
969 default:
return false;
973 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
974 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
976 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
985 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
988 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
992 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
993 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
995 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
997 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
1000 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
1003 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
1007 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1010 Opc = ARM::t2LDRi12;
1014 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
1017 if (!Subtarget->hasVFP2())
return false;
1019 if (Alignment && Alignment < 4) {
1022 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
1023 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
1026 RC = TLI.getRegClassFor(VT);
1030 if (!Subtarget->hasVFP2())
return false;
1033 if (Alignment && Alignment < 4)
1037 RC = TLI.getRegClassFor(VT);
1041 ARMSimplifyAddress(Addr, VT, useAM3);
1045 ResultReg = createResultReg(RC);
1046 assert (ResultReg > 255 &&
"Expected an allocated virtual register.");
1048 TII.get(Opc), ResultReg);
1054 unsigned MoveReg = createResultReg(TLI.getRegClassFor(
MVT::f32));
1055 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1056 TII.get(ARM::VMOVSR), MoveReg)
1057 .addReg(ResultReg));
1058 ResultReg = MoveReg;
1070 if (!isLoadTypeLegal(I->
getType(), VT))
1075 if (!ARMComputeAddress(I->
getOperand(0), Addr))
return false;
1078 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
1080 updateValueMap(I, ResultReg);
1084 bool ARMFastISel::ARMEmitStore(
MVT VT,
unsigned SrcReg,
Address &Addr,
1085 unsigned Alignment) {
1087 bool useAM3 =
false;
1090 default:
return false;
1092 unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
1093 : &ARM::GPRRegClass);
1094 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
1095 SrcReg = constrainOperandRegClass(
TII.get(Opc), SrcReg, 1);
1096 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1098 .addReg(SrcReg).
addImm(1));
1103 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1104 StrOpc = ARM::t2STRBi8;
1106 StrOpc = ARM::t2STRBi12;
1108 StrOpc = ARM::STRBi12;
1112 if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
1116 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1117 StrOpc = ARM::t2STRHi8;
1119 StrOpc = ARM::t2STRHi12;
1126 if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
1130 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
1131 StrOpc = ARM::t2STRi8;
1133 StrOpc = ARM::t2STRi12;
1135 StrOpc = ARM::STRi12;
1139 if (!Subtarget->hasVFP2())
return false;
1141 if (Alignment && Alignment < 4) {
1142 unsigned MoveReg = createResultReg(TLI.getRegClassFor(
MVT::i32));
1143 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1144 TII.get(ARM::VMOVRS), MoveReg)
1148 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
1150 StrOpc = ARM::VSTRS;
1154 if (!Subtarget->hasVFP2())
return false;
1157 if (Alignment && Alignment < 4)
1160 StrOpc = ARM::VSTRD;
1164 ARMSimplifyAddress(Addr, VT, useAM3);
1167 SrcReg = constrainOperandRegClass(
TII.get(StrOpc), SrcReg, 0);
1175 bool ARMFastISel::SelectStore(
const Instruction *I) {
1177 unsigned SrcReg = 0;
1180 if (cast<StoreInst>(I)->
isAtomic())
1189 SrcReg = getRegForValue(Op0);
1190 if (SrcReg == 0)
return false;
1194 if (!ARMComputeAddress(I->
getOperand(1), Addr))
1197 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
1249 bool ARMFastISel::SelectBranch(
const Instruction *I) {
1264 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1278 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1279 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(BrOpc))
1281 fastEmitBranch(FBB, DbgLoc);
1282 FuncInfo.MBB->addSuccessor(TBB);
1287 if (TI->hasOneUse() && TI->getParent() == I->
getParent() &&
1288 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
1289 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1290 unsigned OpReg = getRegForValue(TI->getOperand(0));
1291 OpReg = constrainOperandRegClass(
TII.get(TstOpc), OpReg, 0);
1292 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1294 .addReg(OpReg).
addImm(1));
1297 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1302 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1303 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(BrOpc))
1306 fastEmitBranch(FBB, DbgLoc);
1307 FuncInfo.MBB->addSuccessor(TBB);
1314 fastEmitBranch(Target, DbgLoc);
1319 if (CmpReg == 0)
return false;
1328 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1329 CmpReg = constrainOperandRegClass(
TII.get(TstOpc), CmpReg, 0);
1331 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TstOpc))
1336 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
1341 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1342 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(BrOpc))
1344 fastEmitBranch(FBB, DbgLoc);
1345 FuncInfo.MBB->addSuccessor(TBB);
1349 bool ARMFastISel::SelectIndirectBr(
const Instruction *I) {
1350 unsigned AddrReg = getRegForValue(I->
getOperand(0));
1351 if (AddrReg == 0)
return false;
1353 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
1354 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1355 TII.get(Opc)).addReg(AddrReg));
1358 for (
unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
1359 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
1364 bool ARMFastISel::ARMEmitCmp(
const Value *Src1Value,
const Value *Src2Value,
1367 EVT SrcEVT = TLI.getValueType(DL, Ty,
true);
1368 if (!SrcEVT.
isSimple())
return false;
1372 if (isFloat && !Subtarget->hasVFP2())
1378 bool UseImm =
false;
1379 bool isNegativeImm =
false;
1382 if (
const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
1385 const APInt &CIVal = ConstInt->getValue();
1390 if (Imm < 0 && Imm != (
int)0x80000000) {
1391 isNegativeImm =
true;
1397 }
else if (
const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
1399 if (ConstFP->isZero() && !ConstFP->isNegative())
1405 bool needsExt =
false;
1407 default:
return false;
1411 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
1415 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
1425 CmpOpc = ARM::t2CMPrr;
1427 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
1430 CmpOpc = ARM::CMPrr;
1432 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
1437 unsigned SrcReg1 = getRegForValue(Src1Value);
1438 if (SrcReg1 == 0)
return false;
1440 unsigned SrcReg2 = 0;
1442 SrcReg2 = getRegForValue(Src2Value);
1443 if (SrcReg2 == 0)
return false;
1448 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1,
MVT::i32, isZExt);
1449 if (SrcReg1 == 0)
return false;
1451 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2,
MVT::i32, isZExt);
1452 if (SrcReg2 == 0)
return false;
1457 SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
1459 SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
1460 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1464 MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1470 AddOptionalDefs(MIB);
1476 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1481 bool ARMFastISel::SelectCmp(
const Instruction *I) {
1482 const CmpInst *CI = cast<CmpInst>(
I);
1496 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1498 : &ARM::GPRRegClass;
1499 unsigned DestReg = createResultReg(RC);
1501 unsigned ZeroReg = fastMaterializeConstant(Zero);
1503 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(MovCCOpc), DestReg)
1504 .addReg(ZeroReg).
addImm(1)
1507 updateValueMap(I, DestReg);
1511 bool ARMFastISel::SelectFPExt(
const Instruction *I) {
1513 if (!Subtarget->hasVFP2())
return false;
1519 unsigned Op = getRegForValue(V);
1520 if (Op == 0)
return false;
1522 unsigned Result = createResultReg(&ARM::DPRRegClass);
1523 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1524 TII.get(ARM::VCVTDS), Result)
1526 updateValueMap(I, Result);
1530 bool ARMFastISel::SelectFPTrunc(
const Instruction *I) {
1532 if (!Subtarget->hasVFP2())
return false;
1538 unsigned Op = getRegForValue(V);
1539 if (Op == 0)
return false;
1541 unsigned Result = createResultReg(&ARM::SPRRegClass);
1542 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1543 TII.get(ARM::VCVTSD), Result)
1545 updateValueMap(I, Result);
1549 bool ARMFastISel::SelectIToFP(
const Instruction *I,
bool isSigned) {
1551 if (!Subtarget->hasVFP2())
return false;
1555 if (!isTypeLegal(Ty, DstVT))
1559 EVT SrcEVT = TLI.getValueType(DL, Src->
getType(),
true);
1566 unsigned SrcReg = getRegForValue(Src);
1567 if (SrcReg == 0)
return false;
1571 SrcReg = ARMEmitIntExt(SrcVT, SrcReg,
MVT::i32,
1573 if (SrcReg == 0)
return false;
1578 unsigned FP = ARMMoveToFPReg(
MVT::f32, SrcReg);
1579 if (FP == 0)
return false;
1582 if (Ty->
isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
1583 else if (Ty->
isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
1586 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1587 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1588 TII.get(Opc), ResultReg).addReg(FP));
1589 updateValueMap(I, ResultReg);
1593 bool ARMFastISel::SelectFPToI(
const Instruction *I,
bool isSigned) {
1595 if (!Subtarget->hasVFP2())
return false;
1599 if (!isTypeLegal(RetTy, DstVT))
1602 unsigned Op = getRegForValue(I->
getOperand(0));
1603 if (Op == 0)
return false;
1607 if (OpTy->
isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
1608 else if (OpTy->
isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
1612 unsigned ResultReg = createResultReg(TLI.getRegClassFor(
MVT::f32));
1613 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1614 TII.get(Opc), ResultReg).addReg(Op));
1618 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
1619 if (IntReg == 0)
return false;
1621 updateValueMap(I, IntReg);
1625 bool ARMFastISel::SelectSelect(
const Instruction *I) {
1627 if (!isTypeLegal(I->
getType(), VT))
1633 unsigned CondReg = getRegForValue(I->
getOperand(0));
1634 if (CondReg == 0)
return false;
1635 unsigned Op1Reg = getRegForValue(I->
getOperand(1));
1636 if (Op1Reg == 0)
return false;
1640 bool UseImm =
false;
1641 bool isNegativeImm =
false;
1643 assert (VT ==
MVT::i32 &&
"Expecting an i32.");
1644 Imm = (
int)ConstInt->getValue().getZExtValue();
1646 isNegativeImm =
true;
1653 unsigned Op2Reg = 0;
1656 if (Op2Reg == 0)
return false;
1659 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
1660 CondReg = constrainOperandRegClass(
TII.get(TstOpc), CondReg, 0);
1662 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TstOpc))
1669 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
1670 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
1672 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
1674 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
1676 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
1678 unsigned ResultReg = createResultReg(RC);
1680 Op2Reg = constrainOperandRegClass(
TII.get(MovCCOpc), Op2Reg, 1);
1681 Op1Reg = constrainOperandRegClass(
TII.get(MovCCOpc), Op1Reg, 2);
1682 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(MovCCOpc),
1689 Op1Reg = constrainOperandRegClass(
TII.get(MovCCOpc), Op1Reg, 1);
1690 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(MovCCOpc),
1697 updateValueMap(I, ResultReg);
1701 bool ARMFastISel::SelectDiv(
const Instruction *I,
bool isSigned) {
1704 if (!isTypeLegal(Ty, VT))
1710 if (Subtarget->hasDivide())
return false;
1726 return ARMEmitLibcall(I, LC);
1729 bool ARMFastISel::SelectRem(
const Instruction *I,
bool isSigned) {
1732 if (!isTypeLegal(Ty, VT))
1748 return ARMEmitLibcall(I, LC);
1751 bool ARMFastISel::SelectBinaryIntOp(
const Instruction *I,
unsigned ISDOpcode) {
1752 EVT DestVT = TLI.getValueType(DL, I->
getType(),
true);
1760 switch (ISDOpcode) {
1761 default:
return false;
1763 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
1766 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
1769 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
1773 unsigned SrcReg1 = getRegForValue(I->
getOperand(0));
1774 if (SrcReg1 == 0)
return false;
1778 unsigned SrcReg2 = getRegForValue(I->
getOperand(1));
1779 if (SrcReg2 == 0)
return false;
1781 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
1782 SrcReg1 = constrainOperandRegClass(
TII.get(Opc), SrcReg1, 1);
1783 SrcReg2 = constrainOperandRegClass(
TII.get(Opc), SrcReg2, 2);
1784 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1785 TII.get(Opc), ResultReg)
1786 .addReg(SrcReg1).
addReg(SrcReg2));
1787 updateValueMap(I, ResultReg);
1791 bool ARMFastISel::SelectBinaryFPOp(
const Instruction *I,
unsigned ISDOpcode) {
1792 EVT FPVT = TLI.getValueType(DL, I->
getType(),
true);
1793 if (!FPVT.
isSimple())
return false;
1806 if (isFloat && !Subtarget->hasVFP2())
1811 switch (ISDOpcode) {
1812 default:
return false;
1814 Opc = is64bit ? ARM::VADDD : ARM::VADDS;
1817 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
1820 Opc = is64bit ? ARM::VMULD : ARM::VMULS;
1823 unsigned Op1 = getRegForValue(I->
getOperand(0));
1824 if (Op1 == 0)
return false;
1826 unsigned Op2 = getRegForValue(I->
getOperand(1));
1827 if (Op2 == 0)
return false;
1829 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.
SimpleTy));
1830 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1831 TII.get(Opc), ResultReg)
1832 .addReg(Op1).
addReg(Op2));
1833 updateValueMap(I, ResultReg);
1848 if (Subtarget->hasVFP2() && !isVarArg) {
1849 if (!Subtarget->isAAPCS_ABI())
1850 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1852 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1857 if (Subtarget->isAAPCS_ABI()) {
1858 if (Subtarget->hasVFP2() &&
1860 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1862 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1864 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1867 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
1871 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
1873 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
1878 return CC_ARM_APCS_GHC;
1891 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
1892 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
1893 CCAssignFnForCall(CC,
false, isVarArg));
1897 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
1912 !VA.
isRegLoc() || !ArgLocs[++i].isRegLoc())
1924 if (!Subtarget->hasVFP2())
1928 if (!Subtarget->hasVFP2())
1938 NumBytes = CCInfo.getNextStackOffset();
1941 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
1942 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1943 TII.get(AdjStackDown))
1947 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
1950 unsigned Arg = ArgRegs[VA.
getValNo()];
1954 "We don't handle NEON/vector parameters yet.");
1961 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
false);
1962 assert (Arg != 0 &&
"Failed to emit a sext");
1970 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT,
true);
1971 assert (Arg != 0 &&
"Failed to emit a zext");
1978 assert(BC != 0 &&
"Failed to emit a bitcast!");
1988 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1994 "Custom lowering for v2f64 args not available");
1998 assert(VA.
isRegLoc() && NextVA.isRegLoc() &&
1999 "We only handle register args!");
2001 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2012 if (isa<UndefValue>(ArgVal))
2016 Addr.BaseType = Address::RegBase;
2017 Addr.Base.Reg = ARM::SP;
2020 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
2021 assert(EmitRet &&
"Could not emit a store for argument!");
2030 unsigned &NumBytes,
bool isVarArg) {
2032 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
2033 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2034 TII.get(AdjStackUp))
2035 .addImm(NumBytes).
addImm(0));
2040 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2041 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC,
true, isVarArg));
2047 MVT DestVT = RVLocs[0].getValVT();
2049 unsigned ResultReg = createResultReg(DstRC);
2050 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2052 .addReg(RVLocs[0].getLocReg())
2053 .addReg(RVLocs[1].getLocReg()));
2055 UsedRegs.
push_back(RVLocs[0].getLocReg());
2056 UsedRegs.
push_back(RVLocs[1].getLocReg());
2059 updateValueMap(I, ResultReg);
2061 assert(RVLocs.
size() == 1 &&
"Can't handle non-double multi-reg retvals!");
2062 MVT CopyVT = RVLocs[0].getValVT();
2070 unsigned ResultReg = createResultReg(DstRC);
2071 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2073 ResultReg).addReg(RVLocs[0].getLocReg());
2074 UsedRegs.
push_back(RVLocs[0].getLocReg());
2077 updateValueMap(I, ResultReg);
2084 bool ARMFastISel::SelectRet(
const Instruction *I) {
2088 if (!FuncInfo.CanLowerReturn)
2106 unsigned Reg = getRegForValue(RV);
2111 if (ValLocs.size() != 1)
2123 unsigned SrcReg = Reg + VA.
getValNo();
2124 EVT RVEVT = TLI.getValueType(DL, RV->
getType());
2125 if (!RVEVT.
isSimple())
return false;
2129 if (RVVT != DestVT) {
2133 assert(DestVT ==
MVT::i32 &&
"ARM should always ext to i32");
2137 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
2138 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
2139 if (SrcReg == 0)
return false;
2149 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2156 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
2159 AddOptionalDefs(MIB);
2160 for (
unsigned i = 0, e = RetRegs.
size(); i != e; ++i)
2165 unsigned ARMFastISel::ARMSelectCallOp(
bool UseReg) {
2167 return isThumb2 ? ARM::tBLXr : ARM::BLX;
2169 return isThumb2 ? ARM::tBL : ARM::BL;
2172 unsigned ARMFastISel::getLibcallReg(
const Twine &
Name) {
2175 EVT LCREVT = TLI.getValueType(DL, GVTy);
2181 assert(GV->
getType() == GVTy &&
"We miscomputed the type for the global!");
2182 return ARMMaterializeGV(GV, LCREVT.
getSimpleVT());
2200 else if (!isTypeLegal(RetTy, RetVT))
2206 CCState CCInfo(CC,
false, *FuncInfo.MF, RVLocs, *Context);
2207 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC,
true,
false));
2223 unsigned Arg = getRegForValue(Op);
2224 if (Arg == 0)
return false;
2228 if (!isTypeLegal(ArgTy, ArgVT))
return false;
2231 unsigned OriginalAlignment =
DL.getABITypeAlignment(ArgTy);
2243 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2244 RegArgs, CC, NumBytes,
false))
2247 unsigned CalleeReg = 0;
2248 if (Subtarget->genLongCalls()) {
2249 CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
2250 if (CalleeReg == 0)
return false;
2254 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
2256 DbgLoc,
TII.get(CallOpc));
2260 if (Subtarget->genLongCalls())
2261 MIB.addReg(CalleeReg);
2263 MIB.addExternalSymbol(TLI.getLibcallName(Call));
2266 for (
unsigned i = 0, e = RegArgs.
size(); i != e; ++i)
2271 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2275 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes,
false))
return false;
2278 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2283 bool ARMFastISel::SelectCall(
const Instruction *I,
2284 const char *IntrMemName =
nullptr) {
2289 if (isa<InlineAsm>(Callee))
return false;
2300 PointerType *PT = cast<PointerType>(
CS.getCalledValue()->getType());
2302 bool isVarArg = FTy->isVarArg();
2309 else if (!isTypeLegal(RetTy, RetVT) && RetVT !=
MVT::i16 &&
2317 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
2318 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC,
true, isVarArg));
2328 unsigned arg_size =
CS.arg_size();
2337 if (IntrMemName && e-i <= 2)
2341 unsigned AttrInd = i -
CS.arg_begin() + 1;
2354 Type *ArgTy = (*i)->getType();
2356 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT !=
MVT::i16 && ArgVT !=
MVT::i8 &&
2360 unsigned Arg = getRegForValue(*i);
2364 unsigned OriginalAlignment =
DL.getABITypeAlignment(ArgTy);
2376 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
2377 RegArgs, CC, NumBytes, isVarArg))
2380 bool UseReg =
false;
2382 if (!GV || Subtarget->genLongCalls()) UseReg =
true;
2384 unsigned CalleeReg = 0;
2387 CalleeReg = getLibcallReg(IntrMemName);
2389 CalleeReg = getRegForValue(Callee);
2391 if (CalleeReg == 0)
return false;
2395 unsigned CallOpc = ARMSelectCallOp(UseReg);
2397 DbgLoc,
TII.get(CallOpc));
2399 unsigned char OpFlags = 0;
2403 if (Subtarget->isTargetELF() &&
TM.getRelocationModel() ==
Reloc::PIC_)
2411 else if (!IntrMemName)
2417 for (
unsigned i = 0, e = RegArgs.
size(); i != e; ++i)
2422 MIB.
addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
2426 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
2430 static_cast<MachineInstr *
>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
2435 bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
2439 bool ARMFastISel::ARMTryEmitSmallMemCpy(
Address Dest,
Address Src,
2440 uint64_t Len,
unsigned Alignment) {
2442 if (!ARMIsMemCpySmall(Len))
2447 if (!Alignment || Alignment >= 4) {
2453 assert (Len == 1 &&
"Expected a length of 1!");
2458 if (Len >= 2 && Alignment == 2)
2467 RV = ARMEmitLoad(VT, ResultReg, Src);
2468 assert (RV ==
true &&
"Should be able to handle this load.");
2469 RV = ARMEmitStore(VT, ResultReg, Dest);
2470 assert (RV ==
true &&
"Should be able to handle this store.");
2475 Dest.Offset += Size;
2482 bool ARMFastISel::SelectIntrinsicCall(
const IntrinsicInst &I) {
2485 default:
return false;
2486 case Intrinsic::frameaddress: {
2490 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
2492 : &ARM::GPRRegClass;
2505 unsigned Depth = cast<ConstantInt>(I.
getOperand(0))->getZExtValue();
2507 DestReg = createResultReg(RC);
2508 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2509 TII.get(LdrOpc), DestReg)
2510 .addReg(SrcReg).
addImm(0));
2513 updateValueMap(&I, SrcReg);
2516 case Intrinsic::memcpy:
2517 case Intrinsic::memmove: {
2526 if (isa<ConstantInt>(MTI.
getLength()) && isMemCpy) {
2529 uint64_t Len = cast<ConstantInt>(MTI.
getLength())->getZExtValue();
2530 if (ARMIsMemCpySmall(Len)) {
2532 if (!ARMComputeAddress(MTI.
getRawDest(), Dest) ||
2536 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
2547 const char *IntrMemName = isa<MemCpyInst>(
I) ?
"memcpy" :
"memmove";
2548 return SelectCall(&I, IntrMemName);
2550 case Intrinsic::memset: {
2562 return SelectCall(&I,
"memset");
2564 case Intrinsic::trap: {
2565 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(
2566 Subtarget->useNaClTrap() ? ARM::TRAPNaCl :
ARM::TRAP));
2572 bool ARMFastISel::SelectTrunc(
const Instruction *I) {
2578 SrcVT = TLI.getValueType(DL, Op->
getType(),
true);
2579 DestVT = TLI.getValueType(DL, I->
getType(),
true);
2586 unsigned SrcReg = getRegForValue(Op);
2587 if (!SrcReg)
return false;
2591 updateValueMap(I, SrcReg);
2595 unsigned ARMFastISel::ARMEmitIntExt(
MVT SrcVT,
unsigned SrcReg,
MVT DestVT,
2604 static const uint8_t isSingleInstrTbl[3][2][2][2] = {
2608 { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
2609 { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
2610 { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
2619 { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
2620 { &ARM::tGPRRegClass, &ARM::rGPRRegClass }
2624 static const struct InstructionTable {
2629 }
IT[2][2][3][2] = {
2671 assert((SrcBits < DestBits) &&
"can only extend to larger types");
2672 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
2673 "other sizes unimplemented");
2674 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
2675 "other sizes unimplemented");
2677 bool hasV6Ops = Subtarget->hasV6Ops();
2678 unsigned Bitness = SrcBits / 8;
2679 assert((Bitness < 3) &&
"sanity-check table bounds");
2681 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
2683 const InstructionTable *ITP = &
IT[isSingleInstr][isThumb2][Bitness][isZExt];
2684 unsigned Opc = ITP->Opc;
2685 assert(
ARM::KILL != Opc &&
"Invalid table entry");
2686 unsigned hasS = ITP->hasS;
2689 "only MOVsi has shift operand addressing mode");
2690 unsigned Imm = ITP->Imm;
2693 bool setsCPSR = &ARM::tGPRRegClass == RC;
2694 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
2709 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
2710 for (
unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
2711 ResultReg = createResultReg(RC);
2712 bool isLsl = (0 == Instr) && !isSingleInstr;
2713 unsigned Opcode = isLsl ? LSLOpc : Opc;
2716 bool isKill = 1 == Instr;
2718 *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opcode), ResultReg);
2721 SrcReg = constrainOperandRegClass(
TII.get(Opcode), SrcReg, 1 + setsCPSR);
2732 bool ARMFastISel::SelectIntExt(
const Instruction *I) {
2739 bool isZExt = isa<ZExtInst>(
I);
2740 unsigned SrcReg = getRegForValue(Src);
2741 if (!SrcReg)
return false;
2743 EVT SrcEVT, DestEVT;
2744 SrcEVT = TLI.getValueType(DL, SrcTy,
true);
2745 DestEVT = TLI.getValueType(DL, DestTy,
true);
2746 if (!SrcEVT.
isSimple())
return false;
2747 if (!DestEVT.
isSimple())
return false;
2751 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
2752 if (ResultReg == 0)
return false;
2753 updateValueMap(I, ResultReg);
2757 bool ARMFastISel::SelectShift(
const Instruction *I,
2765 EVT DestVT = TLI.getValueType(DL, I->
getType(),
true);
2769 unsigned Opc = ARM::MOVsr;
2772 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
2773 ShiftImm = CI->getZExtValue();
2777 if (ShiftImm == 0 || ShiftImm >=32)
2784 unsigned Reg1 = getRegForValue(Src1Value);
2785 if (Reg1 == 0)
return false;
2788 if (Opc == ARM::MOVsr) {
2789 Reg2 = getRegForValue(Src2Value);
2790 if (Reg2 == 0)
return false;
2793 unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
2794 if(ResultReg == 0)
return false;
2797 TII.get(Opc), ResultReg)
2800 if (Opc == ARM::MOVsi)
2801 MIB.
addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
2802 else if (Opc == ARM::MOVsr) {
2804 MIB.
addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
2807 AddOptionalDefs(MIB);
2808 updateValueMap(I, ResultReg);
2813 bool ARMFastISel::fastSelectInstruction(
const Instruction *I) {
2817 return SelectLoad(I);
2819 return SelectStore(I);
2820 case Instruction::Br:
2821 return SelectBranch(I);
2822 case Instruction::IndirectBr:
2823 return SelectIndirectBr(I);
2824 case Instruction::ICmp:
2825 case Instruction::FCmp:
2826 return SelectCmp(I);
2827 case Instruction::FPExt:
2828 return SelectFPExt(I);
2829 case Instruction::FPTrunc:
2830 return SelectFPTrunc(I);
2831 case Instruction::SIToFP:
2832 return SelectIToFP(I,
true);
2833 case Instruction::UIToFP:
2834 return SelectIToFP(I,
false);
2835 case Instruction::FPToSI:
2836 return SelectFPToI(I,
true);
2837 case Instruction::FPToUI:
2838 return SelectFPToI(I,
false);
2839 case Instruction::Add:
2840 return SelectBinaryIntOp(I,
ISD::ADD);
2842 return SelectBinaryIntOp(I,
ISD::OR);
2843 case Instruction::Sub:
2844 return SelectBinaryIntOp(I,
ISD::SUB);
2845 case Instruction::FAdd:
2847 case Instruction::FSub:
2849 case Instruction::FMul:
2851 case Instruction::SDiv:
2852 return SelectDiv(I,
true);
2853 case Instruction::UDiv:
2854 return SelectDiv(I,
false);
2855 case Instruction::SRem:
2856 return SelectRem(I,
true);
2857 case Instruction::URem:
2858 return SelectRem(I,
false);
2861 return SelectIntrinsicCall(*II);
2862 return SelectCall(I);
2864 return SelectSelect(I);
2866 return SelectRet(I);
2867 case Instruction::Trunc:
2868 return SelectTrunc(I);
2869 case Instruction::ZExt:
2870 case Instruction::SExt:
2871 return SelectIntExt(I);
2872 case Instruction::Shl:
2874 case Instruction::LShr:
2876 case Instruction::AShr:
2888 const struct FoldableLoadExtendsStruct {
2890 uint8_t ExpectedImm;
2892 uint8_t ExpectedVT : 7;
2893 } FoldableLoadExtends[] = {
2896 { { ARM::ANDri, ARM::t2ANDri }, 255, 1,
MVT::i8 },
2906 bool ARMFastISel::tryToFoldLoadIntoMI(
MachineInstr *MI,
unsigned OpNo,
2910 if (!isLoadTypeLegal(LI->
getType(), VT))
2925 if (FoldableLoadExtends[i].Opc[isThumb2] == MI->
getOpcode() &&
2926 (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm &&
2929 isZExt = FoldableLoadExtends[i].isZExt;
2932 if (!Found)
return false;
2936 if (!ARMComputeAddress(LI->
getOperand(0), Addr))
return false;
2939 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->
getAlignment(), isZExt,
false))
2945 unsigned ARMFastISel::ARMLowerPICELF(
const GlobalValue *GV,
2946 unsigned Align,
MVT VT) {
2950 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
2953 unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
2956 DestReg1 = constrainOperandRegClass(
TII.get(ARM::t2LDRpci), DestReg1, 0);
2957 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2958 TII.get(ARM::t2LDRpci), DestReg1)
2959 .addConstantPoolIndex(Idx));
2960 Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
2963 DestReg1 = constrainOperandRegClass(
TII.get(ARM::LDRcp), DestReg1, 0);
2964 AddOptionalDefs(
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2965 DbgLoc,
TII.get(ARM::LDRcp), DestReg1)
2966 .addConstantPoolIndex(Idx).
addImm(0));
2967 Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
2971 if (GlobalBaseReg == 0) {
2972 GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
2973 AFI->setGlobalBaseReg(GlobalBaseReg);
2976 unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
2977 DestReg2 = constrainOperandRegClass(
TII.get(Opc), DestReg2, 0);
2978 DestReg1 = constrainOperandRegClass(
TII.get(Opc), DestReg1, 1);
2981 DbgLoc,
TII.get(Opc), DestReg2)
2986 AddOptionalDefs(MIB);
2991 bool ARMFastISel::fastLowerArguments() {
2992 if (!FuncInfo.CanLowerReturn)
3015 I != E; ++
I, ++Idx) {
3028 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3029 if (!ArgVT.
isSimple())
return false;
3042 ARM::R0, ARM::R1,
ARM::R2, ARM::R3
3048 I != E; ++
I, ++Idx) {
3049 unsigned SrcReg = GPRArgRegs[Idx];
3050 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3054 unsigned ResultReg = createResultReg(RC);
3055 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3058 updateValueMap(I, ResultReg);
3068 return new ARMFastISel(funcInfo, libInfo);
unsigned getAlignment() const
void setFrameAddressIsTaken(bool T)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
ReturnInst - Return a value (possibly void), from a function.
const Value * getCalledValue() const
getCalledValue - Get a pointer to the function that is invoked by this instruction.
void push_back(const T &Elt)
The memory access reads data.
Type * getIndexedType() const
This class is the base class for the comparison instructions.
The memory access writes data.
bool isPredicate() const
Set if this is one of the operands that made up of the predicate operand that controls an isPredicabl...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
uint64_t getZExtValue() const
Get zero extended value.
Sign extended before/after call.
LocInfo getLocInfo() const
Force argument to be passed in register.
A Module instance is used to store all the information related to an LLVM module. ...
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
Intrinsic::ID getIntrinsicID() const
getIntrinsicID - Return the intrinsic ID of this intrinsic.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
static unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned getNumOperands() const
Nested function static chain.
ARMConstantPoolValue - ARM specific constantpool value.
Describe properties that are true of each instruction in the target description file.
CallInst - This class represents a function call, abstracting a target machine's calling convention...
static PointerType * getInt32PtrTy(LLVMContext &C, unsigned AS=0)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
bool isPredicable(QueryType Type=AllInBundle) const
Return true if this instruction has a predicate operand that controls execution.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
bool useFastISel() const
True if fast-isel is used.
0 1 0 0 True if ordered and less than
unsigned getSizeInBits() const
Externally visible function.
bool isDoubleTy() const
isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
1 1 1 0 True if unordered or not equal
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
MemSetInst - This class wraps the llvm.memset intrinsic.
const Function * getParent() const
Return the enclosing method, or null if none.
bool isThumbFunction() const
LoadInst - an instruction for reading from memory.
User::const_op_iterator arg_iterator
arg_iterator - The type of iterator to use when looping over actual arguments at this call site...
MO_PLT - On a symbol operand, this represents an ELF PLT reference on a call operand.
void reserve(size_type N)
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
unsigned getValNo() const
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
static MachinePointerInfo getFixedStack(int FI, int64_t offset=0)
getFixedStack - Return a MachinePointerInfo record that refers to the the specified FrameIndex...
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
COPY - Target-independent register copy.
1 0 0 1 True if unordered or equal
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
MachineMemOperand - A description of a memory reference used in the backend.
static const MachineInstrBuilder & AddDefaultPred(const MachineInstrBuilder &MIB)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
const HexagonInstrInfo * TII
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::ZeroOrMore, cl::values(clEnumValN(DefaultIT,"arm-default-it","Generate IT block based on arch"), clEnumValN(RestrictedIT,"arm-restrict-it","Disallow deprecated IT based on ARMv8"), clEnumValN(NoRestrictedIT,"arm-no-restrict-it","Allow IT blocks based on ARMv7"), clEnumValEnd))
StructType - Class to represent struct types.
static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
A Use represents the edge between a Value definition and its users.
unsigned getFrameRegister(const MachineFunction &MF) const override
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Reg
All possible values of the reg field in the ModR/M byte.
0 1 0 1 True if ordered and less than or equal
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
const MachineInstrBuilder & addImm(int64_t Val) const
addImm - Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Hidden pointer to structure to return.
unsigned getNumOperands() const
Access to explicit operands of the instruction.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
unsigned getLocReg() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionType - Class to represent function types.
ConstantExpr - a constant value that is initialized with an expression using other constant values...
void GetReturnInfo(Type *ReturnType, AttributeSet attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
Simple integer binary arithmetic operators.
static int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
BasicBlock * getSuccessor(unsigned i) const
bool isArrayTy() const
isArrayTy - True if this is an instance of ArrayType.
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
static const MCPhysReg GPRArgRegs[]
void setOrigAlign(unsigned A)
Type * getElementType() const
This class represents a truncation of integer types.
PointerType - Class to represent pointers.
unsigned getKillRegState(bool B)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
TargetInstrInfo - Interface to description of machine instruction set.
uint64_t getElementOffset(unsigned Idx) const
LLVM_CONSTEXPR size_t array_lengthof(T(&)[N])
Find the length of an array.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
MVT - Machine Value Type.
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
Simple binary floating point operators.
BranchInst - Conditional or Unconditional Branch instruction.
bool isVectorTy() const
isVectorTy - True if this is an instance of VectorType.
This is an important base class in LLVM.
PointerType * getType() const
getType - Overload to return most specific pointer type
bool isVector() const
isVector - Return true if this is a vector value type.
bool hasHiddenVisibility() const
int64_t getSExtValue() const
Get sign extended value.
const MachineOperand & getOperand(unsigned i) const
IndirectBrInst - Indirect Branch Instruction.
bool isFloatTy() const
isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
APInt Or(const APInt &LHS, const APInt &RHS)
Bitwise OR function for APInt.
ConstantFP - Floating Point Values [float, double].
Value * getRawDest() const
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
TRAP - Trapping instruction.
Value * getOperand(unsigned i) const
Zero extended before/after call.
0 1 1 1 True if ordered (no nans)
Predicate getPredicate() const
Return the predicate for this instruction.
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
BuildMI - Builder interface.
EVT - Extended Value Type.
LLVMContext & getContext() const
All values hold a context through their type.
static bool isAtomic(Instruction *I)
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
1 1 0 1 True if unordered, less than, or equal
0 0 1 0 True if ordered and greater than
ARMConstantPoolConstant - ARM-specific constant pool values for Constants, Functions, and BlockAddresses.
static const MachineInstrBuilder & AddDefaultCC(const MachineInstrBuilder &MIB)
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
CCState - This class holds information needed while lowering arguments and return values...
This is the shared class of boolean and integer constants.
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
1 1 0 0 True if unordered or less than
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
Value * getLength() const
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
const MachineInstrBuilder & addFrameIndex(int Idx) const
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AttributeSet getAttributes() const
Return the attribute list for this Function.
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
This file defines the FastISel class.
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
KILL - This instruction is a noop that is used only to adjust the liveness of registers.
static int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned char TargetFlags=0) const
bool isStructTy() const
isStructTy - True if this is an instance of StructType.
Representation of each machine instruction.
PointerType * getType() const
Global values are always pointers.
MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
Value * getCondition() const
static IntegerType * getInt32Ty(LLVMContext &C)
unsigned greater or equal
unsigned getAlignment() const
getAlignment - Return the alignment of the access that is being performed
static unsigned UseReg(const MachineOperand &MO)
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned char TargetFlags=0) const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
ImmutableCallSite - establish a view to a call site for examination.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
Fast - This calling convention attempts to make calls as fast as possible (e.g.
bool hasOneUse() const
Return true if there is exactly one user of this value.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
0 1 1 0 True if ordered and operands are unequal
iterator find(const KeyT &Val)
1 0 1 0 True if unordered or greater than
bool hasLocalLinkage() const
const APFloat & getValueAPF() const
Value * getRawSource() const
get* - Return the arguments to the instruction.
unsigned getReg() const
getReg - Returns the register number.
bool isUnsigned() const
Determine if this instruction is using an unsigned comparison.
const uint16_t * ImplicitDefs
bool isFloat(MCInstrInfo const &MCII, MCInst const &MCI)
Return whether it is a floating-point insn.
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
0 0 0 1 True if ordered and equal
LLVM Value Representation.
1 0 1 1 True if unordered, greater than, or equal
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
unsigned getDestAddressSpace() const
const MCOperandInfo * OpInfo
static const Function * getParent(const Value *V)
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which...
bool isUInt< 16 >(uint64_t x)
static const unsigned FramePtr
Primary interface to the complete machine description for the target machine.
C - The default llvm calling convention, compatible with C.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
unsigned getLocMemOffset() const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
0 0 1 1 True if ordered and greater than or equal
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
addReg - Add a new virtual register operand...
unsigned getSourceAddressSpace() const
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
const BasicBlock * getParent() const
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
static const MachineInstrBuilder & AddDefaultT1CC(const MachineInstrBuilder &MIB, bool isDead=false)
IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic functions.
This file describes how to lower LLVM code to machine code.
bool isVoidTy() const
isVoidTy - Return true if this is 'void'.
AllocaInst - an instruction to allocate memory on the stack.
gep_type_iterator gep_type_begin(const User *GEP)
bool contains(unsigned Reg) const
contains - Return true if the specified register is included in this register class.