  Address() : BaseType(RegBase), Offset(0) {
    Base.Reg = 0;
  }
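// ARMFastISel is the ARM backend's implementation of the FastISel interface:
// it emits machine instructions directly from IR at -O0 and returns false
// from its Select* routines whenever a case must fall back to SelectionDAG.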
class ARMFastISel final : public FastISel {
  // ...
  explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                       const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo),
        // ...
        TM(funcInfo.MF->getTarget()),
        TII(*Subtarget->getInstrInfo()),
        TLI(*Subtarget->getTargetLowering()) {
    // ...
  }

  // Code from FastISel.cpp.
  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC,
                          unsigned Op0, bool Op0IsKill);
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill,
                           unsigned Op1, bool Op1IsKill);
  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill, uint64_t Imm);
  unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, uint64_t Imm);
  // Backend specific FastISel code.
  bool fastSelectInstruction(const Instruction *I) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  bool fastLowerArguments() override;
#include "ARMGenFastISel.inc"
  // Instruction selection routines.
  bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
  bool SelectCall(const Instruction *I, const char *IntrMemName);
  // Utility routines.
  bool isPositionIndependent() const;
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                  bool isZExt);
  bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                   unsigned Alignment = 0, bool isZExt = true,
                   bool allocReg = true);
  bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                    unsigned Alignment = 0);
  bool ARMComputeAddress(const Value *Obj, Address &Addr);
  void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
  bool ARMIsMemCpySmall(uint64_t Len);
  bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                             unsigned Alignment);
  unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);

  unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
  unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
  unsigned ARMSelectCallOp(bool UseReg);
  unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);

  // Call handling routines.
  unsigned getLibcallReg(const Twine &Name);
  bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                  const Instruction *I, CallingConv::ID CC,
                  unsigned &NumBytes, bool isVarArg);

  void AddLoadStoreOperands(MVT VT, Address &Addr,
                            const MachineInstrBuilder &MIB,
                            unsigned Flags, bool useAM3);
#include "ARMGenCallingConv.inc"
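// Most ARM instructions are predicable, and many have an optional CPSR def.
// The helpers below detect those operands so that AddOptionalDefs can append
// the default (AL) predicate and the default condition-code operand.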
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we'll be handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
      AFI->isThumb2Function())
    return MI->isPredicable();
  // ...
}

// In AddOptionalDefs:
  if (isARMNEONPred(MI))
    AddDefaultPred(MIB);
  // ...
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    // ...
  }
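// The fastEmitInst_* overloads below all share one pattern: allocate a result
// register in the requested class, build the MachineInstr, and pass it
// through AddOptionalDefs; if the MCInstrDesc has no explicit def, the value
// is copied out of the instruction's first implicit def instead.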
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);
  // ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg)
                        .addReg(Op0, Op0IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);
  // ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
  } else {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);
  // ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
  } else {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addReg(Op0, Op0IsKill * RegState::Kill)
            .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);
  // ...
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVSR), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VMOVRS), MoveReg)
                      .addReg(SrcReg));
  return MoveReg;
}
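// FP constants are materialized either as an encodable VFP immediate (via
// ARM_AM::getFP32Imm/getFP64Imm) or, failing that, with a VLDRS/VLDRD load
// from a constant-pool entry.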
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize the
  // constant as an immediate; otherwise we go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    // ...
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(CFP->getType());
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0));
  return DestReg;
}
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                        .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    // ...
    unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
    // ...
    unsigned ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ImmReg)
                        .addImm(Imm));
    return ImmReg;
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt(*FuncInfo.MF))
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  if (ResultReg)
    return ResultReg;

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(C->getType());
  if (Align == 0)
    Align = DL.getTypeAllocSize(C->getType());
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::t2LDRpci), ResultReg)
                        .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::LDRcp), ResultReg)
                        .addConstantPoolIndex(Idx)
                        .addImm(0));
  }
  return ResultReg;
}
bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}
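// Global values are materialized with a movw/movt pair where the subtarget
// and relocation model allow it, and otherwise through a constant-pool load;
// indirect symbols (Darwin non-lazy pointers, ELF GOT entries) need one more
// load through the resulting address.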
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // ...
  // ROPI/RWPI not currently supported.
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  // ...
  unsigned DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  if (!Subtarget->isTargetMachO() && IsThreadLocal)
    return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, it avoids constant pool entries.
  if (Subtarget->useMovt(*FuncInfo.MF) &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    unsigned Align = DL.getPrefTypeAlignment(GV->getType());
    if (Align == 0)
      Align = DL.getTypeAllocSize(GV->getType());

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, Align, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    // ...
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
        MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                      TII.get(Opc), NewDestReg)
                  .addReg(DestReg)
                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if (IsIndirect) {
    MachineInstrBuilder MIB;
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::t2LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}
unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);
  // ...
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    // ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(SI->second)
                        .addImm(0));
    return ResultReg;
  }

  return 0;
}
bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);
  // ...
  VT = evt.getSimpleVT();
  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type than can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}
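// Address computation: walk the pointer operand through bitcasts, no-op
// int<->ptr casts, GEPs with foldable constant offsets, and static allocas,
// accumulating a base register (or frame index) plus immediate offset.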
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate code to set up what we're doing.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast:
    // Look through bitcasts.
    return ARMComputeAddress(U->getOperand(0), Addr);
  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return ARMComputeAddress(U->getOperand(0), Addr);
    break;
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int TmpOffset = Addr.Offset;

    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        while (true) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported.
          goto unsupported_gep;
        }
      }
    }

    // Try to grab the base operand now.
    Addr.Offset = TmpOffset;
    if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

    // We failed, restore everything and try the other options.
    Addr = SavedAddr;

  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.BaseType = Address::FrameIndexBase;
      Addr.Base.FI = SI->second;
      return true;
    }
    break;
  }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}
void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
  // ...
  case MVT::i1: case MVT::i8: case MVT::i16: case MVT::i32:
    if (!useAM3) {
      // Integer loads/stores handle 12-bit offsets.
      needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
      // Handle negative offsets.
      if (needsLowering && isThumb2)
        needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                          Addr.Offset > -256);
    } else {
      // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
      needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
    }
    break;
  case MVT::f32: case MVT::f64:
    // Floating point operands handle 8-bit offsets.
    needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
    break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), ResultReg)
                        .addFrameIndex(Addr.Base.FI)
                        .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }
  // ...
}
void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // ...
  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Addr.Offset), Flags,
        MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    // ARM halfword load/stores and signed byte loads need an extra operand.
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addFrameIndex(FI).addReg(0).addImm(Imm);
    }
    // ...
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);
    if (useAM3) {
      int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0).addImm(Imm);
    }
    // ...
  }
  AddOptionalDefs(MIB);
}
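// ARMEmitLoad picks a load opcode per value type and offset range (e.g.
// t2LDRBi8 for small negative offsets vs. t2LDRBi12 otherwise), simplifies
// the address, and handles under-aligned f32 loads with an integer load
// followed by a VMOVSR into an SPR.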
bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt,
                              bool allocReg) {
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  const TargetRegisterClass *RC;
  switch (VT.SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1:
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
      else
        Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
    }
    // ...
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i16:
    if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
      return false;
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
      else
        Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
    } else {
      Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
      useAM3 = true;
    }
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::i32:
    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
      return false;
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        Opc = ARM::t2LDRi8;
      // ...
    }
    // ...
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned loads need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      needVMOV = true;
      VT = MVT::i32;
      Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
      RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
    } else {
      // ...
      RC = TLI.getRegClassFor(VT);
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned loads need special handling.
    if (Alignment && Alignment < 4)
      return false;
    // ...
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg);
  // ...

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::VMOVSR), MoveReg)
                        .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}
bool ARMFastISel::SelectLoad(const Instruction *I) {
  // ...
  // Can't handle loads from swifterror values.
  if (TLI.supportSwiftError()) {
    const Value *SV = cast<LoadInst>(I)->getPointerOperand();
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }
    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.SimpleTy) {
  // This is mostly going to be Neon/vector support.
  default: return false;
  case MVT::i1: {
    unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
                                            : &ARM::GPRRegClass);
    unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
    // ...
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(Opc), Res)
                        .addReg(SrcReg).addImm(1));
    SrcReg = Res;
    LLVM_FALLTHROUGH;
  }
  case MVT::i8:
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRBi8;
      else
        StrOpc = ARM::t2STRBi12;
    } else {
      StrOpc = ARM::STRBi12;
    }
    break;
  case MVT::i16:
    if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
      return false;
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRHi8;
      else
        StrOpc = ARM::t2STRHi12;
    } else {
      StrOpc = ARM::STRH;
      useAM3 = true;
    }
    break;
  case MVT::i32:
    if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
      return false;
    if (isThumb2) {
      if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
        StrOpc = ARM::t2STRi8;
      else
        StrOpc = ARM::t2STRi12;
    } else {
      StrOpc = ARM::STRi12;
    }
    break;
  case MVT::f32:
    if (!Subtarget->hasVFP2()) return false;
    // Unaligned stores need special handling. Floats require word-alignment.
    if (Alignment && Alignment < 4) {
      unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVRS), MoveReg)
                          .addReg(SrcReg));
      SrcReg = MoveReg;
      VT = MVT::i32;
      StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
    } else {
      StrOpc = ARM::VSTRS;
    }
    break;
  case MVT::f64:
    if (!Subtarget->hasVFP2()) return false;
    // FIXME: Unaligned stores need special handling.
    if (Alignment && Alignment < 4)
      return false;
    StrOpc = ARM::VSTRD;
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);
  // ...
}
bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Can't handle stores to swifterror values.
  if (TLI.supportSwiftError()) {
    const Value *PtrV = I->getOperand(1);
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return false;
    }
    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return false;
    }
  }

  // Verify we have a legal type before going any further.
  // ...

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}
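// Branches: first try to fold a compare that feeds the branch directly into
// a conditional branch, then a single-bit truncate via TST, and finally fall
// back to testing the materialized i1 condition register against zero.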
bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  // ...
  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    // ...
      // Try to take advantage of fallthrough opportunities.
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }
      // ...
      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      // ...
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(TstOpc))
                          .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
          .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      finishCondBranch(BI->getParent(), TBB, FBB);
      return true;
    }
  } else if (const ConstantInt *CI =
                 dyn_cast<ConstantInt>(BI->getCondition())) {
    MachineBasicBlock *Target = (CI->isOne()) ? TBB : FBB;
    fastEmitBranch(Target, DbgLoc);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // Our block was split; the compare lives in a predecessor block, which left
  // a one-bit value for us in a virtual register. Test that bit.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  // ...
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
      .addReg(CmpReg).addImm(1);

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc)).addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (const BasicBlock *SuccBB : IB->successors())
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);

  return true;
}
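// ARMEmitCmp emits CMP/CMN (or VCMP + FMSTAT for floats), preferring the
// immediate forms when the RHS is an encodable constant; i1/i8/i16 operands
// are extended to i32 first.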
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcEVT = TLI.getValueType(DL, Ty, true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN (i.e., 0x80000000) we need to use a cmp rather than a
      // cmn, because there is no way to represent 2147483648 as a signed
      // 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1)
                        : (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.SimpleTy) {
  default: return false;
  case MVT::f32:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
    break;
  case MVT::f64:
    isICmp = false;
    CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    needsExt = true;
    LLVM_FALLTHROUGH;
  case MVT::i32:
    if (isThumb2) {
      if (!UseImm)
        CmpOpc = ARM::t2CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
    } else {
      if (!UseImm)
        CmpOpc = ARM::CMPrr;
      else
        CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
    }
    break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (SrcReg1 == 0) return false;
    if (!UseImm) {
      SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (SrcReg2 == 0) return false;
    }
  }

  const MCInstrDesc &II = TII.get(CmpOpc);
  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
                        .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
              .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                            TII.get(ARM::FMSTAT)));
  return true;
}
bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  // ...
  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = fastMaterializeConstant(Zero);
  // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
      .addReg(ZeroReg).addImm(1)
      .addImm(ARMPred).addReg(ARM::CPSR);

  updateValueMap(I, DestReg);
  return true;
}
bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::DPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTDS), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(ARM::VCVTSD), Result)
                      .addReg(Op));
  updateValueMap(I, Result);
  return true;
}
bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
  if (!SrcEVT.isSimple()) return false;
  MVT SrcVT = SrcEVT.getSimpleVT();
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
                           /*isZExt*/ !isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(FP));
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg).addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  updateValueMap(I, IntReg);
  return true;
}
bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1)
                      : (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  // ...
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
      .addReg(CondReg).addImm(1);

  unsigned MovCCOpc;
  const TargetRegisterClass *RC;
  if (!UseImm) {
    RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
    if (!isNegativeImm)
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    else
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm) {
    // ...
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op2Reg).addReg(Op1Reg)
        .addImm(ARMCC::NE).addReg(ARM::CPSR);
  } else {
    // ...
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
            ResultReg)
        .addReg(Op1Reg).addImm(Imm)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
  }
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  // ...
  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // Many ABIs do not provide a libcall for standalone remainder, so we need
  // to check that we are using one that does.
  if (!TLI.hasStandaloneRem(VT)) {
    // ...
  }

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  // ...
  return ARMEmitLibcall(I, LC);
}
bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
  default: return false;
  case ISD::ADD:
    Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
    break;
  case ISD::OR:
    Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
    break;
  case ISD::SUB:
    Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
    break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  // ...
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                      .addReg(SrcReg1).addReg(SrcReg2));
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT FPVT = TLI.getValueType(DL, I->getType(), true);
  if (!FPVT.isSimple()) return false;
  MVT VT = FPVT.getSimpleVT();

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
  default: return false;
  case ISD::FADD:
    Opc = is64bit ? ARM::VADDD : ARM::VADDS;
    break;
  case ISD::FSUB:
    Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
    break;
  case ISD::FMUL:
    Opc = is64bit ? ARM::VMULD : ARM::VMULS;
    break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg)
                      .addReg(Op1).addReg(Op2));
  updateValueMap(I, ResultReg);
  return true;
}
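// Calling-convention dispatch: CCAssignFnForCall maps the IR calling
// convention, the float ABI, and vararg-ness onto the tablegen-generated
// CC_ARM_* / RetCC_ARM_* functions included above.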
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return, bool isVarArg) {
  switch (CC) {
  // ...
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use the VFP variant of the calling
      // convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    LLVM_FALLTHROUGH;
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    }
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to soft float variant; variadic functions don't use hard
    // floating point ABI.
    LLVM_FALLTHROUGH;
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    return CC_ARM_APCS_GHC;
  }
}
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value *> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC, unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // Custom-lowered f64 arguments must have both halves in registers, and
    // f64/vector arguments require VFP2 at all.
    if (VA.needsCustom() && ArgVT == MVT::f64 &&
        (!VA.isRegLoc() || !ArgLocs[++i].isRegLoc()))
      return false;
    if ((ArgVT == MVT::f64 || ArgVT.isVector()) && !Subtarget->hasVFP2())
      return false;
    // ...
  }

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackDown))
                      .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = Args[VA.getValNo()];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt=*/false);
      assert(Arg != 0 && "Failed to emit a sext");
      ArgVT = DestVT;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt=*/true);
      assert(Arg != 0 && "Failed to emit a zext");
      ArgVT = DestVT;
      break;
    }
    case CCValAssign::BCvt: {
      // ...
      assert(BC != 0 && "Failed to emit a bitcast!");
      // ...
      break;
    }
    default: break;
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");
      // ...
      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                          .addReg(NextVA.getLocReg(), RegState::Define)
                          .addReg(Arg));
      // ...
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack. Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }
  return true;
}
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(AdjStackUp))
                      .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      MVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass *DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(ARM::VMOVDRR), ResultReg)
                          .addReg(RVLocs[0].getLocReg())
                          .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      MVT CopyVT = RVLocs[0].getValVT();
      // ...
      const TargetRegisterClass *DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      updateValueMap(I, ResultReg);
    }
  }
  return true;
}
bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return false;

  if (TLI.supportSplitCSR(FuncInfo.MF))
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    // ...
    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;
    // ...
    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple()) return false;
    MVT RVVT = RVEVT.getSimpleVT();
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      // ...
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    // ...
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(RetOpc));
  AddOptionalDefs(MIB);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}
unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  // Manually compute the global's type to avoid building it when unnecessary.
  Type *GVTy = Type::getInt32PtrTy(*Context, /*AS=*/0);
  EVT LCREVT = TLI.getValueType(DL, GVTy);
  if (!LCREVT.isSimple()) return 0;

  GlobalValue *GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, nullptr,
                                       Name);
  assert(GV->getType() == GVTy && "We miscomputed the type for the global!");
  return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
}
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  // ...
  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  // ...
  for (Value *Op : I->operands()) {
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    // ...
  }

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (Subtarget->genLongCalls()) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (Subtarget->genLongCalls())
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  ImmutableCallSite CS(CI);
  // ...
  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  // ...
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  // ...
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip
    // the last two arguments, which shouldn't be passed to the underlying
    // function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::SwiftSelf) ||
        CS.paramHasAttr(AttrInd, Attribute::SwiftError) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);
    // ...
  }

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || Subtarget->genLongCalls()) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DbgLoc, TII.get(CallOpc));
  // ...
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}
bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len, unsigned Alignment) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  while (Len) {
    MVT VT;
    if (!Alignment || Alignment >= 4) {
      if (Len >= 4)
        VT = MVT::i32;
      else if (Len >= 2)
        VT = MVT::i16;
      else {
        assert(Len == 1 && "Expected a length of 1!");
        VT = MVT::i8;
      }
    } else {
      // Bound based on alignment.
      if (Len >= 2 && Alignment == 2)
        VT = MVT::i16;
      else
        VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits() / 8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}
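// Intrinsics: frameaddress walks the saved-FP chain with repeated loads,
// memcpy/memmove first try the small inline expansion above, and memset (or
// any non-inlinable transfer) is routed to the libcall path via SelectCall.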
bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
    MFI.setFrameAddressIsTaken(true);

    unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    // ...
    unsigned SrcReg = FramePtr;

    // Recursively load frame address:
    //   ldr r0 [fp]
    //   ldr r0 [r0]
    //   ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                              TII.get(LdrOpc), DestReg)
                          .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    updateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // ...
    // We don't inline memmoves; a small constant-length memcpy is common
    // enough that we want to do it without a call if possible.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        unsigned Alignment = MTI.getAlignment();
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
          return true;
      }
    }
    // ...
    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile; only i32 lengths and small address spaces.
    if (MSI.isVolatile())
      return false;
    // ...
    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
    return true;
  }
  }
}
bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(DL, Op->getType(), true);
  DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  updateValueMap(I, SrcReg);
  return true;
}
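// ARMEmitIntExt implements i1/i8/i16 -> i32 extension with a table-driven
// scheme: depending on Thumb mode and the availability of v6 extend
// instructions it emits either a single sxt/uxt-style instruction or a
// two-instruction shift-left/shift-right sequence.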
unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;
  // ...
  // Table of which combinations can be emitted as a single instruction,
  // and which will require two.
  static const uint8_t isSingleInstrTbl[3][2][2][2] = {
    //            ARM                     Thumb
    //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
    //    ext:     s  z      s  z          s  z      s  z
    /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
    /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
    /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
  };

  // Target registers for ARM can never be PC; 16-bit Thumb instructions are
  // restricted to the lower 8 registers, 32-bit Thumb to non-SP/non-PC.
  static const TargetRegisterClass *RCTbl[2][2] = {
    // Instructions: Two                     Single
    /* ARM   */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
    /* Thumb */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
  };

  // Table governing the instruction(s) to be emitted.
  static const struct InstructionTable {
    uint32_t Opc   : 16;
    uint32_t hasS  : 1;
    uint32_t Shift : 7;
    uint32_t Imm   : 8;
  } IT[2][2][3][2] = {
    // ...
  };

  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DestVT.getSizeInBits();
  assert((SrcBits < DestBits) && "can only extend to larger types");
  assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
         "other sizes unimplemented");
  assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
         "other sizes unimplemented");

  bool hasV6Ops = Subtarget->hasV6Ops();
  unsigned Bitness = SrcBits / 8;  // {1,8,16} => {0,1,2}
  assert((Bitness < 3) && "sanity-check table bounds");

  bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
  const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
  const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
  unsigned Opc = ITP->Opc;
  unsigned hasS = ITP->hasS;
  // ...
  assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
         "only MOVsi has shift operand addressing mode");
  unsigned Imm = ITP->Imm;

  // 16-bit Thumb instructions always set CPSR (unless in an IT block).
  bool setsCPSR = &ARM::tGPRRegClass == RC;
  unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
  unsigned ResultReg;
  // ...
  unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
  for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
    ResultReg = createResultReg(RC);
    bool isLsl = (0 == Instr) && !isSingleInstr;
    unsigned Opcode = isLsl ? LSLOpc : Opc;
    // ...
    bool isKill = 1 == Instr;
    MachineInstrBuilder MIB = BuildMI(
        *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
    // ...
  }

  return ResultReg;
}
bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple()) return false;
  if (!DestEVT.isSimple()) return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // We handle thumb2 mode by target independent selector
  // or SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(Opc), ResultReg)
          .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  updateValueMap(I, ResultReg);
  return true;
}
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Load:
    return SelectLoad(I);
  case Instruction::Store:
    return SelectStore(I);
  case Instruction::Br:
    return SelectBranch(I);
  case Instruction::IndirectBr:
    return SelectIndirectBr(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return SelectCmp(I);
  case Instruction::FPExt:
    return SelectFPExt(I);
  case Instruction::FPTrunc:
    return SelectFPTrunc(I);
  case Instruction::SIToFP:
    return SelectIToFP(I, true);
  case Instruction::UIToFP:
    return SelectIToFP(I, false);
  case Instruction::FPToSI:
    return SelectFPToI(I, true);
  case Instruction::FPToUI:
    return SelectFPToI(I, false);
  case Instruction::Add:
    return SelectBinaryIntOp(I, ISD::ADD);
  case Instruction::Or:
    return SelectBinaryIntOp(I, ISD::OR);
  case Instruction::Sub:
    return SelectBinaryIntOp(I, ISD::SUB);
  case Instruction::FAdd:
    return SelectBinaryFPOp(I, ISD::FADD);
  case Instruction::FSub:
    return SelectBinaryFPOp(I, ISD::FSUB);
  case Instruction::FMul:
    return SelectBinaryFPOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectDiv(I, true);
  case Instruction::UDiv:
    return SelectDiv(I, false);
  case Instruction::SRem:
    return SelectRem(I, true);
  case Instruction::URem:
    return SelectRem(I, false);
  case Instruction::Call:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      return SelectIntrinsicCall(*II);
    return SelectCall(I);
  case Instruction::Select:
    return SelectSelect(I);
  case Instruction::Ret:
    return SelectRet(I);
  case Instruction::Trunc:
    return SelectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return SelectIntExt(I);
  case Instruction::Shl:
    return SelectShift(I, ARM_AM::lsl);
  case Instruction::LShr:
    return SelectShift(I, ARM_AM::lsr);
  case Instruction::AShr:
    return SelectShift(I, ARM_AM::asr);
  default: break;
  }
  return false;
}
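// tryToFoldLoadIntoMI is FastISel's load-folding hook: when the instruction
// consuming a loaded value matches one of the sign/zero-extend patterns in
// the FoldableLoadExtends table below, the extension is folded into the load
// itself (e.g. ldrb + uxtb becomes a single ldrb).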
// All instructions tested for folding a load are expected to be of the form:
//   ISEL, Reg, LoadReg, Imm
const struct FoldableLoadExtendsStruct {
  uint16_t Opc[2];  // ARM, Thumb.
  uint8_t ExpectedImm;
  uint8_t isZExt     : 1;
  uint8_t ExpectedVT : 7;
} FoldableLoadExtends[] = {
  // ...
  { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },
  // ...
};
/// The specified machine instr operand is a vreg, and that vreg is being
/// provided by the specified load instruction.  If possible, try to fold the
/// load as an operand to the instruction, returning true if successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ...
  bool Found = false;
  bool isZExt;
  for (unsigned i = 0, e = array_lengthof(FoldableLoadExtends); i != e; ++i) {
    if (FoldableLoadExtends[i].Opc[isThumb2] == MI->getOpcode() &&
        (uint64_t)FoldableLoadExtends[i].ExpectedImm == Imm &&
        MVT((MVT::SimpleValueType)FoldableLoadExtends[i].ExpectedVT) == VT) {
      Found = true;
      isZExt = FoldableLoadExtends[i].isZExt;
    }
  }
  if (!Found) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, MVT VT) {
  bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);

  // ...
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  // ...
  unsigned ConstAlign =
      MF->getDataLayout().getPrefTypeAlignment(Type::getInt32PtrTy(*Context));
  unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);

  unsigned TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
  unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), TempReg)
          .addConstantPoolIndex(Idx);
  if (Opc == ARM::LDRcp)
    MIB.addImm(0);
  AddDefaultPred(MIB);

  // Fix the address by adding pc.
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
                                                          : ARM::PICADD;
  // ...
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
            .addReg(TempReg)
            .addImm(ARMPCLabelIndex);
  if (!Subtarget->isThumb())
    AddDefaultPred(MIB);

  if (UseGOT_PREL && Subtarget->isThumb()) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(ARM::t2LDRi12), NewDestReg)
              .addReg(DestReg)
              .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }
  return DestReg;
}
bool ARMFastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg())
    return false;
  // ...
  unsigned Idx = 1;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++Idx) {
    // ...
    Type *ArgTy = I->getType();
    // ...
    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    if (!ArgVT.isSimple()) return false;
    // ...
  }

  static const MCPhysReg GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  const TargetRegisterClass *RC = &ARM::rGPRRegClass;
  Idx = 0;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I, ++Idx) {
    unsigned SrcReg = GPRArgRegs[Idx];
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
    // FIXME: It's necessary to emit a copy from the livein copy; without it,
    // EmitLiveInCopies may eliminate the livein if its only use is a bitcast.
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(DstReg, getKillRegState(true));
    updateValueMap(&*I, ResultReg);
  }

  return true;
}
FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                              const TargetLibraryInfo *libInfo) {
  return new ARMFastISel(funcInfo, libInfo);
}