#include "llvm/IR/IntrinsicsAArch64.h"
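// AArch64FastISel implements the target-specific FastISel path for AArch64:
// it selects simple IR instructions directly into machine instructions (as
// used at -O0), and bails out to the normal SelectionDAG path for anything it
// does not handle.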
class AArch64FastISel final : public FastISel {
  // Accessors of the Address helper used to describe a folded memory operand:
  // a register or frame-index base, an optional offset register (with shift),
  // and an immediate offset.
  using BaseKind = enum {
    RegBase,
    FrameIndexBase
  };

  BaseKind Kind = RegBase;
  unsigned OffsetReg = 0;

  void setKind(BaseKind K) { Kind = K; }
  BaseKind getKind() const { return Kind; }
  bool isRegBase() const { return Kind == RegBase; }
  bool isFIBase() const { return Kind == FrameIndexBase; }

  void setReg(unsigned Reg) {
    assert(isRegBase() && "Invalid base register access!");
    Base.Reg = Reg;
  }

  unsigned getReg() const {
    assert(isRegBase() && "Invalid base register access!");
    return Base.Reg;
  }

  void setOffsetReg(unsigned Reg) { OffsetReg = Reg; }

  unsigned getOffsetReg() const { return OffsetReg; }

  void setFI(unsigned FI) {
    assert(isFIBase() && "Invalid base frame index access!");
    Base.FI = FI;
  }

  unsigned getFI() const {
    assert(isFIBase() && "Invalid base frame index access!");
    return Base.FI;
  }

  void setOffset(int64_t O) { Offset = O; }
  void setShift(unsigned S) { Shift = S; }
  unsigned getShift() { return Shift; }
  bool selectRem(const Instruction *I, unsigned ISDOpcode);

  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
  bool isValueAvailable(const Value *V) const;
  bool computeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
  bool computeCallAddress(const Value *V, Address &Addr);
  bool simplifyAddress(Address &Addr, MVT VT);

  bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                          MaybeAlign Alignment);

  unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                      const Value *RHS, bool SetFlags = false,
                      bool WantResult = true, bool IsZExt = false);
  unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         unsigned RHSReg, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         uint64_t Imm, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         unsigned RHSReg, AArch64_AM::ShiftExtendType ShiftType,
                         uint64_t ShiftImm, bool SetFlags = false,
                         bool WantResult = true);
  unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
                         unsigned RHSReg, AArch64_AM::ShiftExtendType ExtType,
                         uint64_t ShiftImm, bool SetFlags = false,
                         bool WantResult = true);

  bool emitCompareAndBranch(const BranchInst *BI);
  bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
  bool emitICmp_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm);
  bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
  bool emitStoreRelease(MVT VT, unsigned SrcReg, unsigned AddrReg,
                        MachineMemOperand *MMO = nullptr);
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt);
  unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
  unsigned emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm);
  unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
  unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, unsigned RHSReg,
                       bool WantResult = true);
  unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, unsigned RHSReg,
                       AArch64_AM::ShiftExtendType ShiftType,
                       uint64_t ShiftImm, bool WantResult = true);
  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
                         const Value *RHS);
  unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
                            uint64_t Imm);
  unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
                            unsigned RHSReg, uint64_t ShiftImm);
  unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm);
  unsigned emitMul_rr(MVT RetVT, unsigned Op0, unsigned Op1);
  unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1);
  unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1);
  unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, unsigned Op1Reg);
  unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, uint64_t Imm,
                      bool IsZExt = true);
  unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, unsigned Op1Reg);
  unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, uint64_t Imm,
                      bool IsZExt = true);
  unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, unsigned Op1Reg);
  unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, uint64_t Imm,
                      bool IsZExt = false);

  bool finishCall(CallLoweringInfo &CLI, unsigned NumBytes);
#include "AArch64GenFastISel.inc"
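// The tablegen-generated include above supplies the target fastEmit_* helpers
// (fastEmit_r, fastEmit_rr, fastEmit_i, ...) that the code below falls back on
// for patterns it does not handle explicitly.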
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  assert(!I->getType()->isVectorTy() && I->getType()->isIntegerTy() &&
         "Unexpected value type.");
  bool IsZExt = isa<ZExtInst>(I);

  if (const auto *LI = dyn_cast<LoadInst>(I->getOperand(0)))

  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0)))
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr()))
  if (Subtarget->isTargetDarwin())

  if (Subtarget->isTargetWindows())
unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
         "Alloca should always return a pointer.");

  if (!FuncInfo.StaticAllocaMap.count(AI))

      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
                                         : &AArch64::GPR32RegClass;
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  Register ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
    return fastMaterializeFloatZero(CFP);

  if (VT != MVT::f32 && VT != MVT::f64)

  bool Is64Bit = (VT == MVT::f64);

    unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
    return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);

    unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
        &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

    Register TmpReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc1), TmpReg)

    Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)

  unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
  Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

  unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
  Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
  if (!Subtarget->useSmallAddressing() && !Subtarget->isTargetMachO())

  unsigned OpFlags = Subtarget->ClassifyGlobalReference(GV, TM);

  EVT DestEVT = TLI.getValueType(DL, GV->getType(), true);

  Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

    if (Subtarget->isTargetILP32()) {
      ResultReg = createResultReg(&AArch64::GPR32RegClass);
      LdrOpc = AArch64::LDRWui;

      ResultReg = createResultReg(&AArch64::GPR64RegClass);
      LdrOpc = AArch64::LDRXui;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(LdrOpc),

    if (!Subtarget->isTargetILP32())

    Register Result64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::SUBREG_TO_REG))

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

      unsigned DstReg = createResultReg(&AArch64::GPR64commonRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::MOVKXi),

    ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  if (isa<ConstantPointerNull>(C)) {
    assert(VT == MVT::i64 && "Expected 64-bit pointers");

  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return materializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV);
unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
         "Floating-point constant is not a positive zero.");

  if (!isTypeLegal(CFP->getType(), VT))

  if (VT != MVT::f32 && VT != MVT::f64)

  bool Is64Bit = (VT == MVT::f64);
  unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
  unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
  return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg);
  if (const auto *MI = dyn_cast<MulOperator>(I)) {
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0)))
      if (C->getValue().isPowerOf2())

    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(1)))
      if (C->getValue().isPowerOf2())
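// computeAddress folds the address computation feeding a load or store into
// the Address object: it looks through bitcasts, inttoptr/ptrtoint, GEPs with
// constant offsets, and static allocas, and turns add, shl, mul-by-power-of-
// two, and and/extend patterns into a scaled (and possibly sign- or zero-
// extended) register offset.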
bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
{
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
      Opcode = I->getOpcode();

  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();

  if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)

  case Instruction::BitCast:
    return computeAddress(U->getOperand(0), Addr, Ty);

  case Instruction::IntToPtr:
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr, Ty);

  case Instruction::PtrToInt:
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr, Ty);

  case Instruction::GetElementPtr: {
      const Value *Op = GTI.getOperand();
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();

        uint64_t S = GTI.getSequentialElementStride(DL);

          if (canFoldAddIntoGEP(U, Op)) {
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));

            Op = cast<AddOperator>(Op)->getOperand(0);

          goto unsupported_gep;

    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr, Ty))

  case Instruction::Alloca: {
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);

  case Instruction::Add: {
    if (isa<ConstantInt>(LHS))

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
      return computeAddress(LHS, Addr, Ty);

    if (computeAddress(LHS, Addr, Ty) && computeAddress(RHS, Addr, Ty))

  case Instruction::Sub: {
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
      return computeAddress(LHS, Addr, Ty);

  case Instruction::Shl: {
    if (Addr.getOffsetReg())

    const auto *CI = dyn_cast<ConstantInt>(U->getOperand(1));

    if (Val < 1 || Val > 3)

      NumBytes = NumBits / 8;

    if (NumBytes != (1ULL << Val))

    const Value *Src = U->getOperand(0);
    if (const auto *I = dyn_cast<Instruction>(Src)) {
      if (FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
        if (const auto *ZE = dyn_cast<ZExtInst>(I)) {
              ZE->getOperand(0)->getType()->isIntegerTy(32)) {
            Src = ZE->getOperand(0);
        } else if (const auto *SE = dyn_cast<SExtInst>(I)) {
              SE->getOperand(0)->getType()->isIntegerTy(32)) {
            Src = SE->getOperand(0);

    if (const auto *AI = dyn_cast<BinaryOperator>(Src))
      if (AI->getOpcode() == Instruction::And) {

        if (const auto *C = dyn_cast<ConstantInt>(LHS))
          if (C->getValue() == 0xffffffff)

        if (const auto *C = dyn_cast<ConstantInt>(RHS))
          if (C->getValue() == 0xffffffff) {

            Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
            Addr.setOffsetReg(Reg);

    Addr.setOffsetReg(Reg);

  case Instruction::Mul: {
    if (Addr.getOffsetReg())

    if (const auto *C = dyn_cast<ConstantInt>(LHS))
      if (C->getValue().isPowerOf2())

    assert(isa<ConstantInt>(RHS) && "Expected an ConstantInt.");
    const auto *C = cast<ConstantInt>(RHS);
    unsigned Val = C->getValue().logBase2();
    if (Val < 1 || Val > 3)

      NumBytes = NumBits / 8;

    if (NumBytes != (1ULL << Val))

    if (const auto *I = dyn_cast<Instruction>(Src)) {
      if (FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
        if (const auto *ZE = dyn_cast<ZExtInst>(I)) {
              ZE->getOperand(0)->getType()->isIntegerTy(32)) {
            Src = ZE->getOperand(0);
        } else if (const auto *SE = dyn_cast<SExtInst>(I)) {
              SE->getOperand(0)->getType()->isIntegerTy(32)) {
            Src = SE->getOperand(0);

    Addr.setOffsetReg(Reg);

  case Instruction::And: {
    if (Addr.getOffsetReg())

    if (!Ty || DL.getTypeSizeInBits(Ty) != 8)

    if (const auto *C = dyn_cast<ConstantInt>(LHS))
      if (C->getValue() == 0xffffffff)

    if (const auto *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue() == 0xffffffff) {

    Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
    Addr.setOffsetReg(Reg);

  case Instruction::SExt:
  case Instruction::ZExt: {
    if (!Addr.getReg() || Addr.getOffsetReg())

    const Value *Src = nullptr;
    if (const auto *ZE = dyn_cast<ZExtInst>(U)) {
      if (!isIntExtFree(ZE) && ZE->getOperand(0)->getType()->isIntegerTy(32)) {
        Src = ZE->getOperand(0);
    } else if (const auto *SE = dyn_cast<SExtInst>(U)) {
      if (!isIntExtFree(SE) && SE->getOperand(0)->getType()->isIntegerTy(32)) {
        Src = SE->getOperand(0);

    Addr.setOffsetReg(Reg);

  if (Addr.isRegBase() && !Addr.getReg()) {

  if (!Addr.getOffsetReg()) {

    Addr.setOffsetReg(Reg);
bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();

  case Instruction::BitCast:
      return computeCallAddress(U->getOperand(0), Addr);

  case Instruction::IntToPtr:
        TLI.getValueType(DL, U->getOperand(0)->getType()) ==
            TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

  case Instruction::PtrToInt:
    if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    Addr.setGlobalValue(GV);

  if (!Addr.getGlobalValue()) {
    Addr.setReg(getRegForValue(V));
    return Addr.getReg() != 0;
bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  if (Subtarget->isTargetILP32() && Ty->isPointerTy())

  if (evt == MVT::Other || !evt.isSimple())

  return TLI.isTypeLegal(VT);
bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {
  if (isTypeLegal(Ty, VT))

  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
bool AArch64FastISel::isValueAvailable(const Value *V) const {
  if (!isa<Instruction>(V))

  const auto *I = cast<Instruction>(V);
  return FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB;
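// simplifyAddress rewrites an Address that cannot be encoded directly in an
// AArch64 load/store (frame index combined with a register offset, an
// out-of-range immediate, or a shifted/extended offset with no base register)
// into a plain base-register form by emitting the extra arithmetic up front.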
bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
  if (Subtarget->isTargetILP32())

  bool ImmediateOffsetNeedsLowering = false;
  bool RegisterOffsetNeedsLowering = false;

    ImmediateOffsetNeedsLowering = true;
  else if (Offset > 0 && !(Offset & (ScaleFactor - 1)) &&
           !isUInt<12>(Offset / ScaleFactor))
    ImmediateOffsetNeedsLowering = true;

  if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg())
    RegisterOffsetNeedsLowering = true;

  if (Addr.isRegBase() && Addr.getOffsetReg() && !Addr.getReg())
    RegisterOffsetNeedsLowering = true;

  if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())

    Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),

    Addr.setKind(Address::RegBase);
    Addr.setReg(ResultReg);

  if (RegisterOffsetNeedsLowering) {
    unsigned ResultReg = 0;
    if (Addr.getReg()) {
        ResultReg = emitAddSub_rx(true, MVT::i64, Addr.getReg(),
                                  Addr.getOffsetReg(), Addr.getExtendType(),
        ResultReg = emitAddSub_rs(true, MVT::i64, Addr.getReg(),

        ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
                               Addr.getShift(), true);
        ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
                               Addr.getShift(), false);
        ResultReg = emitLSL_ri(MVT::i64, MVT::i64, Addr.getOffsetReg(),

    Addr.setReg(ResultReg);
    Addr.setOffsetReg(0);

  if (ImmediateOffsetNeedsLowering) {
      ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), Offset);

    Addr.setReg(ResultReg);
void AArch64FastISel::addLoadStoreOperands(Address &Addr,
                                           unsigned ScaleFactor,
  int64_t Offset = Addr.getOffset() / ScaleFactor;

  if (Addr.isFIBase()) {
    int FI = Addr.getFI();
    MMO = FuncInfo.MF->getMachineMemOperand(
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    assert(Addr.isRegBase() && "Unexpected address kind.");

    if (Addr.getOffsetReg()) {
      assert(Addr.getOffset() == 0 && "Unexpected offset");
unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                                     const Value *RHS, bool SetFlags,
                                     bool WantResult, bool IsZExt) {
  bool NeedExtend = false;

  if (UseAdd && isa<Constant>(LHS) && !isa<Constant>(RHS))

  if (UseAdd && LHS->hasOneUse() && isValueAvailable(LHS))

  if (UseAdd && LHS->hasOneUse() && isValueAvailable(LHS))
    if (const auto *SI = dyn_cast<BinaryOperator>(LHS))
      if (isa<ConstantInt>(SI->getOperand(1)))
        if (SI->getOpcode() == Instruction::Shl ||
            SI->getOpcode() == Instruction::LShr ||
            SI->getOpcode() == Instruction::AShr)

  Register LHSReg = getRegForValue(LHS);

    LHSReg = emitIntExt(SrcVT, LHSReg, RetVT, IsZExt);

  unsigned ResultReg = 0;
  if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
    uint64_t Imm = IsZExt ? C->getZExtValue() : C->getSExtValue();
    if (C->isNegative())
      ResultReg = emitAddSub_ri(!UseAdd, RetVT, LHSReg, -Imm, SetFlags,

      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, Imm, SetFlags,
  } else if (const auto *C = dyn_cast<Constant>(RHS))
    if (C->isNullValue())
      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, 0, SetFlags, WantResult);

      isValueAvailable(RHS)) {
    Register RHSReg = getRegForValue(RHS);
    return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType, 0,
                         SetFlags, WantResult);

      const Value *MulLHS = cast<MulOperator>(RHS)->getOperand(0);
      const Value *MulRHS = cast<MulOperator>(RHS)->getOperand(1);

      if (const auto *C = dyn_cast<ConstantInt>(MulLHS))
        if (C->getValue().isPowerOf2())

      assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
      uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
      Register RHSReg = getRegForValue(MulLHS);
      ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,
                                ShiftVal, SetFlags, WantResult);

    if (const auto *SI = dyn_cast<BinaryOperator>(RHS)) {
      if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1))) {
        switch (SI->getOpcode()) {

          Register RHSReg = getRegForValue(SI->getOperand(0));
          ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, ShiftType,
                                    ShiftVal, SetFlags, WantResult);

  Register RHSReg = getRegForValue(RHS);

    RHSReg = emitIntExt(SrcVT, RHSReg, RetVT, IsZExt);

  return emitAddSub_rr(UseAdd, RetVT, LHSReg, RHSReg, SetFlags, WantResult);
unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
                                        unsigned RHSReg, bool SetFlags,
  assert(LHSReg && RHSReg && "Invalid register number.");

  if (LHSReg == AArch64::SP || LHSReg == AArch64::WSP ||
      RHSReg == AArch64::SP || RHSReg == AArch64::WSP)

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWrr,  AArch64::SUBXrr  },
      { AArch64::ADDWrr,  AArch64::ADDXrr  } },
    { { AArch64::SUBSWrr, AArch64::SUBSXrr },
      { AArch64::ADDSWrr, AArch64::ADDSXrr } }

  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
  assert(LHSReg && "Invalid register number.");

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  if (isUInt<12>(Imm))
  else if ((Imm & 0xfff000) == Imm) {

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWri,  AArch64::SUBXri  },
      { AArch64::ADDWri,  AArch64::ADDXri  } },
    { { AArch64::SUBSWri, AArch64::SUBSXri },
      { AArch64::ADDSWri, AArch64::ADDSXri } }

  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];

    RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;

    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
  assert(LHSReg && RHSReg && "Invalid register number.");
  assert(LHSReg != AArch64::SP && LHSReg != AArch64::WSP &&
         RHSReg != AArch64::SP && RHSReg != AArch64::WSP);

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWrs,  AArch64::SUBXrs  },
      { AArch64::ADDWrs,  AArch64::ADDXrs  } },
    { { AArch64::SUBSWrs, AArch64::SUBSXrs },
      { AArch64::ADDSWrs, AArch64::ADDSXrs } }

  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
      .addImm(getShifterImm(ShiftType, ShiftImm));
unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
  assert(LHSReg && RHSReg && "Invalid register number.");
  assert(LHSReg != AArch64::XZR && LHSReg != AArch64::WZR &&
         RHSReg != AArch64::XZR && RHSReg != AArch64::WZR);

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWrx,  AArch64::SUBXrx  },
      { AArch64::ADDWrx,  AArch64::ADDXrx  } },
    { { AArch64::SUBSWrx, AArch64::SUBSXrx },
      { AArch64::ADDSWrx, AArch64::ADDSXrx } }

  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];

    RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;

    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
      .addImm(getArithExtendImm(ExtType, ShiftImm));
bool AArch64FastISel::emitCmp(const Value *LHS, const Value *RHS, bool IsZExt) {
  EVT EVT = TLI.getValueType(DL, Ty, true);

    return emitICmp(VT, LHS, RHS, IsZExt);

    return emitFCmp(VT, LHS, RHS);
bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS,
  return emitSub(RetVT, LHS, RHS, true, false,

bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm) {
  return emitAddSub_ri(false, RetVT, LHSReg, Imm,
bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
  if (RetVT != MVT::f32 && RetVT != MVT::f64)

  bool UseImm = false;
  if (const auto *CFP = dyn_cast<ConstantFP>(RHS))

  Register LHSReg = getRegForValue(LHS);

    unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))

  Register RHSReg = getRegForValue(RHS);

  unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                                  bool SetFlags, bool WantResult, bool IsZExt) {
  return emitAddSub(true, RetVT, LHS, RHS, SetFlags, WantResult,

unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm) {
    ResultReg = emitAddSub_ri(false, VT, Op0, -Imm);
    ResultReg = emitAddSub_ri(true, VT, Op0, Imm);

    ResultReg = emitAddSub_rr(true, VT, Op0, CReg);

unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                                  bool SetFlags, bool WantResult, bool IsZExt) {
  return emitAddSub(false, RetVT, LHS, RHS, SetFlags, WantResult,

unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg,
                                      unsigned RHSReg, bool WantResult) {
  return emitAddSub_rr(false, RetVT, LHSReg, RHSReg,

unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg,
                                      uint64_t ShiftImm, bool WantResult) {
  return emitAddSub_rs(false, RetVT, LHSReg, RHSReg, ShiftType,
                       ShiftImm, true, WantResult);
unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))

  if (const auto *SI = dyn_cast<ShlOperator>(LHS))
    if (isa<ConstantInt>(SI->getOperand(1)))

  Register LHSReg = getRegForValue(LHS);

  unsigned ResultReg = 0;
  if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
    ResultReg = emitLogicalOp_ri(ISDOpc, RetVT, LHSReg, Imm);

      const Value *MulLHS = cast<MulOperator>(RHS)->getOperand(0);
      const Value *MulRHS = cast<MulOperator>(RHS)->getOperand(1);

      if (const auto *C = dyn_cast<ConstantInt>(MulLHS))
        if (C->getValue().isPowerOf2())

      assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
      uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();

      Register RHSReg = getRegForValue(MulLHS);
      ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);

    if (const auto *SI = dyn_cast<ShlOperator>(RHS))
      if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1))) {
        Register RHSReg = getRegForValue(SI->getOperand(0));
        ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);

  Register RHSReg = getRegForValue(RHS);

  ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, RHSReg);
  if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
                "ISD nodes are not consecutive!");
  static const unsigned OpcTable[3][2] = {
    { AArch64::ANDWri, AArch64::ANDXri },
    { AArch64::ORRWri, AArch64::ORRXri },
    { AArch64::EORWri, AArch64::EORXri }

    Opc = OpcTable[Idx][0];
    RC = &AArch64::GPR32spRegClass;

    Opc = OpcTable[ISDOpc - ISD::AND][1];
    RC = &AArch64::GPR64spRegClass;

      fastEmitInst_ri(Opc, RC, LHSReg,
  if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);

unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
                                           unsigned LHSReg, unsigned RHSReg,
                "ISD nodes are not consecutive!");
  static const unsigned OpcTable[3][2] = {
    { AArch64::ANDWrs, AArch64::ANDXrs },
    { AArch64::ORRWrs, AArch64::ORRXrs },
    { AArch64::EORWrs, AArch64::EORXrs }

    Opc = OpcTable[ISDOpc - ISD::AND][0];
    RC = &AArch64::GPR32RegClass;

    Opc = OpcTable[ISDOpc - ISD::AND][1];
    RC = &AArch64::GPR64RegClass;

      fastEmitInst_rri(Opc, RC, LHSReg, RHSReg,
  if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg,
  return emitLogicalOp_ri(ISD::AND, RetVT, LHSReg, Imm);
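// emitLoad picks the load opcode from the tables below based on the value
// type, on whether the offset ends up as an unscaled immediate, a scaled
// immediate, or a register offset (with optional sign/zero extension), and on
// whether the loaded value must be extended to 64 bits.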
unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
  if (!TLI.allowsMisalignedMemoryAccesses(VT))

  if (!simplifyAddress(Addr, VT))

  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {

  static const unsigned GPOpcTable[2][8][4] = {
    { { AArch64::LDURSBWi,  AArch64::LDURSHWi,  AArch64::LDURWi,
      { AArch64::LDURSBXi,  AArch64::LDURSHXi,  AArch64::LDURSWi,
      { AArch64::LDRSBWui,  AArch64::LDRSHWui,  AArch64::LDRWui,
      { AArch64::LDRSBXui,  AArch64::LDRSHXui,  AArch64::LDRSWui,
      { AArch64::LDRSBWroX, AArch64::LDRSHWroX, AArch64::LDRWroX,
      { AArch64::LDRSBXroX, AArch64::LDRSHXroX, AArch64::LDRSWroX,
      { AArch64::LDRSBWroW, AArch64::LDRSHWroW, AArch64::LDRWroW,
      { AArch64::LDRSBXroW, AArch64::LDRSHXroW, AArch64::LDRSWroW,

    { { AArch64::LDURBBi,   AArch64::LDURHHi,   AArch64::LDURWi,
      { AArch64::LDURBBi,   AArch64::LDURHHi,   AArch64::LDURWi,
      { AArch64::LDRBBui,   AArch64::LDRHHui,   AArch64::LDRWui,
      { AArch64::LDRBBui,   AArch64::LDRHHui,   AArch64::LDRWui,
      { AArch64::LDRBBroX,  AArch64::LDRHHroX,  AArch64::LDRWroX,
      { AArch64::LDRBBroX,  AArch64::LDRHHroX,  AArch64::LDRWroX,
      { AArch64::LDRBBroW,  AArch64::LDRHHroW,  AArch64::LDRWroW,
      { AArch64::LDRBBroW,  AArch64::LDRHHroW,  AArch64::LDRWroW,

  static const unsigned FPOpcTable[4][2] = {
    { AArch64::LDURSi,  AArch64::LDURDi  },
    { AArch64::LDRSui,  AArch64::LDRDui  },
    { AArch64::LDRSroX, AArch64::LDRDroX },
    { AArch64::LDRSroW, AArch64::LDRDroW }

  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;

  bool IsRet64Bit = RetVT == MVT::i64;

    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][0];
    RC = (IsRet64Bit && !WantZExt) ?
        &AArch64::GPR64RegClass: &AArch64::GPR32RegClass;

    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][1];
    RC = (IsRet64Bit && !WantZExt) ?
        &AArch64::GPR64RegClass: &AArch64::GPR32RegClass;

    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][2];
    RC = (IsRet64Bit && !WantZExt) ?
        &AArch64::GPR64RegClass: &AArch64::GPR32RegClass;

    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][3];
    RC = &AArch64::GPR64RegClass;

    Opc = FPOpcTable[Idx][0];
    RC = &AArch64::FPR32RegClass;

    Opc = FPOpcTable[Idx][1];
    RC = &AArch64::FPR64RegClass;

  Register ResultReg = createResultReg(RC);
                                    TII.get(Opc), ResultReg);

  if (VT == MVT::i1) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");

  if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
    Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(AArch64::sub_32);
bool AArch64FastISel::selectAddSub(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true))
    return selectOperator(I, I->getOpcode());

  switch (I->getOpcode()) {
  case Instruction::Add:
    ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Sub:
    ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));

  updateValueMap(I, ResultReg);

bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true))
    return selectOperator(I, I->getOpcode());

  switch (I->getOpcode()) {
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));

  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectLoad(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true) ||
      cast<LoadInst>(I)->isAtomic())

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())

  if (!computeAddress(I->getOperand(0), Addr, I->getType()))

  bool WantZExt = true;
  const Value *IntExtVal = nullptr;
  if (I->hasOneUse()) {
    if (const auto *ZE = dyn_cast<ZExtInst>(I->use_begin()->getUser())) {
      if (isTypeSupported(ZE->getType(), RetVT))
    } else if (const auto *SE = dyn_cast<SExtInst>(I->use_begin()->getUser())) {
      if (isTypeSupported(SE->getType(), RetVT))

  unsigned ResultReg =
      emitLoad(VT, RetVT, Addr, WantZExt, createMachineMemOperandFor(I));

    auto *MI = MRI.getUniqueVRegDef(Reg);
      if (RetVT == MVT::i64 && VT <= MVT::i32) {
          ResultReg = std::prev(I)->getOperand(0).getReg();
          removeDeadCode(I, std::next(I));
          ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
        updateValueMap(I, ResultReg);

      for (auto &Opnd : MI->uses()) {
          Reg = Opnd.getReg();
      removeDeadCode(I, std::next(I));
      MI = MRI.getUniqueVRegDef(Reg);
    updateValueMap(IntExtVal, ResultReg);

  updateValueMap(I, ResultReg);
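// Atomic stores with release semantics are emitted directly as STLR
// instructions by emitStoreRelease; non-atomic stores go through the regular
// emitStore path below.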
bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
  default: return false;
  case MVT::i8:  Opc = AArch64::STLRB; break;
  case MVT::i16: Opc = AArch64::STLRH; break;
  case MVT::i32: Opc = AArch64::STLRW; break;
  case MVT::i64: Opc = AArch64::STLRX; break;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
  if (!TLI.allowsMisalignedMemoryAccesses(VT))

  if (!simplifyAddress(Addr, VT))

  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {

  static const unsigned OpcTable[4][6] = {
    { AArch64::STURBBi,  AArch64::STURHHi,  AArch64::STURWi,  AArch64::STURXi,
      AArch64::STURSi,   AArch64::STURDi },
    { AArch64::STRBBui,  AArch64::STRHHui,  AArch64::STRWui,  AArch64::STRXui,
      AArch64::STRSui,   AArch64::STRDui },
    { AArch64::STRBBroX, AArch64::STRHHroX, AArch64::STRWroX, AArch64::STRXroX,
      AArch64::STRSroX,  AArch64::STRDroX },
    { AArch64::STRBBroW, AArch64::STRHHroW, AArch64::STRWroW, AArch64::STRXroW,
      AArch64::STRSroW,  AArch64::STRDroW }

  bool VTIsi1 = false;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;

  case MVT::i1:  VTIsi1 = true; [[fallthrough]];
  case MVT::i8:  Opc = OpcTable[Idx][0]; break;
  case MVT::i16: Opc = OpcTable[Idx][1]; break;
  case MVT::i32: Opc = OpcTable[Idx][2]; break;
  case MVT::i64: Opc = OpcTable[Idx][3]; break;
  case MVT::f32: Opc = OpcTable[Idx][4]; break;
  case MVT::f64: Opc = OpcTable[Idx][5]; break;

  if (VTIsi1 && SrcReg != AArch64::WZR) {
    unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");
bool AArch64FastISel::selectStore(const Instruction *I) {
  const Value *Op0 = I->getOperand(0);

  if (!isTypeSupported(Op0->getType(), VT, true))

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())

  unsigned SrcReg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(Op0)) {
      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  } else if (const auto *CF = dyn_cast<ConstantFP>(Op0)) {
    if (CF->isZero() && !CF->isNegative()) {
      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;

    SrcReg = getRegForValue(Op0);

  auto *SI = cast<StoreInst>(I);

  if (SI->isAtomic()) {
    Register AddrReg = getRegForValue(PtrV);
    return emitStoreRelease(VT, SrcReg, AddrReg,
                            createMachineMemOperandFor(I));
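// emitCompareAndBranch tries to fold an integer compare against zero (or a
// single-bit test via an 'and' with a power of two) directly into
// CBZ/CBNZ/TBZ/TBNZ instead of emitting a separate compare plus conditional
// branch.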
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
  if (FuncInfo.MF->getFunction().hasFnAttribute(
          Attribute::SpeculativeLoadHardening))

  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

  switch (Predicate) {
    if (isa<Constant>(LHS) && cast<Constant>(LHS)->isNullValue())

    if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())

    if (const auto *AI = dyn_cast<BinaryOperator>(LHS))
      if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {

        if (const auto *C = dyn_cast<ConstantInt>(AndLHS))
          if (C->getValue().isPowerOf2())

        if (const auto *C = dyn_cast<ConstantInt>(AndRHS))
          if (C->getValue().isPowerOf2()) {
            TestBit = C->getValue().logBase2();

    if (!isa<Constant>(RHS) || !cast<Constant>(RHS)->isNullValue())

    if (!isa<ConstantInt>(RHS))

    if (cast<ConstantInt>(RHS)->getValue() != APInt(BW, -1, true))

  static const unsigned OpcTable[2][2][2] = {
    { {AArch64::CBZW,  AArch64::CBZX },
      {AArch64::CBNZW, AArch64::CBNZX} },
    { {AArch64::TBZW,  AArch64::TBZX },
      {AArch64::TBNZW, AArch64::TBNZX} }

  bool IsBitTest = TestBit != -1;
  bool Is64Bit = BW == 64;
  if (TestBit < 32 && TestBit >= 0)

  unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];

  Register SrcReg = getRegForValue(LHS);

  if (BW == 64 && !Is64Bit)
    SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, AArch64::sub_32);

  if ((BW < 32) && !IsBitTest)
    SrcReg = emitIntExt(VT, SrcReg, MVT::i32, true);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
bool AArch64FastISel::selectBranch(const Instruction *I) {
    if (CI->hasOneUse() && isValueAvailable(CI)) {
      switch (Predicate) {
        fastEmitBranch(FBB, MIMD.getDL());
        fastEmitBranch(TBB, MIMD.getDL());

      if (emitCompareAndBranch(BI))

      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

      switch (Predicate) {

        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))

  } else if (const auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::B))

    FuncInfo.MBB->addSuccessorWithoutProb(Target);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))

  unsigned Opcode = AArch64::TBNZW;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    Opcode = AArch64::TBZW;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
      .addReg(ConstrainedCondReg)
bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
  if (FuncInfo.MF->getFunction().hasFnAttribute("ptrauth-indirect-gotos"))

    FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(Succ));
  const CmpInst *CI = cast<CmpInst>(I);

  unsigned ResultReg = 0;
  switch (Predicate) {
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)

    ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1);

    updateValueMap(I, ResultReg);

  ResultReg = createResultReg(&AArch64::GPR32RegClass);

  static unsigned CondCodeTable[2][2] = {

  switch (Predicate) {

    Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),

    updateValueMap(I, ResultReg);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),

  updateValueMap(I, ResultReg);
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
  if (!SI->getType()->isIntegerTy(1))

  const Value *Src1Val, *Src2Val;
  bool NeedExtraOp = false;
  if (auto *CI = dyn_cast<ConstantInt>(SI->getTrueValue())) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getFalseValue();
      Opc = AArch64::ORRWrr;

      Src1Val = SI->getFalseValue();
      Src2Val = SI->getCondition();
      Opc = AArch64::BICWrr;
  } else if (auto *CI = dyn_cast<ConstantInt>(SI->getFalseValue())) {
      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ORRWrr;

      Src1Val = SI->getCondition();
      Src2Val = SI->getTrueValue();
      Opc = AArch64::ANDWrr;

  Register Src1Reg = getRegForValue(Src1Val);

  Register Src2Reg = getRegForValue(Src2Val);

    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, 1);

  Register ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
  updateValueMap(SI, ResultReg);
bool AArch64FastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");

  if (!isTypeSupported(I->getType(), VT))

    Opc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;

    Opc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;

    Opc = AArch64::FCSELSrrr;
    RC = &AArch64::FPR32RegClass;

    Opc = AArch64::FCSELDrrr;
    RC = &AArch64::FPR64RegClass;

  if (optimizeSelect(SI))

  if (foldXALUIntrinsic(CC, I, Cond)) {
  } else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
             isValueAvailable(Cond)) {
    const auto *Cmp = cast<CmpInst>(Cond);

    const Value *FoldSelect = nullptr;
    switch (Predicate) {
      FoldSelect = SI->getFalseValue();
      FoldSelect = SI->getTrueValue();

      Register SrcReg = getRegForValue(FoldSelect);

      updateValueMap(I, SrcReg);

    switch (Predicate) {

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,

  Register Src1Reg = getRegForValue(SI->getTrueValue());
  Register Src2Reg = getRegForValue(SI->getFalseValue());

  if (!Src1Reg || !Src2Reg)

    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, ExtraCC);

  Register ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectFPExt(const Instruction *I) {
  if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())

  Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTDSr),
  updateValueMap(I, ResultReg);

bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
  if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())

  Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTSDr),
  updateValueMap(I, ResultReg);
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())

  Register SrcReg = getRegForValue(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
  if (SrcVT == MVT::f128 || SrcVT == MVT::f16 || SrcVT == MVT::bf16)

  if (SrcVT == MVT::f64) {
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;

      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;

  Register ResultReg = createResultReg(
      DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)

  updateValueMap(I, ResultReg);

  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())

  if (DestVT == MVT::f16 || DestVT == MVT::bf16)

  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
         "Unexpected value type.");

  Register SrcReg = getRegForValue(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);

  if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {

  if (SrcVT == MVT::i64) {
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;

      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;

  Register ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
  updateValueMap(I, ResultReg);
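// fastLowerArguments only handles the simple case: plain scalar and small
// vector arguments that fit entirely in the first eight GPRs and first eight
// FPRs, with no byval/inreg/sret/swift/nest attributes; anything else falls
// back to the normal argument lowering.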
bool AArch64FastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)

  if (Subtarget->hasCustomCallingConv())

  unsigned GPRCnt = 0;
  unsigned FPRCnt = 0;
  for (auto const &Arg : F->args()) {
    if (Arg.hasAttribute(Attribute::ByVal) ||
        Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftAsync) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))

    Type *ArgTy = Arg.getType();

    EVT ArgVT = TLI.getValueType(DL, ArgTy);

        (!Subtarget->hasNEON() || !Subtarget->isLittleEndian()))

    if (VT >= MVT::i1 && VT <= MVT::i64)
    else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() ||

  if (GPRCnt > 8 || FPRCnt > 8)

    { AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
      AArch64::W5, AArch64::W6, AArch64::W7 },
    { AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
      AArch64::X5, AArch64::X6, AArch64::X7 },
    { AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,
      AArch64::H5, AArch64::H6, AArch64::H7 },
    { AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
      AArch64::S5, AArch64::S6, AArch64::S7 },
    { AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
      AArch64::D5, AArch64::D6, AArch64::D7 },
    { AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
      AArch64::Q5, AArch64::Q6, AArch64::Q7 }

  unsigned FPRIdx = 0;
  for (auto const &Arg : F->args()) {
    MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
    if (VT >= MVT::i1 && VT <= MVT::i32) {
      RC = &AArch64::GPR32RegClass;
    } else if (VT == MVT::i64) {
      RC = &AArch64::GPR64RegClass;
    } else if (VT == MVT::f16 || VT == MVT::bf16) {
      RC = &AArch64::FPR16RegClass;
    } else if (VT == MVT::f32) {
      RC = &AArch64::FPR32RegClass;
      RC = &AArch64::FPR64RegClass;
      RC = &AArch64::FPR128RegClass;

    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);

    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    updateValueMap(&Arg, ResultReg);
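// processCallArgs analyzes the outgoing arguments with the target calling
// convention, emits the call-frame setup, and copies each argument into its
// assigned register or stores it to its stack slot, extending small integers
// first. finishCall then tears the call frame down and copies the results out.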
bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
                                      unsigned &NumBytes) {
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));

  NumBytes = CCInfo.getStackSize();

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))

    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    Register ArgReg = getRegForValue(ArgVal);

    switch (VA.getLocInfo()) {
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, false);

      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, true);

    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {

      assert(VA.isMemLoc() && "Assuming store on stack.");

      if (isa<UndefValue>(ArgVal))

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittleEndian())
        BEAlign = 8 - ArgSize;

      Addr.setKind(Address::RegBase);
      Addr.setReg(AArch64::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);
bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, unsigned NumBytes) {
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))

  CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
  CCInfo.AnalyzeCallResult(CLI.Ins, CCAssignFnForCall(CC));

  Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    unsigned CopyReg = ResultReg + i;

    if (CopyVT.isVector() && !Subtarget->isLittleEndian())

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),

  CLI.ResultReg = ResultReg;
  CLI.NumResultRegs = RVLocs.size();
bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  if (!Callee && !Symbol)

  if (CLI.CB && CLI.CB->hasFnAttr(Attribute::ReturnsTwice) &&
      !Subtarget->noBTIAtReturnTwice() &&

  if (CLI.CB && CLI.CB->isIndirectCall() &&

  if (Subtarget->isTargetILP32())

  if (MF->getFunction().getParent()->getRtLibUseGOT())

  if (Subtarget->isWindowsArm64EC())

  for (auto Flag : CLI.OutFlags)
        Flag.isSwiftSelf() || Flag.isSwiftAsync() || Flag.isSwiftError())

  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))

  if (Callee && !computeCallAddress(Callee, Addr))

  if (Subtarget->isTargetWindows() && Addr.getGlobalValue() &&
      Addr.getGlobalValue()->hasExternalWeakLinkage())

  if (!processCallArgs(CLI, OutVTs, NumBytes))

  if (RegInfo->isAnyArgRegReserved(*MF))
    RegInfo->emitReservedArgRegCallError(*MF);

  if (Subtarget->useSmallAddressing()) {
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II);
    else if (Addr.getGlobalValue())
    else if (Addr.getReg()) {

    unsigned CallReg = 0;
      Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

      CallReg = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(AArch64::LDRXui), CallReg)
    } else if (Addr.getGlobalValue())
      CallReg = materializeGV(Addr.getGlobalValue());
    else if (Addr.getReg())
      CallReg = Addr.getReg();

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(CallReg);

  for (auto Reg : CLI.OutRegs)

  return finishCall(CLI, NumBytes);
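// Small, suitably aligned memcpys are expanded inline as a short sequence of
// loads and stores instead of a call to the memcpy library routine.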
  return Len / Alignment->value() <= 4;

bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
  if (!isMemCpySmall(Len, Alignment))

  int64_t UnscaledOffset = 0;

    if (!Alignment || *Alignment >= 8) {

      assert(Alignment && "Alignment is set in this branch");

      if (Len >= 4 && *Alignment == 4)
      else if (Len >= 2 && *Alignment == 2)

    unsigned ResultReg = emitLoad(VT, VT, Src);

    UnscaledOffset += Size;

    Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
    Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
  if (!isa<ExtractValueInst>(Cond))

  const auto *EV = cast<ExtractValueInst>(Cond);
  if (!isa<IntrinsicInst>(EV->getAggregateOperand()))

  const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());

      cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
  if (!isTypeLegal(RetTy, RetVT))

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())

  case Intrinsic::smul_with_overflow:
    if (const auto *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue() == 2)
        IID = Intrinsic::sadd_with_overflow;

  case Intrinsic::umul_with_overflow:
    if (const auto *C = dyn_cast<ConstantInt>(RHS))
      if (C->getValue() == 2)
        IID = Intrinsic::uadd_with_overflow;

  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:

  if (!isValueAvailable(II))

  for (auto Itr = std::prev(Start); Itr != End; --Itr) {
    if (!isa<ExtractValueInst>(Itr))

    const auto *EVI = cast<ExtractValueInst>(Itr);
    if (EVI->getAggregateOperand() != II)
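// fastLowerIntrinsicCall handles the intrinsics this file knows how to expand
// directly: frameaddress/sponentry, small memcpy/memmove, memset, a few libm
// calls (sin/cos/tan/pow), fabs, sqrt, trap/debugtrap, the *.with.overflow
// family, and the AArch64 CRC32 intrinsics.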
3440bool AArch64FastISel::fastLowerIntrinsicCall(
const IntrinsicInst *
II) {
3442 switch (
II->getIntrinsicID()) {
3443 default:
return false;
3444 case Intrinsic::frameaddress: {
3450 Register SrcReg =
MRI.createVirtualRegister(&AArch64::GPR64RegClass);
3451 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3459 unsigned Depth = cast<ConstantInt>(
II->getOperand(0))->getZExtValue();
3461 DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
3463 assert(DestReg &&
"Unexpected LDR instruction emission failure.");
3467 updateValueMap(
II, SrcReg);
3470 case Intrinsic::sponentry: {
3475 Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
3476 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3477 TII.get(AArch64::ADDXri), ResultReg)
3482 updateValueMap(
II, ResultReg);
3485 case Intrinsic::memcpy:
3486 case Intrinsic::memmove: {
3487 const auto *MTI = cast<MemTransferInst>(
II);
3489 if (MTI->isVolatile())
3494 bool IsMemCpy = (
II->getIntrinsicID() == Intrinsic::memcpy);
3495 if (isa<ConstantInt>(MTI->getLength()) && IsMemCpy) {
3498 uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
3500 if (MTI->getDestAlign() || MTI->getSourceAlign())
3501 Alignment = std::min(MTI->getDestAlign().valueOrOne(),
3502 MTI->getSourceAlign().valueOrOne());
3503 if (isMemCpySmall(Len, Alignment)) {
3505 if (!computeAddress(MTI->getRawDest(), Dest) ||
3506 !computeAddress(MTI->getRawSource(), Src))
3508 if (tryEmitSmallMemCpy(Dest, Src, Len, Alignment))
3513 if (!MTI->getLength()->getType()->isIntegerTy(64))
3516 if (MTI->getSourceAddressSpace() > 255 || MTI->getDestAddressSpace() > 255)
3521 const char *IntrMemName = isa<MemCpyInst>(
II) ?
"memcpy" :
"memmove";
3522 return lowerCallTo(
II, IntrMemName,
II->arg_size() - 1);
3524 case Intrinsic::memset: {
3538 return lowerCallTo(
II,
"memset",
II->arg_size() - 1);
3540 case Intrinsic::sin:
3541 case Intrinsic::cos:
3542 case Intrinsic::tan:
3543 case Intrinsic::pow: {
3545 if (!isTypeLegal(
II->getType(), RetVT))
3548 if (RetVT != MVT::f32 && RetVT != MVT::f64)
3552 {RTLIB::SIN_F32, RTLIB::SIN_F64},
3553 {RTLIB::COS_F32, RTLIB::COS_F64},
3554 {RTLIB::TAN_F32, RTLIB::TAN_F64},
3555 {RTLIB::POW_F32, RTLIB::POW_F64}};
3557 bool Is64Bit = RetVT == MVT::f64;
3558 switch (
II->getIntrinsicID()) {
3561 case Intrinsic::sin:
3562 LC = LibCallTable[0][Is64Bit];
3564 case Intrinsic::cos:
3565 LC = LibCallTable[1][Is64Bit];
3567 case Intrinsic::tan:
3568 LC = LibCallTable[2][Is64Bit];
3570 case Intrinsic::pow:
3571 LC = LibCallTable[3][Is64Bit];
3576 Args.reserve(
II->arg_size());
3579 for (
auto &Arg :
II->args()) {
3582 Entry.Ty = Arg->getType();
3583 Args.push_back(Entry);
3586 CallLoweringInfo CLI;
3588 CLI.setCallee(
DL, Ctx, TLI.getLibcallCallingConv(LC),
II->getType(),
3589 TLI.getLibcallName(LC), std::move(Args));
3590 if (!lowerCallTo(CLI))
3592 updateValueMap(
II, CLI.ResultReg);
3595 case Intrinsic::fabs: {
3597 if (!isTypeLegal(
II->getType(), VT))
3605 Opc = AArch64::FABSSr;
3608 Opc = AArch64::FABSDr;
3611 Register SrcReg = getRegForValue(
II->getOperand(0));
3614 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3615 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(Opc), ResultReg)
3617 updateValueMap(
II, ResultReg);
3620 case Intrinsic::trap:
3621 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(AArch64::BRK))
3624 case Intrinsic::debugtrap:
3625 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
TII.get(AArch64::BRK))
  case Intrinsic::sqrt: {
    Type *RetTy = II->getCalledFunction()->getReturnType();
    if (!isTypeLegal(RetTy, VT))
    Register Op0Reg = getRegForValue(II->getOperand(0));
    unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg);
    updateValueMap(II, ResultReg);
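
  // The *.with_overflow cases below all follow the same pattern: compute the
  // arithmetic result while setting the condition flags, then materialize the
  // i1 overflow bit from those flags. Multiplies by the constant 2 are first
  // canonicalized to the corresponding add-with-overflow, since x * 2 and
  // x + x overflow under exactly the same conditions.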
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    auto *Ty = cast<StructType>(Callee->getReturnType());
    if (!isTypeLegal(RetTy, VT))
    if (VT != MVT::i32 && VT != MVT::i64)
    if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())
    case Intrinsic::smul_with_overflow:
      if (const auto *C = dyn_cast<ConstantInt>(RHS))
        if (C->getValue() == 2) {
          IID = Intrinsic::sadd_with_overflow;
    case Intrinsic::umul_with_overflow:
      if (const auto *C = dyn_cast<ConstantInt>(RHS))
        if (C->getValue() == 2) {
          IID = Intrinsic::uadd_with_overflow;
    unsigned ResultReg1 = 0, ResultReg2 = 0, MulReg = 0;
    case Intrinsic::sadd_with_overflow:
      ResultReg1 = emitAdd(VT, LHS, RHS, true);
    case Intrinsic::uadd_with_overflow:
      ResultReg1 = emitAdd(VT, LHS, RHS, true);
    case Intrinsic::ssub_with_overflow:
      ResultReg1 = emitSub(VT, LHS, RHS, true);
    case Intrinsic::usub_with_overflow:
      ResultReg1 = emitSub(VT, LHS, RHS, true);
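
    // Signed multiply overflow (below): for i32 the product is computed in 64
    // bits with SMULL and the low 32 bits are extracted; the flag-setting
    // subtract (emitAddSub_rx with a sign-extend of the low half, remaining
    // operands elided here) detects whether the full product differs from its
    // sign-extended low half, i.e. whether it overflowed. For i64, MULHS
    // produces the high half, which the elided code presumably compares
    // against the sign bits of the low half.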
    case Intrinsic::smul_with_overflow: {
      Register LHSReg = getRegForValue(LHS);
      Register RHSReg = getRegForValue(RHS);
      if (VT == MVT::i32) {
        MulReg = emitSMULL_rr(MVT::i64, LHSReg, RHSReg);
        Register MulSubReg =
            fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
        emitAddSub_rx(false, MVT::i64, MulReg, MulSubReg,
        assert(VT == MVT::i64 && "Unexpected value type.");
        MulReg = emitMul_rr(VT, LHSReg, RHSReg);
        unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, RHSReg);
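
    // Unsigned multiply overflow (below): for i32 the product is formed with
    // UMULL and the ANDSXri against XZR (its immediate operands are elided
    // here) tests the upper 32 bits, which must be zero for the result to fit.
    // For i64, MULHU gives the high half and the SUBS against XZR sets the
    // flags that feed the overflow bit; any nonzero high half means overflow.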
    case Intrinsic::umul_with_overflow: {
      Register LHSReg = getRegForValue(LHS);
      Register RHSReg = getRegForValue(RHS);
      if (VT == MVT::i32) {
        MulReg = emitUMULL_rr(MVT::i64, LHSReg, RHSReg);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(AArch64::ANDSXri), AArch64::XZR)
        MulReg = fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
        assert(VT == MVT::i64 && "Unexpected value type.");
        MulReg = emitMul_rr(VT, LHSReg, RHSReg);
        unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, RHSReg);
        emitSubs_rr(VT, AArch64::XZR, UMULHReg, false);
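
    // Common tail for all overflow intrinsics: copy the arithmetic result
    // into the first result register and materialize the i1 overflow flag
    // with CSINC WZR, WZR, <inverted cond>, which yields 1 exactly when the
    // overflow condition CC holds. updateValueMap(II, ResultReg1, 2) maps
    // both struct fields to the two consecutive result registers.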
    ResultReg1 = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);
    ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
                                  AArch64::WZR, AArch64::WZR,
                                  getInvertedCondCode(CC));
    assert((ResultReg1 + 1) == ResultReg2 &&
           "Nonconsecutive result registers.");
    updateValueMap(II, ResultReg1, 2);
  case Intrinsic::aarch64_crc32b:
  case Intrinsic::aarch64_crc32h:
  case Intrinsic::aarch64_crc32w:
  case Intrinsic::aarch64_crc32x:
  case Intrinsic::aarch64_crc32cb:
  case Intrinsic::aarch64_crc32ch:
  case Intrinsic::aarch64_crc32cw:
  case Intrinsic::aarch64_crc32cx: {
    if (!Subtarget->hasCRC())
    switch (II->getIntrinsicID()) {
    case Intrinsic::aarch64_crc32b:
      Opc = AArch64::CRC32Brr;
    case Intrinsic::aarch64_crc32h:
      Opc = AArch64::CRC32Hrr;
    case Intrinsic::aarch64_crc32w:
      Opc = AArch64::CRC32Wrr;
    case Intrinsic::aarch64_crc32x:
      Opc = AArch64::CRC32Xrr;
    case Intrinsic::aarch64_crc32cb:
      Opc = AArch64::CRC32CBrr;
    case Intrinsic::aarch64_crc32ch:
      Opc = AArch64::CRC32CHrr;
    case Intrinsic::aarch64_crc32cw:
      Opc = AArch64::CRC32CWrr;
    case Intrinsic::aarch64_crc32cx:
      Opc = AArch64::CRC32CXrr;
    Register LHSReg = getRegForValue(II->getArgOperand(0));
    Register RHSReg = getRegForValue(II->getArgOperand(1));
    if (!LHSReg || !RHSReg)
    Register ResultReg =
        fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, LHSReg, RHSReg);
    updateValueMap(II, ResultReg);
  const Function &F = *I->getParent()->getParent();
  if (!FuncInfo.CanLowerReturn)
  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
  if (TLI.supportSplitCSR(FuncInfo.MF))
  if (Ret->getNumOperands() > 0) {
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    if (ValLocs.size() != 1)
    const Value *RV = Ret->getOperand(0);
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
        !Subtarget->isLittleEndian())
    if (RVVT == MVT::f128)
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
      bool IsZExt = Outs[0].Flags.isZExt();
      SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
      SrcReg = emitAnd_ri(MVT::i64, SrcReg, 0xffffffff);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
                          TII.get(AArch64::RET_ReallyLR));
  for (unsigned RetReg : RetRegs)
bool AArch64FastISel::selectTrunc(const Instruction *I) {
  Type *DestTy = I->getType();
  Type *SrcTy = Op->getType();
  EVT SrcEVT = TLI.getValueType(DL, SrcTy, true);
  EVT DestEVT = TLI.getValueType(DL, DestTy, true);
  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
  if (SrcVT == MVT::i64) {
    Register Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
    ResultReg = emitAnd_ri(MVT::i32, Reg32, Mask);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
  updateValueMap(I, ResultReg);
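
// emiti1Ext (below) extends an i1 value: a zero extend is just an AND with 1
// (the 32-bit AND already clears the upper bits, so extending to i64 only
// needs a SUBREG_TO_REG), while a sign extend goes through SBFMWri, whose
// immediate operands are elided here but presumably replicate bit 0 across
// the register. The i1-to-i64 sign-extend path is not handled inline; the
// elided branch appears to bail out.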
unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {
  assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
          DestVT == MVT::i64) &&
         "Unexpected value type.");
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
    unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, 1);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
    if (DestVT == MVT::i64) {
      Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(AArch64::SUBREG_TO_REG), Reg64)
          .addImm(AArch64::sub_32);
    if (DestVT == MVT::i64) {
    return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
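
// emitMul_rr (below): a multiply is emitted as MADD Rd, Rn, Rm, ZR, i.e. a
// multiply-add whose addend is the zero register (plain MUL is an alias of
// exactly this form).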
unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
    Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR; break;
    Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR; break;
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rrr(Opc, RC, Op0, Op1, ZReg);
unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
  if (RetVT != MVT::i64)
  return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op1, AArch64::XZR);
unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
  if (RetVT != MVT::i64)
  return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op1, AArch64::XZR);
unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg,
  bool NeedTrunc = false;
  case MVT::i8:  Opc = AArch64::LSLVWr; NeedTrunc = true;  Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true;  Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSLVWr;                                   break;
  case MVT::i64: Opc = AArch64::LSLVXr;                                   break;
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
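
// emitLSL_ri (below) shifts by a constant and can fold a preceding zero/sign
// extend into the same instruction: the left shift is encoded as a bitfield
// move (UBFM for zero extend, SBFM for sign extend) with
// ImmR = RegSize - Shift and ImmS = min(SrcBits - 1, DstBits - 1 - Shift),
// so "shl (ext x), C" effectively becomes a single UBFIZ/SBFIZ. When a
// 32-bit source feeds a 64-bit result, SUBREG_TO_REG first places the W
// register into an X register.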
unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) &&
         "Unexpected return value type.");
  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (RetVT == SrcVT) {
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  if (Shift >= DstBits)
  unsigned ImmR = RegSize - Shift;
  unsigned ImmS = std::min<unsigned>(SrcBits - 1, DstBits - 1 - Shift);
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(AArch64::sub_32);
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg,
  bool NeedTrunc = false;
  case MVT::i8:  Opc = AArch64::LSRVWr; NeedTrunc = true;  Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true;  Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSRVWr;                                   break;
  case MVT::i64: Opc = AArch64::LSRVXr;                                   break;
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Mask);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) &&
         "Unexpected return value type.");
  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (RetVT == SrcVT) {
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  if (Shift >= DstBits)
  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
  Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(AArch64::sub_32);
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg,
  bool NeedTrunc = false;
  case MVT::i8:  Opc = AArch64::ASRVWr; NeedTrunc = true;  Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true;  Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::ASRVWr;                                   break;
  case MVT::i64: Opc = AArch64::ASRVXr;                                   break;
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, false);
    Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) &&
         "Unexpected return value type.");
  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (RetVT == SrcVT) {
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  if (Shift >= DstBits)
  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
    {AArch64::SBFMWri, AArch64::SBFMXri},
    {AArch64::UBFMWri, AArch64::UBFMXri}
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(AArch64::sub_32);
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
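
// emitIntExt (below) implements plain integer extends as bitfield moves:
// UBFM for zero extend and SBFM for sign extend, with the width immediate
// chosen from the source type (the elided lines appear to set Imm to 7, 15,
// or 31). Extending a 32-bit value to 64 bits first wraps the W register in
// SUBREG_TO_REG so the 64-bit UBFMXri/SBFMXri can operate on it; i1 sources
// are delegated to emiti1Ext.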
unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return emiti1Ext(SrcReg, DestVT, IsZExt);
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
  else if (DestVT == MVT::i64) {
    Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(AArch64::sub_32);
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rii(Opc, RC, SrcReg, 0, Imm);
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRBBroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRWroX:
  case AArch64::LDRBBroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRWroW:

  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSWi:
  case AArch64::LDRSBWui:
  case AArch64::LDRSHWui:
  case AArch64::LDRSBXui:
  case AArch64::LDRSHXui:
  case AArch64::LDRSWui:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
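
// optimizeIntExtLoad (below) folds a zext/sext whose only operand is a load
// into the load itself: the zero- and sign-extending load opcodes listed
// above already extend into the destination register, so the extend costs
// nothing. When the result must be i64, the 32-bit load result is wrapped in
// SUBREG_TO_REG, the now-dead copy is removed, and the extend is simply
// mapped to the load's register.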
bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
  const auto *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI || !LI->hasOneUse())
  bool IsZExt = isa<ZExtInst>(I);
  const auto *LoadMI = MI;
  if (LoadMI->getOpcode() == TargetOpcode::COPY &&
      LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
    Register LoadReg = MI->getOperand(1).getReg();
    LoadMI = MRI.getUniqueVRegDef(LoadReg);
    assert(LoadMI && "Expected valid instruction");
  if (RetVT != MVT::i64 || SrcVT > MVT::i32) {
    updateValueMap(I, Reg);
  Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(AArch64::SUBREG_TO_REG), Reg64)
      .addImm(AArch64::sub_32);
  assert((MI->getOpcode() == TargetOpcode::COPY &&
          MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
         "Expected copy instruction");
  Reg = MI->getOperand(1).getReg();
  removeDeadCode(I, std::next(I));
  updateValueMap(I, Reg);
bool AArch64FastISel::selectIntExt(const Instruction *I) {
  assert((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
         "Unexpected integer extend instruction.");
  if (!isTypeSupported(I->getType(), RetVT))
  if (!isTypeSupported(I->getOperand(0)->getType(), SrcVT))
  if (optimizeIntExtLoad(I, RetVT, SrcVT))
  Register SrcReg = getRegForValue(I->getOperand(0));
  bool IsZExt = isa<ZExtInst>(I);
  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
    if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
      if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
        Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(AArch64::SUBREG_TO_REG), ResultReg)
            .addImm(AArch64::sub_32);
      updateValueMap(I, SrcReg);
  unsigned ResultReg = emitIntExt(SrcVT, SrcReg, RetVT, IsZExt);
  updateValueMap(I, ResultReg);
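
// selectRem (below) lowers srem/urem without a hardware remainder
// instruction: it emits SDIV/UDIV to get the quotient and then MSUB
// (ResultReg = Src0 - Quot * Src1) to recover the remainder.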
bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
  bool Is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
    DivOpc = Is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    DivOpc = Is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
  unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  Register Src0Reg = getRegForValue(I->getOperand(0));
  Register Src1Reg = getRegForValue(I->getOperand(1));
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  Register QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
  assert(QuotReg && "Unexpected DIV instruction emission failure.");
  Register ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
  updateValueMap(I, ResultReg);
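
// The selectMul body below (its signature is elided) special-cases
// multiplication by a power-of-two constant: the multiply becomes a left
// shift by log2 of the constant, and a zext/sext feeding the other operand
// can be folded into that shift via emitLSL_ri. Anything else falls through
// to a plain MADD-based emitMul_rr.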
  if (!isTypeSupported(I->getType(), VT, true))
  const Value *Src0 = I->getOperand(0);
  const Value *Src1 = I->getOperand(1);
  if (const auto *C = dyn_cast<ConstantInt>(Src0))
    if (C->getValue().isPowerOf2())
  if (const auto *C = dyn_cast<ConstantInt>(Src1))
    if (C->getValue().isPowerOf2()) {
      uint64_t ShiftVal = C->getValue().logBase2();
      if (const auto *ZExt = dyn_cast<ZExtInst>(Src0)) {
        if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) {
          Src0 = ZExt->getOperand(0);
      } else if (const auto *SExt = dyn_cast<SExtInst>(Src0)) {
        if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) {
          Src0 = SExt->getOperand(0);
      Register Src0Reg = getRegForValue(Src0);
      unsigned ResultReg =
          emitLSL_ri(VT, SrcVT, Src0Reg, ShiftVal, IsZExt);
      updateValueMap(I, ResultReg);
  Register Src0Reg = getRegForValue(I->getOperand(0));
  Register Src1Reg = getRegForValue(I->getOperand(1));
  unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src1Reg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectShift(const Instruction *I) {
  if (!isTypeSupported(I->getType(), RetVT, true))
    return selectOperator(I, I->getOpcode());
  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = 0;
    bool IsZExt = I->getOpcode() != Instruction::AShr;
    const Value *Op0 = I->getOperand(0);
    if (const auto *ZExt = dyn_cast<ZExtInst>(Op0)) {
      if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
        Op0 = ZExt->getOperand(0);
    } else if (const auto *SExt = dyn_cast<SExtInst>(Op0)) {
      if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
        Op0 = SExt->getOperand(0);
    Register Op0Reg = getRegForValue(Op0);
    switch (I->getOpcode()) {
    case Instruction::Shl:
      ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
    case Instruction::AShr:
      ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
    case Instruction::LShr:
      ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
    updateValueMap(I, ResultReg);
  Register Op0Reg = getRegForValue(I->getOperand(0));
  Register Op1Reg = getRegForValue(I->getOperand(1));
  unsigned ResultReg = 0;
  switch (I->getOpcode()) {
  case Instruction::Shl:
    ResultReg = emitLSL_rr(RetVT, Op0Reg, Op1Reg);
  case Instruction::AShr:
    ResultReg = emitASR_rr(RetVT, Op0Reg, Op1Reg);
  case Instruction::LShr:
    ResultReg = emitLSR_rr(RetVT, Op0Reg, Op1Reg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectBitCast(const Instruction *I) {
  if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
  if (!isTypeLegal(I->getType(), RetVT))
  if (RetVT == MVT::f32 && SrcVT == MVT::i32)
    Opc = AArch64::FMOVWSr;
  else if (RetVT == MVT::f64 && SrcVT == MVT::i64)
    Opc = AArch64::FMOVXDr;
  else if (RetVT == MVT::i32 && SrcVT == MVT::f32)
    Opc = AArch64::FMOVSWr;
  else if (RetVT == MVT::i64 && SrcVT == MVT::f64)
    Opc = AArch64::FMOVDXr;
  case MVT::i32: RC = &AArch64::GPR32RegClass; break;
  case MVT::i64: RC = &AArch64::GPR64RegClass; break;
  case MVT::f32: RC = &AArch64::FPR32RegClass; break;
  case MVT::f64: RC = &AArch64::FPR64RegClass; break;
  Register Op0Reg = getRegForValue(I->getOperand(0));
  Register ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectFRem(const Instruction *I) {
  if (!isTypeLegal(I->getType(), RetVT))
    LC = RTLIB::REM_F32;
    LC = RTLIB::REM_F64;
  Args.reserve(I->getNumOperands());
  for (auto &Arg : I->operands()) {
    Entry.Ty = Arg->getType();
    Args.push_back(Entry);
  CallLoweringInfo CLI;
  CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
                TLI.getLibcallName(LC), std::move(Args));
  if (!lowerCallTo(CLI))
  updateValueMap(I, CLI.ResultReg);
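
// selectSDiv (below) only handles division by (negated) powers of two. An
// exact sdiv is a single arithmetic shift right. Otherwise the usual
// rounding fix is applied: add (2^Lg2 - 1) to the dividend, use CMP plus
// CSEL to pick the adjusted value only when the dividend is negative, then
// shift right; for a negative divisor the result is negated by subtracting
// the shifted value from the zero register (the emitAddSub_rs call).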
bool AArch64FastISel::selectSDiv(const Instruction *I) {
  if (!isTypeLegal(I->getType(), VT))
  if (!isa<ConstantInt>(I->getOperand(1)))
  const APInt &C = cast<ConstantInt>(I->getOperand(1))->getValue();
  if ((VT != MVT::i32 && VT != MVT::i64) || !C ||
      !(C.isPowerOf2() || C.isNegatedPowerOf2()))
  unsigned Lg2 = C.countr_zero();
  Register Src0Reg = getRegForValue(I->getOperand(0));
  if (cast<BinaryOperator>(I)->isExact()) {
    unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Lg2);
    updateValueMap(I, ResultReg);
  int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
  unsigned AddReg = emitAdd_ri_(VT, Src0Reg, Pow2MinusOne);
  if (!emitICmp_ri(VT, Src0Reg, 0))
  if (VT == MVT::i64) {
    SelectOpc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
    SelectOpc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
  Register SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
    ResultReg = emitAddSub_rs(false, VT, ZeroReg, SelectReg,
    ResultReg = emitASR_ri(VT, VT, SelectReg, Lg2);
  updateValueMap(I, ResultReg);
unsigned AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
  MVT PtrVT = TLI.getPointerTy(DL);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, false);
  } else if (IdxVT.bitsGT(PtrVT))
    llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
  if (Subtarget->isTargetILP32())
  Register N = getRegForValue(I->getOperand(0));
  MVT VT = TLI.getPointerTy(DL);
    const Value *Idx = GTI.getOperand();
    if (auto *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        TotalOffs += GTI.getSequentialElementStride(DL) *
                     cast<ConstantInt>(CI)->getSExtValue();
        N = emitAdd_ri_(VT, N, TotalOffs);
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (ElementSize != 1) {
        IdxN = emitMul_rr(VT, IdxN, C);
    N = emitAdd_ri_(VT, N, TotalOffs);
  updateValueMap(I, N);
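
// The cmpxchg lowering below (the function signature is elided above the
// assert) uses the CMP_SWAP_32/CMP_SWAP_64 pseudo, which is expanded later
// into an LL/SC loop. The pseudo returns the loaded value plus a scratch
// register; a SUBS against the expected value then recomputes the
// comparison, and CSINC materializes the i1 success flag into the second
// result register.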
  assert(TM.getOptLevel() == CodeGenOptLevel::None &&
         "cmpxchg survived AtomicExpand at optlevel > -O0");
  auto *RetPairTy = cast<StructType>(I->getType());
  Type *RetTy = RetPairTy->getTypeAtIndex(0U);
  assert(RetPairTy->getTypeAtIndex(1U)->isIntegerTy(1) &&
         "cmpxchg has a non-i1 status result");
  if (!isTypeLegal(RetTy, VT))
  unsigned Opc, CmpOpc;
  if (VT == MVT::i32) {
    Opc = AArch64::CMP_SWAP_32;
    CmpOpc = AArch64::SUBSWrs;
    ResRC = &AArch64::GPR32RegClass;
  } else if (VT == MVT::i64) {
    Opc = AArch64::CMP_SWAP_64;
    CmpOpc = AArch64::SUBSXrs;
    ResRC = &AArch64::GPR64RegClass;
      II, getRegForValue(I->getPointerOperand()), II.getNumDefs());
      II, getRegForValue(I->getCompareOperand()), II.getNumDefs() + 1);
      II, getRegForValue(I->getNewValOperand()), II.getNumDefs() + 2);
  const Register ResultReg1 = createResultReg(ResRC);
  const Register ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
  const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc))
      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr))
  assert((ResultReg1 + 1) == ResultReg2 && "Nonconsecutive result registers.");
  updateValueMap(I, ResultReg1, 2);
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
  if (TLI.fallBackToDAGISel(*I))
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
    return selectAddSub(I);
  case Instruction::Mul:
    return selectMul(I);
  case Instruction::SDiv:
    return selectSDiv(I);
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::IndirectBr:
    return selectIndirectBr(I);
  case Instruction::BitCast:
    return selectBitCast(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, true);
  case Instruction::FPToUI:
    return selectFPToInt(I, false);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::SIToFP:
    return selectIntToFP(I, true);
  case Instruction::UIToFP:
    return selectIntToFP(I, false);
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::FRem:
    return selectFRem(I);
  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);
  case Instruction::AtomicCmpXchg:
    return selectAtomicCmpXchg(cast<AtomicCmpXchgInst>(I));
  return selectOperator(I, I->getOpcode());

// (From the target's createFastISel entry point; its signature is elided.)
  return new AArch64FastISel(FuncInfo, LibInfo);