#include "llvm/IR/IntrinsicsAArch64.h"

class AArch64FastISel final : public FastISel {

  enum BaseKind { RegBase, FrameIndexBase };

  BaseKind Kind = RegBase;
  const GlobalValue *GV = nullptr;

  void setKind(BaseKind K) { Kind = K; }
  BaseKind getKind() const { return Kind; }
  bool isRegBase() const { return Kind == RegBase; }
  bool isFIBase() const { return Kind == FrameIndexBase; }

    assert(isRegBase() && "Invalid base register access!");
    assert(isRegBase() && "Invalid base register access!");

  Register getOffsetReg() const { return OffsetReg; }

  void setFI(unsigned FI) {
    assert(isFIBase() && "Invalid base frame index access!");
  unsigned getFI() const {
    assert(isFIBase() && "Invalid base frame index access!");

  void setOffset(int64_t O) { Offset = O; }
  void setShift(unsigned S) { Shift = S; }
  unsigned getShift() { return Shift; }

  void setGlobalValue(const GlobalValue *G) { GV = G; }
  const GlobalValue *getGlobalValue() { return GV; }
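  // The Address helper above models the two addressing forms this FastISel
  // folds into loads and stores: a register base (Reg [+ OffsetReg << Shift]
  // + Offset, optionally anchored on a GlobalValue) and a frame-index base
  // (FI + Offset). The asserts in the accessors guard against reading a field
  // that does not belong to the current BaseKind.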
  const AArch64Subtarget *Subtarget;
  LLVMContext *Context;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;

  bool selectAddSub(const Instruction *I);
  bool selectLogicalOp(const Instruction *I);
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBranch(const Instruction *I);
  bool selectIndirectBr(const Instruction *I);
  bool selectCmp(const Instruction *I);
  bool selectSelect(const Instruction *I);
  bool selectFPExt(const Instruction *I);
  bool selectFPTrunc(const Instruction *I);
  bool selectFPToInt(const Instruction *I, bool Signed);
  bool selectIntToFP(const Instruction *I, bool Signed);
  bool selectRem(const Instruction *I, unsigned ISDOpcode);
  bool selectRet(const Instruction *I);
  bool selectTrunc(const Instruction *I);
  bool selectIntExt(const Instruction *I);
  bool selectMul(const Instruction *I);
  bool selectShift(const Instruction *I);
  bool selectBitCast(const Instruction *I);
  bool selectFRem(const Instruction *I);
  bool selectSDiv(const Instruction *I);
  bool selectGetElementPtr(const Instruction *I);
  bool selectAtomicCmpXchg(const AtomicCmpXchgInst *I);
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
  bool isValueAvailable(const Value *V) const;
  bool computeAddress(const Value *Obj, Address &Addr, Type *Ty = nullptr);
  bool computeCallAddress(const Value *V, Address &Addr);
  bool simplifyAddress(Address &Addr, MVT VT);
  void addLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB,
                            unsigned ScaleFactor, MachineMemOperand *MMO);
  bool isMemCpySmall(uint64_t Len, MaybeAlign Alignment);
  bool tryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                          MaybeAlign Alignment);
  bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
  bool optimizeSelect(const SelectInst *SI);

                     const Value *RHS, bool SetFlags = false,
                     bool WantResult = true, bool IsZExt = false);
                          Register RHSReg, bool SetFlags = false,
                          bool WantResult = true);
  Register emitAddSub_ri(bool UseAdd, MVT RetVT, Register LHSReg, uint64_t Imm,
                         bool SetFlags = false, bool WantResult = true);
                          uint64_t ShiftImm, bool SetFlags = false,
                          bool WantResult = true);
                          uint64_t ShiftImm, bool SetFlags = false,
                          bool WantResult = true);
  bool emitCompareAndBranch(const BranchInst *BI);
  bool emitICmp_ri(MVT RetVT, Register LHSReg, uint64_t Imm);
                    MachineMemOperand *MMO = nullptr);
                    MachineMemOperand *MMO = nullptr);
                    MachineMemOperand *MMO = nullptr);
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
                   bool SetFlags = false, bool WantResult = true,
                   bool IsZExt = false);
                        bool WantResult = true);
                        bool WantResult = true);
                            Register RHSReg, uint64_t ShiftImm);
                       bool IsZExt = false);
  Register materializeInt(const ConstantInt *CI, MVT VT);
  Register materializeFP(const ConstantFP *CFP, MVT VT);
  Register materializeGV(const GlobalValue *GV);

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
                       SmallVectorImpl<Type *> &OrigTys, unsigned &NumBytes);
  bool finishCall(CallLoweringInfo &CLI, unsigned NumBytes);

  Register fastMaterializeAlloca(const AllocaInst *AI) override;
  Register fastMaterializeConstant(const Constant *C) override;
  Register fastMaterializeFloatZero(const ConstantFP *CF) override;
  explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
                           const TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo, true) {

  bool fastSelectInstruction(const Instruction *I) override;

#include "AArch64GenFastISel.inc"

         "Unexpected integer extend instruction.");
  assert(!I->getType()->isVectorTy() && I->getType()->isIntegerTy() &&
         "Unexpected value type.");

  if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr()))

  if (CC == CallingConv::GHC)
  if (CC == CallingConv::CFGuard_Check)
Register AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
         "Alloca should always return a pointer.");

  auto SI = FuncInfo.StaticAllocaMap.find(AI);
  if (SI == FuncInfo.StaticAllocaMap.end())

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
Register AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
  const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
                                                   : &AArch64::GPR32RegClass;
  unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
  Register ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
Register AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
    return fastMaterializeFloatZero(CFP);

  if (VT != MVT::f32 && VT != MVT::f64)

  bool Is64Bit = (VT == MVT::f64);
    unsigned Opc = Is64Bit ? AArch64::FMOVDi : AArch64::FMOVSi;
    return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);

    unsigned Opc1 = Is64Bit ? AArch64::MOVi64imm : AArch64::MOVi32imm;
    const TargetRegisterClass *RC =
        Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;

    Register TmpReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc1), TmpReg)
        .addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());

    Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(TmpReg, getKillRegState(true));

  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
  Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

  unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
  Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
Register AArch64FastISel::materializeGV(const GlobalValue *GV) {
  if (FuncInfo.MF->getInfo<AArch64FunctionInfo>()->hasELFSignedGOT())

  EVT DestEVT = TLI.getValueType(DL, GV->getType(), true);

  Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

      ResultReg = createResultReg(&AArch64::GPR32RegClass);
      LdrOpc = AArch64::LDRWui;
      ResultReg = createResultReg(&AArch64::GPR64RegClass);
      LdrOpc = AArch64::LDRXui;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(LdrOpc),

      Register Result64 = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::SUBREG_TO_REG))

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

      Register DstReg = createResultReg(&AArch64::GPR64commonRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::MOVKXi),

    ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),
Register AArch64FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

    assert(VT == MVT::i64 && "Expected 64-bit pointers");
    return materializeInt(ConstantInt::get(Type::getInt64Ty(*Context), 0), VT);

    return materializeInt(CI, VT);
    return materializeFP(CFP, VT);
    return materializeGV(GV);
Register AArch64FastISel::fastMaterializeFloatZero(const ConstantFP *CFP) {
         "Floating-point constant is not a positive zero.");

  if (!isTypeLegal(CFP->getType(), VT))

  if (VT != MVT::f32 && VT != MVT::f64)

  bool Is64Bit = (VT == MVT::f64);
  unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
  unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
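  // A positive floating-point zero is produced by moving the integer zero
  // register (WZR/XZR) into the FP register file with FMOVWSr/FMOVXDr rather
  // than loading it from the constant pool as materializeFP does.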
  return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg);

  if (C->getValue().isPowerOf2())
  if (C->getValue().isPowerOf2())
bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr,
                                     Type *Ty) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
      Opcode = I->getOpcode();
    Opcode = C->getOpcode();

  if (Ty->getAddressSpace() > 255)

  case Instruction::BitCast:
    return computeAddress(U->getOperand(0), Addr, Ty);

  case Instruction::IntToPtr:
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr, Ty);

  case Instruction::PtrToInt:
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr, Ty);

  case Instruction::GetElementPtr: {
    uint64_t TmpOffset = Addr.getOffset();
      const Value *Op = GTI.getOperand();
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        uint64_t S = GTI.getSequentialElementStride(DL);
          if (canFoldAddIntoGEP(U, Op)) {
            goto unsupported_gep;

    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr, Ty))

  case Instruction::Alloca: {
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
  case Instruction::Add: {
      return computeAddress(LHS, Addr, Ty);

    if (computeAddress(LHS, Addr, Ty) && computeAddress(RHS, Addr, Ty))

  case Instruction::Sub: {
      return computeAddress(LHS, Addr, Ty);

  case Instruction::Shl: {
    if (Addr.getOffsetReg())

    if (Val < 1 || Val > 3)

    uint64_t NumBytes = 0;
      uint64_t NumBits = DL.getTypeSizeInBits(Ty);
      NumBytes = NumBits / 8;

    if (NumBytes != (1ULL << Val))

    const Value *Src = U->getOperand(0);
      if (FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
            ZE->getOperand(0)->getType()->isIntegerTy(32)) {
          Src = ZE->getOperand(0);
            SE->getOperand(0)->getType()->isIntegerTy(32)) {
          Src = SE->getOperand(0);

      if (AI->getOpcode() == Instruction::And) {
          if (C->getValue() == 0xffffffff)
          if (C->getValue() == 0xffffffff) {
            Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
            Addr.setOffsetReg(Reg);

    Addr.setOffsetReg(Reg);
  case Instruction::Mul: {
    if (Addr.getOffsetReg())

      if (C->getValue().isPowerOf2())

    unsigned Val = C->getValue().logBase2();
    if (Val < 1 || Val > 3)

    uint64_t NumBytes = 0;
      uint64_t NumBits = DL.getTypeSizeInBits(Ty);
      NumBytes = NumBits / 8;

    if (NumBytes != (1ULL << Val))

      if (FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
            ZE->getOperand(0)->getType()->isIntegerTy(32)) {
          Src = ZE->getOperand(0);
            SE->getOperand(0)->getType()->isIntegerTy(32)) {
          Src = SE->getOperand(0);

    Addr.setOffsetReg(Reg);

  case Instruction::And: {
    if (Addr.getOffsetReg())

    if (!Ty || DL.getTypeSizeInBits(Ty) != 8)

      if (C->getValue() == 0xffffffff)
      if (C->getValue() == 0xffffffff) {

    Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
    Addr.setOffsetReg(Reg);

  case Instruction::SExt:
  case Instruction::ZExt: {
    if (!Addr.getReg() || Addr.getOffsetReg())

    const Value *Src = nullptr;
      if (!isIntExtFree(ZE) && ZE->getOperand(0)->getType()->isIntegerTy(32)) {
        Src = ZE->getOperand(0);
      if (!isIntExtFree(SE) && SE->getOperand(0)->getType()->isIntegerTy(32)) {
        Src = SE->getOperand(0);

    Addr.setOffsetReg(Reg);

  if (Addr.isRegBase() && !Addr.getReg()) {

  if (!Addr.getOffsetReg()) {
    Addr.setOffsetReg(Reg);
bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
    Opcode = I->getOpcode();
    InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
    Opcode = C->getOpcode();

  case Instruction::BitCast:
      return computeCallAddress(U->getOperand(0), Addr);

  case Instruction::IntToPtr:
        TLI.getValueType(DL, U->getOperand(0)->getType()) ==
            TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

  case Instruction::PtrToInt:
    if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

    Addr.setGlobalValue(GV);

  if (!Addr.getGlobalValue()) {
    Addr.setReg(getRegForValue(V));
    return Addr.getReg().isValid();
bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  if (evt == MVT::Other || !evt.isSimple())

  return TLI.isTypeLegal(VT);

bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {
  if (isTypeLegal(Ty, VT))

  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)

bool AArch64FastISel::isValueAvailable(const Value *V) const {
  return FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB;
bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
  bool ImmediateOffsetNeedsLowering = false;
  bool RegisterOffsetNeedsLowering = false;
  int64_t Offset = Addr.getOffset();
    ImmediateOffsetNeedsLowering = true;
  else if (Offset > 0 && !(Offset & (ScaleFactor - 1)) &&
    ImmediateOffsetNeedsLowering = true;

  if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg())
    RegisterOffsetNeedsLowering = true;

  if (Addr.isRegBase() && Addr.getOffsetReg() && !Addr.getReg())
    RegisterOffsetNeedsLowering = true;

  if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
    Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADDXri),

    Addr.setKind(Address::RegBase);
    Addr.setReg(ResultReg);

  if (RegisterOffsetNeedsLowering) {
    if (Addr.getReg()) {
        ResultReg = emitAddSub_rx(true, MVT::i64, Addr.getReg(),
                                  Addr.getOffsetReg(), Addr.getExtendType(),
        ResultReg = emitAddSub_rs(true, MVT::i64, Addr.getReg(),
        ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
                               Addr.getShift(), true);
        ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
                               Addr.getShift(), false);
        ResultReg = emitLSL_ri(MVT::i64, MVT::i64, Addr.getOffsetReg(),

    Addr.setReg(ResultReg);
    Addr.setOffsetReg(0);

  if (ImmediateOffsetNeedsLowering) {
    ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), Offset);
    Addr.setReg(ResultReg);
void AArch64FastISel::addLoadStoreOperands(Address &Addr,
                                           const MachineInstrBuilder &MIB,
                                           unsigned ScaleFactor,
                                           MachineMemOperand *MMO) {
  int64_t Offset = Addr.getOffset() / ScaleFactor;

  if (Addr.isFIBase()) {
    int FI = Addr.getFI();
    MMO = FuncInfo.MF->getMachineMemOperand(
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
    assert(Addr.isRegBase() && "Unexpected address kind.");

    if (Addr.getOffsetReg()) {
      assert(Addr.getOffset() == 0 && "Unexpected offset");
      MIB.addReg(Addr.getReg());
      MIB.addReg(Addr.getOffsetReg());
      MIB.addImm(Addr.getShift() != 0);
Register AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                                     bool WantResult, bool IsZExt) {
  bool NeedExtend = false;

      if (SI->getOpcode() == Instruction::Shl ||
          SI->getOpcode() == Instruction::LShr ||
          SI->getOpcode() == Instruction::AShr)

    LHSReg = emitIntExt(SrcVT, LHSReg, RetVT, IsZExt);

    uint64_t Imm = IsZExt ? C->getZExtValue() : C->getSExtValue();
    if (C->isNegative())
      ResultReg = emitAddSub_ri(!UseAdd, RetVT, LHSReg, -Imm, SetFlags,
      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, Imm, SetFlags,
    if (C->isNullValue())
      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, 0, SetFlags, WantResult);

      isValueAvailable(RHS)) {
        return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType, 0,
                             SetFlags, WantResult);

        if (C->getValue().isPowerOf2())
      Register RHSReg = getRegForValue(MulLHS);
      ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,
                                ShiftVal, SetFlags, WantResult);

      switch (SI->getOpcode()) {
        uint64_t ShiftVal = C->getZExtValue();
        Register RHSReg = getRegForValue(SI->getOperand(0));
        ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, ShiftType,
                                  ShiftVal, SetFlags, WantResult);

    RHSReg = emitIntExt(SrcVT, RHSReg, RetVT, IsZExt);

  return emitAddSub_rr(UseAdd, RetVT, LHSReg, RHSReg, SetFlags, WantResult);
Register AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, Register LHSReg,
  assert(LHSReg && RHSReg && "Invalid register number.");

  if (LHSReg == AArch64::SP || LHSReg == AArch64::WSP ||
      RHSReg == AArch64::SP || RHSReg == AArch64::WSP)

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWrr,  AArch64::SUBXrr  },
      { AArch64::ADDWrr,  AArch64::ADDXrr  } },
    { { AArch64::SUBSWrr, AArch64::SUBSXrr },
      { AArch64::ADDSWrr, AArch64::ADDSXrr } }
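  // The table is indexed as OpcTable[SetFlags][UseAdd][Is64Bit]: the outer
  // dimension selects the flag-setting (ADDS/SUBS) variants, the middle one
  // picks ADD vs. SUB, and the inner one picks the W- or X-register form.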
  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  const MCInstrDesc &II = TII.get(Opc);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
Register AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, Register LHSReg,
                                        uint64_t Imm, bool SetFlags,
  assert(LHSReg && "Invalid register number.");

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  else if ((Imm & 0xfff000) == Imm) {

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWri,  AArch64::SUBXri  },
      { AArch64::ADDWri,  AArch64::ADDXri  } },
    { { AArch64::SUBSWri, AArch64::SUBSXri },
      { AArch64::ADDSWri, AArch64::ADDSXri } }
  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
  const TargetRegisterClass *RC;
    RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;
    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  const MCInstrDesc &II = TII.get(Opc);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
Register AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, Register LHSReg,
                                        uint64_t ShiftImm, bool SetFlags,
  assert(LHSReg && RHSReg && "Invalid register number.");
  assert(LHSReg != AArch64::SP && LHSReg != AArch64::WSP &&
         RHSReg != AArch64::SP && RHSReg != AArch64::WSP);

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWrs,  AArch64::SUBXrs  },
      { AArch64::ADDWrs,  AArch64::ADDXrs  } },
    { { AArch64::SUBSWrs, AArch64::SUBSXrs },
      { AArch64::ADDSWrs, AArch64::ADDSXrs } }
  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  const MCInstrDesc &II = TII.get(Opc);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
      .addImm(getShifterImm(ShiftType, ShiftImm));
Register AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, Register LHSReg,
                                        uint64_t ShiftImm, bool SetFlags,
  assert(LHSReg && RHSReg && "Invalid register number.");
  assert(LHSReg != AArch64::XZR && LHSReg != AArch64::WZR &&
         RHSReg != AArch64::XZR && RHSReg != AArch64::WZR);

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  static const unsigned OpcTable[2][2][2] = {
    { { AArch64::SUBWrx,  AArch64::SUBXrx  },
      { AArch64::ADDWrx,  AArch64::ADDXrx  } },
    { { AArch64::SUBSWrx, AArch64::SUBSXrx },
      { AArch64::ADDSWrx, AArch64::ADDSXrx } }
  bool Is64Bit = RetVT == MVT::i64;
  unsigned Opc = OpcTable[SetFlags][UseAdd][Is64Bit];
  const TargetRegisterClass *RC = nullptr;
    RC = Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
    RC = Is64Bit ? &AArch64::GPR64spRegClass : &AArch64::GPR32spRegClass;
    ResultReg = createResultReg(RC);
    ResultReg = Is64Bit ? AArch64::XZR : AArch64::WZR;

  const MCInstrDesc &II = TII.get(Opc);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
      .addImm(getArithExtendImm(ExtType, ShiftImm));
bool AArch64FastISel::emitCmp(const Value *LHS, const Value *RHS, bool IsZExt) {
  EVT EVT = TLI.getValueType(DL, Ty, true);

    return emitICmp(VT, LHS, RHS, IsZExt);
    return emitFCmp(VT, LHS, RHS);

bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS,
  return emitSub(RetVT, LHS, RHS, true, false,

bool AArch64FastISel::emitICmp_ri(MVT RetVT, Register LHSReg, uint64_t Imm) {
  return emitAddSub_ri(false, RetVT, LHSReg, Imm,

bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
  if (RetVT != MVT::f32 && RetVT != MVT::f64)

  bool UseImm = false;
    unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))

  unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
                              bool SetFlags, bool WantResult, bool IsZExt) {
  return emitAddSub(true, RetVT, LHS, RHS, SetFlags, WantResult,

    ResultReg = emitAddSub_ri(false, VT, Op0, -Imm);
    ResultReg = emitAddSub_ri(true, VT, Op0, Imm);
    ResultReg = emitAddSub_rr(true, VT, Op0, CReg);

                              bool SetFlags, bool WantResult, bool IsZExt) {
  return emitAddSub(false, RetVT, LHS, RHS, SetFlags, WantResult,

                                      Register RHSReg, bool WantResult) {
  return emitAddSub_rr(false, RetVT, LHSReg, RHSReg,

                                      uint64_t ShiftImm, bool WantResult) {
  return emitAddSub_rs(false, RetVT, LHSReg, RHSReg, ShiftType, ShiftImm,
                       true, WantResult);
Register AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
    uint64_t Imm = C->getZExtValue();
    ResultReg = emitLogicalOp_ri(ISDOpc, RetVT, LHSReg, Imm);

        if (C->getValue().isPowerOf2())
      Register RHSReg = getRegForValue(MulLHS);
      ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);

        uint64_t ShiftVal = C->getZExtValue();
        Register RHSReg = getRegForValue(SI->getOperand(0));
        ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);

  MVT VT = std::max(MVT::i32, RetVT.SimpleTy);
  ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, RHSReg);
  if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
    uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);

Register AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
                "ISD nodes are not consecutive!");
  static const unsigned OpcTable[3][2] = {
    { AArch64::ANDWri, AArch64::ANDXri },
    { AArch64::ORRWri, AArch64::ORRXri },
    { AArch64::EORWri, AArch64::EORXri }
  const TargetRegisterClass *RC;
    Opc = OpcTable[Idx][0];
    RC = &AArch64::GPR32spRegClass;
    RC = &AArch64::GPR64spRegClass;
      fastEmitInst_ri(Opc, RC, LHSReg,
  if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
    uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);

Register AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
                                           uint64_t ShiftImm) {
                "ISD nodes are not consecutive!");
  static const unsigned OpcTable[3][2] = {
    { AArch64::ANDWrs, AArch64::ANDXrs },
    { AArch64::ORRWrs, AArch64::ORRXrs },
    { AArch64::EORWrs, AArch64::EORXrs }
  const TargetRegisterClass *RC;
    RC = &AArch64::GPR32RegClass;
    RC = &AArch64::GPR64RegClass;
      fastEmitInst_rri(Opc, RC, LHSReg, RHSReg,
  if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
    uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);

Register AArch64FastISel::emitAnd_ri(MVT RetVT, Register LHSReg, uint64_t Imm) {
  return emitLogicalOp_ri(ISD::AND, RetVT, LHSReg, Imm);
                                bool WantZExt, MachineMemOperand *MMO) {
  if (!TLI.allowsMisalignedMemoryAccesses(VT))

  if (!simplifyAddress(Addr, VT))

  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {

  static const unsigned GPOpcTable[2][8][4] = {
    { { AArch64::LDURSBWi,  AArch64::LDURSHWi,  AArch64::LDURWi,
      { AArch64::LDURSBXi,  AArch64::LDURSHXi,  AArch64::LDURSWi,
      { AArch64::LDRSBWui,  AArch64::LDRSHWui,  AArch64::LDRWui,
      { AArch64::LDRSBXui,  AArch64::LDRSHXui,  AArch64::LDRSWui,
      { AArch64::LDRSBWroX, AArch64::LDRSHWroX, AArch64::LDRWroX,
      { AArch64::LDRSBXroX, AArch64::LDRSHXroX, AArch64::LDRSWroX,
      { AArch64::LDRSBWroW, AArch64::LDRSHWroW, AArch64::LDRWroW,
      { AArch64::LDRSBXroW, AArch64::LDRSHXroW, AArch64::LDRSWroW,
    { { AArch64::LDURBBi,   AArch64::LDURHHi,   AArch64::LDURWi,
      { AArch64::LDURBBi,   AArch64::LDURHHi,   AArch64::LDURWi,
      { AArch64::LDRBBui,   AArch64::LDRHHui,   AArch64::LDRWui,
      { AArch64::LDRBBui,   AArch64::LDRHHui,   AArch64::LDRWui,
      { AArch64::LDRBBroX,  AArch64::LDRHHroX,  AArch64::LDRWroX,
      { AArch64::LDRBBroX,  AArch64::LDRHHroX,  AArch64::LDRWroX,
      { AArch64::LDRBBroW,  AArch64::LDRHHroW,  AArch64::LDRWroW,
      { AArch64::LDRBBroW,  AArch64::LDRHHroW,  AArch64::LDRWroW,

  static const unsigned FPOpcTable[4][2] = {
    { AArch64::LDURSi,  AArch64::LDURDi },
    { AArch64::LDRSui,  AArch64::LDRDui },
    { AArch64::LDRSroX, AArch64::LDRDroX },
    { AArch64::LDRSroW, AArch64::LDRDroW }
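  // GPOpcTable is indexed as [WantZExt][2 * Idx + IsRet64Bit][size class]:
  // the first dimension separates sign- and zero-extending loads, the second
  // folds together the addressing form (unscaled, scaled immediate, X- or
  // W-register offset) and the width of the result register, and the last
  // selects between i8/i16/i32/i64 accesses. FPOpcTable is indexed by the
  // addressing form and by f32 vs. f64.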
  const TargetRegisterClass *RC;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;

  bool IsRet64Bit = RetVT == MVT::i64;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][0];
    RC = (IsRet64Bit && !WantZExt) ? &AArch64::GPR64RegClass
                                   : &AArch64::GPR32RegClass;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][1];
    RC = (IsRet64Bit && !WantZExt) ? &AArch64::GPR64RegClass
                                   : &AArch64::GPR32RegClass;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][2];
    RC = (IsRet64Bit && !WantZExt) ? &AArch64::GPR64RegClass
                                   : &AArch64::GPR32RegClass;
    Opc = GPOpcTable[WantZExt][2 * Idx + IsRet64Bit][3];
    RC = &AArch64::GPR64RegClass;
    Opc = FPOpcTable[Idx][0];
    RC = &AArch64::FPR32RegClass;
    Opc = FPOpcTable[Idx][1];
    RC = &AArch64::FPR64RegClass;

  Register ResultReg = createResultReg(RC);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(Opc), ResultReg);

  if (VT == MVT::i1) {
    Register ANDReg = emitAnd_ri(MVT::i32, ResultReg, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");

  if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
    Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(AArch64::sub_32);
bool AArch64FastISel::selectAddSub(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true))

    return selectOperator(I, I->getOpcode());

  switch (I->getOpcode()) {
  case Instruction::Add:
    ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Sub:
    ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1));

  updateValueMap(I, ResultReg);

bool AArch64FastISel::selectLogicalOp(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true))

    return selectOperator(I, I->getOpcode());

  switch (I->getOpcode()) {
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));

  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectLoad(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true) ||

  const Value *SV = I->getOperand(0);
  if (TLI.supportSwiftError()) {
      if (Arg->hasSwiftErrorAttr())
      if (Alloca->isSwiftError())

  if (!computeAddress(I->getOperand(0), Addr, I->getType()))

  bool WantZExt = true;
  const Value *IntExtVal = nullptr;
  if (I->hasOneUse()) {
      if (isTypeSupported(ZE->getType(), RetVT))
      if (isTypeSupported(SE->getType(), RetVT))

      emitLoad(VT, RetVT, Addr, WantZExt, createMachineMemOperandFor(I));

    auto *MI = MRI.getUniqueVRegDef(Reg);
      if (RetVT == MVT::i64 && VT <= MVT::i32) {
          ResultReg = std::prev(I)->getOperand(0).getReg();
          removeDeadCode(I, std::next(I));
          ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
      updateValueMap(I, ResultReg);

      for (auto &Opnd : MI->uses()) {
          Reg = Opnd.getReg();
      removeDeadCode(I, std::next(I));

    updateValueMap(IntExtVal, ResultReg);

  updateValueMap(I, ResultReg);
bool AArch64FastISel::emitStoreRelease(MVT VT, Register SrcReg,
                                       MachineMemOperand *MMO) {
  default: return false;
  case MVT::i8:  Opc = AArch64::STLRB; break;
  case MVT::i16: Opc = AArch64::STLRH; break;
  case MVT::i32: Opc = AArch64::STLRW; break;
  case MVT::i64: Opc = AArch64::STLRX; break;

  const MCInstrDesc &II = TII.get(Opc);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                                MachineMemOperand *MMO) {
  if (!TLI.allowsMisalignedMemoryAccesses(VT))

  if (!simplifyAddress(Addr, VT))

  bool UseScaled = true;
  if ((Addr.getOffset() < 0) || (Addr.getOffset() & (ScaleFactor - 1))) {

  static const unsigned OpcTable[4][6] = {
    { AArch64::STURBBi,  AArch64::STURHHi,  AArch64::STURWi,  AArch64::STURXi,
      AArch64::STURSi,   AArch64::STURDi },
    { AArch64::STRBBui,  AArch64::STRHHui,  AArch64::STRWui,  AArch64::STRXui,
      AArch64::STRSui,   AArch64::STRDui },
    { AArch64::STRBBroX, AArch64::STRHHroX, AArch64::STRWroX, AArch64::STRXroX,
      AArch64::STRSroX,  AArch64::STRDroX },
    { AArch64::STRBBroW, AArch64::STRHHroW, AArch64::STRWroW, AArch64::STRXroW,
      AArch64::STRSroW,  AArch64::STRDroW }
  bool VTIsi1 = false;
  bool UseRegOffset = Addr.isRegBase() && !Addr.getOffset() && Addr.getReg() &&
                      Addr.getOffsetReg();
  unsigned Idx = UseRegOffset ? 2 : UseScaled ? 1 : 0;
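  // Rows of OpcTable correspond to the addressing form (unscaled STUR, scaled
  // STR with immediate, STR with X-register offset, STR with W-register
  // offset); columns correspond to the store width: i8, i16, i32, i64, f32
  // and f64. Idx picks the row, the switch below picks the column.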
  case MVT::i1: VTIsi1 = true; [[fallthrough]];
  case MVT::i8:  Opc = OpcTable[Idx][0]; break;
  case MVT::i16: Opc = OpcTable[Idx][1]; break;
  case MVT::i32: Opc = OpcTable[Idx][2]; break;
  case MVT::i64: Opc = OpcTable[Idx][3]; break;
  case MVT::f32: Opc = OpcTable[Idx][4]; break;
  case MVT::f64: Opc = OpcTable[Idx][5]; break;

  if (VTIsi1 && SrcReg != AArch64::WZR) {
    Register ANDReg = emitAnd_ri(MVT::i32, SrcReg, 1);
    assert(ANDReg && "Unexpected AND instruction emission failure.");

  const MCInstrDesc &II = TII.get(Opc);
  MachineInstrBuilder MIB =
bool AArch64FastISel::selectStore(const Instruction *I) {
  const Value *Op0 = I->getOperand(0);
  if (!isTypeSupported(Op0->getType(), VT, true))

  const Value *PtrV = I->getOperand(1);
  if (TLI.supportSwiftError()) {
      if (Arg->hasSwiftErrorAttr())
      if (Alloca->isSwiftError())

      SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
      if (CF->isZero() && !CF->isNegative()) {
        SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;

    SrcReg = getRegForValue(Op0);

  if (SI->isAtomic()) {
    Register AddrReg = getRegForValue(PtrV);
    return emitStoreRelease(VT, SrcReg, AddrReg,
                            createMachineMemOperandFor(I));

  if (!computeAddress(PtrV, Addr, Op0->getType()))

  if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I)))
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
  if (FuncInfo.MF->getFunction().hasFnAttribute(
          Attribute::SpeculativeLoadHardening))

  MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));

  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

  switch (Predicate) {
    if (AI->getOpcode() == Instruction::And && isValueAvailable(AI)) {
      if (C->getValue().isPowerOf2())
      if (C->getValue().isPowerOf2()) {
        TestBit = C->getValue().logBase2();

  static const unsigned OpcTable[2][2][2] = {
    { {AArch64::CBZW,  AArch64::CBZX },
      {AArch64::CBNZW, AArch64::CBNZX} },
    { {AArch64::TBZW,  AArch64::TBZX },
      {AArch64::TBNZW, AArch64::TBNZX} }

  bool IsBitTest = TestBit != -1;
  bool Is64Bit = BW == 64;
  if (TestBit < 32 && TestBit >= 0)

  unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
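  // OpcTable[IsBitTest][IsCmpNE][Is64Bit]: a plain compare against zero maps
  // to CBZ/CBNZ, while a single-bit test (TestBit != -1) maps to TBZ/TBNZ;
  // the remaining indices choose the branch sense and the W/X register form.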
  const MCInstrDesc &II = TII.get(Opc);

  if (BW == 64 && !Is64Bit)
    SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, AArch64::sub_32);

  if ((BW < 32) && !IsBitTest)
    SrcReg = emitIntExt(VT, SrcReg, MVT::i32, true);

  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
bool AArch64FastISel::selectBranch(const Instruction *I) {
  MachineBasicBlock *MSucc = FuncInfo.getMBB(BI->getSuccessor(0));
  MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));

    if (CI->hasOneUse() && isValueAvailable(CI)) {
      switch (Predicate) {
        fastEmitBranch(FBB, MIMD.getDL());
        fastEmitBranch(TBB, MIMD.getDL());

      if (emitCompareAndBranch(BI))

      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

      switch (Predicate) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))

      uint64_t Imm = CI->getZExtValue();
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::B))

        auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->addSuccessor(Target, BranchProbability);
        FuncInfo.MBB->addSuccessorWithoutProb(Target);

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::Bcc))

  unsigned Opcode = AArch64::TBNZW;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    Opcode = AArch64::TBZW;

  const MCInstrDesc &II = TII.get(Opcode);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
      .addReg(ConstrainedCondReg)

bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
  if (FuncInfo.MF->getFunction().hasFnAttribute("ptrauth-indirect-gotos"))

  const MCInstrDesc &II = TII.get(AArch64::BR);

    FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(Succ));
bool AArch64FastISel::selectCmp(const Instruction *I) {
  switch (Predicate) {
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    ResultReg = fastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 1);

    updateValueMap(I, ResultReg);

  ResultReg = createResultReg(&AArch64::GPR32RegClass);

  static unsigned CondCodeTable[2][2] = {
  switch (Predicate) {

    Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),

    updateValueMap(I, ResultReg);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr),

  updateValueMap(I, ResultReg);
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
  if (!SI->getType()->isIntegerTy(1))

  const Value *Src1Val, *Src2Val;
  bool NeedExtraOp = false;
    Src1Val = SI->getCondition();
    Src2Val = SI->getFalseValue();
    Opc = AArch64::ORRWrr;
    Src1Val = SI->getFalseValue();
    Src2Val = SI->getCondition();
    Opc = AArch64::BICWrr;
    Src1Val = SI->getCondition();
    Src2Val = SI->getTrueValue();
    Opc = AArch64::ORRWrr;
    Src1Val = SI->getCondition();
    Src2Val = SI->getTrueValue();
    Opc = AArch64::ANDWrr;

  Register Src1Reg = getRegForValue(Src1Val);
  Register Src2Reg = getRegForValue(Src2Val);
    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, 1);

  Register ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
  updateValueMap(SI, ResultReg);
bool AArch64FastISel::selectSelect(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT))

  const TargetRegisterClass *RC;
    Opc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::FCSELSrrr;
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELDrrr;
    RC = &AArch64::FPR64RegClass;

  if (optimizeSelect(SI))

  if (foldXALUIntrinsic(CC, I, Cond)) {

      isValueAvailable(Cond)) {
    const Value *FoldSelect = nullptr;
    switch (Predicate) {
      FoldSelect = SI->getFalseValue();
      FoldSelect = SI->getTrueValue();

      Register SrcReg = getRegForValue(FoldSelect);
      updateValueMap(I, SrcReg);

    switch (Predicate) {

    const MCInstrDesc &II = TII.get(AArch64::ANDSWri);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,

  Register Src1Reg = getRegForValue(SI->getTrueValue());
  Register Src2Reg = getRegForValue(SI->getFalseValue());

  if (!Src1Reg || !Src2Reg)

    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, ExtraCC);
  Register ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectFPExt(const Instruction *I) {
  if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())

  Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTDSr),
  updateValueMap(I, ResultReg);

bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
  if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())

  Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::FCVTSDr),
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())

  Register SrcReg = getRegForValue(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
  if (SrcVT == MVT::f128 || SrcVT == MVT::f16 || SrcVT == MVT::bf16)

  if (SrcVT == MVT::f64) {
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr;
      Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;

  Register ResultReg = createResultReg(
      DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
  updateValueMap(I, ResultReg);

bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
  if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())

  if (DestVT == MVT::f16 || DestVT == MVT::bf16)

  assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
         "Unexpected value type.");

  Register SrcReg = getRegForValue(I->getOperand(0));

  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);

  if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) {

  if (SrcVT == MVT::i64) {
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri;
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri;
      Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri;
      Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;

  Register ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::fastLowerArguments() {
  if (!FuncInfo.CanLowerReturn)

  CallingConv::ID CC = F->getCallingConv();
  if (CC != CallingConv::C && CC != CallingConv::Swift)

  unsigned GPRCnt = 0;
  unsigned FPRCnt = 0;
  for (auto const &Arg : F->args()) {
    if (Arg.hasAttribute(Attribute::ByVal) ||
        Arg.hasAttribute(Attribute::InReg) ||
        Arg.hasAttribute(Attribute::StructRet) ||
        Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftAsync) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))

    Type *ArgTy = Arg.getType();

    EVT ArgVT = TLI.getValueType(DL, ArgTy);

    if (VT >= MVT::i1 && VT <= MVT::i64)
    else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() ||

  if (GPRCnt > 8 || FPRCnt > 8)

    { AArch64::W0, AArch64::W1, AArch64::W2, AArch64::W3, AArch64::W4,
      AArch64::W5, AArch64::W6, AArch64::W7 },
    { AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
      AArch64::X5, AArch64::X6, AArch64::X7 },
    { AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,
      AArch64::H5, AArch64::H6, AArch64::H7 },
    { AArch64::S0, AArch64::S1, AArch64::S2, AArch64::S3, AArch64::S4,
      AArch64::S5, AArch64::S6, AArch64::S7 },
    { AArch64::D0, AArch64::D1, AArch64::D2, AArch64::D3, AArch64::D4,
      AArch64::D5, AArch64::D6, AArch64::D7 },
    { AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,
      AArch64::Q5, AArch64::Q6, AArch64::Q7 }
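  // Fast argument lowering only handles the first eight GPR and eight
  // FPR/SIMD arguments (checked via GPRCnt/FPRCnt above); the rows list the
  // W, X, H, S, D and Q argument-register sequences matching the register
  // classes selected in the loop below.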
  unsigned FPRIdx = 0;
  for (auto const &Arg : F->args()) {
    MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
    const TargetRegisterClass *RC;
    if (VT >= MVT::i1 && VT <= MVT::i32) {
      RC = &AArch64::GPR32RegClass;
    } else if (VT == MVT::i64) {
      RC = &AArch64::GPR64RegClass;
    } else if (VT == MVT::f16 || VT == MVT::bf16) {
      RC = &AArch64::FPR16RegClass;
    } else if (VT == MVT::f32) {
      RC = &AArch64::FPR32RegClass;
      RC = &AArch64::FPR64RegClass;
      RC = &AArch64::FPR128RegClass;

    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);

    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    updateValueMap(&Arg, ResultReg);
bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
                                      SmallVectorImpl<MVT> &OutVTs,
                                      SmallVectorImpl<Type *> &OrigTys,
                                      unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, OrigTys,
                             CCAssignFnForCall(CC));

  NumBytes = CCInfo.getStackSize();

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))

  for (CCValAssign &VA : ArgLocs) {
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    Register ArgReg = getRegForValue(ArgVal);

    switch (VA.getLocInfo()) {
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, false);
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, true);

    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      assert(VA.isMemLoc() && "Assuming store on stack.");

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittleEndian())
        BEAlign = 8 - ArgSize;

      Addr.setKind(Address::RegBase);
      Addr.setReg(AArch64::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(

      if (!emitStore(ArgVT, ArgReg, Addr, MMO))
bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))

  CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
  CCInfo.AnalyzeCallResult(CLI.Ins, CCAssignFnForCall(CC));

  Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),

  CLI.ResultReg = ResultReg;
  CLI.NumResultRegs = RVLocs.size();
bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;

  if (!Callee && !Symbol)

  if (CLI.CB && CLI.CB->hasFnAttr(Attribute::ReturnsTwice) &&
      !Subtarget->noBTIAtReturnTwice() &&
      MF->getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())

  if (CLI.CB && CLI.CB->isIndirectCall() &&

  if (MF->getFunction().getParent()->getRtLibUseGOT())

  for (auto Flag : CLI.OutFlags)
        Flag.isSwiftSelf() || Flag.isSwiftAsync() || Flag.isSwiftError())

  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))

  if (Callee && !computeCallAddress(Callee, Addr))

      Addr.getGlobalValue()->hasExternalWeakLinkage())

  if (!processCallArgs(CLI, OutVTs, OrigTys, NumBytes))

  MachineInstrBuilder MIB;
    const MCInstrDesc &II =
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II);
    else if (Addr.getGlobalValue())
    else if (Addr.getReg()) {

      Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::ADRP),

      CallReg = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(AArch64::LDRXui), CallReg)
    } else if (Addr.getGlobalValue())
      CallReg = materializeGV(Addr.getGlobalValue());
    else if (Addr.getReg())
      CallReg = Addr.getReg();

    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addReg(CallReg);

  for (auto Reg : CLI.OutRegs)

  return finishCall(CLI, NumBytes);
bool AArch64FastISel::isMemCpySmall(uint64_t Len, MaybeAlign Alignment) {
    return Len / Alignment->value() <= 4;

bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
                                         uint64_t Len, MaybeAlign Alignment) {
  if (!isMemCpySmall(Len, Alignment))

  int64_t UnscaledOffset = 0;

    if (!Alignment || *Alignment >= 8) {

      assert(Alignment && "Alignment is set in this branch");
      if (Len >= 4 && *Alignment == 4)
      else if (Len >= 2 && *Alignment == 2)

    UnscaledOffset += Size;

    Dest.setOffset(OrigDest.getOffset() + UnscaledOffset);
    Src.setOffset(OrigSrc.getOffset() + UnscaledOffset);
                                        const Instruction *I,
  if (!isTypeLegal(RetTy, RetVT))

  if (RetVT != MVT::i32 && RetVT != MVT::i64)

  case Intrinsic::smul_with_overflow:
      if (C->getValue() == 2)
        IID = Intrinsic::sadd_with_overflow;
  case Intrinsic::umul_with_overflow:
      if (C->getValue() == 2)
        IID = Intrinsic::uadd_with_overflow;

  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:

  if (!isValueAvailable(II))

  for (auto Itr = std::prev(Start); Itr != End; --Itr) {
    if (EVI->getAggregateOperand() != II)
bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();

    Register SrcReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

      DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
      assert(DestReg && "Unexpected LDR instruction emission failure.");

    updateValueMap(II, SrcReg);
  case Intrinsic::sponentry: {
    MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();

    Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::ADDXri), ResultReg)

    updateValueMap(II, ResultReg);
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    if (MTI->isVolatile())

    bool IsMemCpy = (II->getIntrinsicID() == Intrinsic::memcpy);

    MaybeAlign Alignment;
    if (MTI->getDestAlign() || MTI->getSourceAlign())
      Alignment = std::min(MTI->getDestAlign().valueOrOne(),
                           MTI->getSourceAlign().valueOrOne());
    if (isMemCpySmall(Len, Alignment)) {
      if (!computeAddress(MTI->getRawDest(), Dest) ||
          !computeAddress(MTI->getRawSource(), Src))
      if (tryEmitSmallMemCpy(Dest, Src, Len, Alignment))

    if (!MTI->getLength()->getType()->isIntegerTy(64))

    if (MTI->getSourceAddressSpace() > 255 || MTI->getDestAddressSpace() > 255)

    return lowerCallTo(II, IntrMemName, II->arg_size() - 1);
  case Intrinsic::memset: {
    return lowerCallTo(II, "memset", II->arg_size() - 1);
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::tan:
  case Intrinsic::pow: {
    if (!isTypeLegal(II->getType(), RetVT))

    if (RetVT != MVT::f32 && RetVT != MVT::f64)

    static const RTLIB::Libcall LibCallTable[4][2] = {
        {RTLIB::SIN_F32, RTLIB::SIN_F64},
        {RTLIB::COS_F32, RTLIB::COS_F64},
        {RTLIB::TAN_F32, RTLIB::TAN_F64},
        {RTLIB::POW_F32, RTLIB::POW_F64}};

    bool Is64Bit = RetVT == MVT::f64;
    switch (II->getIntrinsicID()) {
    case Intrinsic::sin:
      LC = LibCallTable[0][Is64Bit];
    case Intrinsic::cos:
      LC = LibCallTable[1][Is64Bit];
    case Intrinsic::tan:
      LC = LibCallTable[2][Is64Bit];
    case Intrinsic::pow:
      LC = LibCallTable[3][Is64Bit];

    Args.reserve(II->arg_size());

    for (auto &Arg : II->args())
      Args.emplace_back(Arg);

    CallLoweringInfo CLI;
    MCContext &Ctx = MF->getContext();
    CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), II->getType(),
                  TLI.getLibcallName(LC), std::move(Args));
    if (!lowerCallTo(CLI))
    updateValueMap(II, CLI.ResultReg);
  case Intrinsic::fabs: {
    if (!isTypeLegal(II->getType(), VT))

      Opc = AArch64::FABSSr;
      Opc = AArch64::FABSDr;

    Register SrcReg = getRegForValue(II->getOperand(0));
    Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
    updateValueMap(II, ResultReg);
  case Intrinsic::trap:
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::BRK))
  case Intrinsic::debugtrap:
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::BRK))

  case Intrinsic::sqrt: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeLegal(RetTy, VT))

    Register Op0Reg = getRegForValue(II->getOperand(0));

    Register ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg);

    updateValueMap(II, ResultReg);
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow: {
    Type *RetTy = Ty->getTypeAtIndex(0U);

    if (!isTypeLegal(RetTy, VT))

    if (VT != MVT::i32 && VT != MVT::i64)

    case Intrinsic::smul_with_overflow:
        if (C->getValue() == 2) {
          IID = Intrinsic::sadd_with_overflow;
    case Intrinsic::umul_with_overflow:
        if (C->getValue() == 2) {
          IID = Intrinsic::uadd_with_overflow;

    Register ResultReg1, ResultReg2, MulReg;
    case Intrinsic::sadd_with_overflow:
      ResultReg1 = emitAdd(VT, LHS, RHS, true);
    case Intrinsic::uadd_with_overflow:
      ResultReg1 = emitAdd(VT, LHS, RHS, true);
    case Intrinsic::ssub_with_overflow:
      ResultReg1 = emitSub(VT, LHS, RHS, true);
    case Intrinsic::usub_with_overflow:
      ResultReg1 = emitSub(VT, LHS, RHS, true);
    case Intrinsic::smul_with_overflow: {
      if (VT == MVT::i32) {
        MulReg = emitSMULL_rr(MVT::i64, LHSReg, RHSReg);
            fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
            emitAddSub_rx(false, MVT::i64, MulReg, MulSubReg,
        assert(VT == MVT::i64 && "Unexpected value type.");
        MulReg = emitMul_rr(VT, LHSReg, RHSReg);
    case Intrinsic::umul_with_overflow: {
      if (VT == MVT::i32) {
        MulReg = emitUMULL_rr(MVT::i64, LHSReg, RHSReg);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                TII.get(AArch64::ANDSXri), AArch64::XZR)
        MulReg = fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
        assert(VT == MVT::i64 && "Unexpected value type.");
        MulReg = emitMul_rr(VT, LHSReg, RHSReg);
        emitSubs_rr(VT, AArch64::XZR, UMULHReg, false);

      ResultReg1 = createResultReg(TLI.getRegClassFor(VT));
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg1).addReg(MulReg);

    ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
                                  AArch64::WZR, AArch64::WZR,
                                  getInvertedCondCode(CC));

    assert((ResultReg1 + 1) == ResultReg2 &&
           "Nonconsecutive result registers.");
    updateValueMap(II, ResultReg1, 2);
  case Intrinsic::aarch64_crc32b:
  case Intrinsic::aarch64_crc32h:
  case Intrinsic::aarch64_crc32w:
  case Intrinsic::aarch64_crc32x:
  case Intrinsic::aarch64_crc32cb:
  case Intrinsic::aarch64_crc32ch:
  case Intrinsic::aarch64_crc32cw:
  case Intrinsic::aarch64_crc32cx: {
    if (!Subtarget->hasCRC())

    switch (II->getIntrinsicID()) {
    case Intrinsic::aarch64_crc32b:
      Opc = AArch64::CRC32Brr;
    case Intrinsic::aarch64_crc32h:
      Opc = AArch64::CRC32Hrr;
    case Intrinsic::aarch64_crc32w:
      Opc = AArch64::CRC32Wrr;
    case Intrinsic::aarch64_crc32x:
      Opc = AArch64::CRC32Xrr;
    case Intrinsic::aarch64_crc32cb:
      Opc = AArch64::CRC32CBrr;
    case Intrinsic::aarch64_crc32ch:
      Opc = AArch64::CRC32CHrr;
    case Intrinsic::aarch64_crc32cw:
      Opc = AArch64::CRC32CWrr;
    case Intrinsic::aarch64_crc32cx:
      Opc = AArch64::CRC32CXrr;

    Register LHSReg = getRegForValue(II->getArgOperand(0));
    Register RHSReg = getRegForValue(II->getArgOperand(1));
    if (!LHSReg || !RHSReg)

        fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, LHSReg, RHSReg);
    updateValueMap(II, ResultReg);
bool AArch64FastISel::selectRet(const Instruction *I) {
  const Function &F = *I->getParent()->getParent();
  if (!FuncInfo.CanLowerReturn)
  if (TLI.supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
  if (TLI.supportSplitCSR(FuncInfo.MF))
  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
    if (ValLocs.size() != 1)
    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (RVVT == MVT::f128)
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
      bool IsZExt = Outs[0].Flags.isZExt();
      SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
    SrcReg = emitAnd_ri(MVT::i64, SrcReg, 0xffffffff);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(AArch64::RET_ReallyLR));
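// Integer truncation needs no real instruction: an i64 source is narrowed by
// taking its 32-bit sub-register and masking it to the destination width,
// while narrower sources are simply copied into a fresh GPR32.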
bool AArch64FastISel::selectTrunc(const Instruction *I) {
  Type *DestTy = I->getType();
  Type *SrcTy = Op->getType();
  EVT SrcEVT = TLI.getValueType(DL, SrcTy, true);
  EVT DestEVT = TLI.getValueType(DL, DestTy, true);
  if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 &&
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 &&
  if (SrcVT == MVT::i64) {
    Register Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
    ResultReg = emitAnd_ri(MVT::i32, Reg32, Mask);
    assert(ResultReg && "Unexpected AND instruction emission failure.");
  } else {
    ResultReg = createResultReg(&AArch64::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
  updateValueMap(I, ResultReg);
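// emiti1Ext widens an i1 value: zero-extension is an 'AND #1' (plus a
// SUBREG_TO_REG when the destination is i64), while sign-extension uses an
// SBFM bitfield move that replicates bit 0 into the upper bits.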
Register AArch64FastISel::emiti1Ext(Register SrcReg, MVT DestVT, bool IsZExt) {
  assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 ||
          DestVT == MVT::i64) &&
         "Unexpected value type.");
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
  Register ResultReg = emitAnd_ri(MVT::i32, SrcReg, 1);
  assert(ResultReg && "Unexpected AND instruction emission failure.");
  if (DestVT == MVT::i64) {
    Register Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Reg64)
        .addImm(AArch64::sub_32);
  if (DestVT == MVT::i64) {
  return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg,
    Opc = AArch64::MADDWrrr; ZReg = AArch64::WZR;
    break;
    Opc = AArch64::MADDXrrr; ZReg = AArch64::XZR;
    break;
  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rrr(Opc, RC, Op0, Op1, ZReg);

  if (RetVT != MVT::i64)
  return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op1, AArch64::XZR);

  if (RetVT != MVT::i64)
  return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass,
                          Op0, Op1, AArch64::XZR);
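// Plain multiplies are emitted as MADD/SMADDL/UMADDL with the zero register
// as the addend. The *_rr shift helpers below use the variable-shift
// instructions (LSLV/LSRV/ASRV); sub-32-bit operands are masked (or, for
// arithmetic shifts, sign-extended) first and the result is masked back down
// to the narrow width.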
  bool NeedTrunc = false;
  case MVT::i8:  Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSLVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSLVWr;                                  break;
  case MVT::i64: Opc = AArch64::LSLVXr;                                  break;
  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
  ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
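// The *_ri helpers fold a constant shift together with the surrounding
// zero/sign-extension into a single bitfield move. For a left shift by
// 'Shift', UBFM/SBFM is used with ImmR = RegSize - Shift and
// ImmS = min(SrcBits - 1, DstBits - 1 - Shift), so no separate extend
// instruction is required.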
                                  uint64_t Shift, bool IsZExt) {
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) &&
         "Unexpected return value type.");
  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (RetVT == SrcVT) {
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  if (Shift >= DstBits)
  unsigned ImmR = RegSize - Shift;
  unsigned ImmS = std::min<unsigned>(SrcBits - 1, DstBits - 1 - Shift);
  static const unsigned OpcTable[2][2] = {
      {AArch64::SBFMWri, AArch64::SBFMXri},
      {AArch64::UBFMWri, AArch64::UBFMXri}
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(AArch64::sub_32);
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
  bool NeedTrunc = false;
  case MVT::i8:  Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::LSRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::LSRVWr;                                  break;
  case MVT::i64: Opc = AArch64::LSRVXr;                                  break;
  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Mask);
  Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
  ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
                                  uint64_t Shift, bool IsZExt) {
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) &&
         "Unexpected return value type.");
  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (RetVT == SrcVT) {
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  if (Shift >= DstBits)
  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
  Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
      {AArch64::SBFMWri, AArch64::SBFMXri},
      {AArch64::UBFMWri, AArch64::UBFMXri}
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(AArch64::sub_32);
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
  bool NeedTrunc = false;
  case MVT::i8:  Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xff;   break;
  case MVT::i16: Opc = AArch64::ASRVWr; NeedTrunc = true; Mask = 0xffff; break;
  case MVT::i32: Opc = AArch64::ASRVWr;                                  break;
  case MVT::i64: Opc = AArch64::ASRVXr;                                  break;
  const TargetRegisterClass *RC =
      (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, false);
  Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
  Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
  ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
                                  uint64_t Shift, bool IsZExt) {
         "Unexpected source/return type pair.");
  assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 ||
          SrcVT == MVT::i32 || SrcVT == MVT::i64) &&
         "Unexpected source value type.");
  assert((RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32 ||
          RetVT == MVT::i64) &&
         "Unexpected return value type.");
  bool Is64Bit = (RetVT == MVT::i64);
  unsigned RegSize = Is64Bit ? 64 : 32;
  const TargetRegisterClass *RC =
      Is64Bit ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  if (RetVT == SrcVT) {
    Register ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
    return emitIntExt(SrcVT, Op0, RetVT, IsZExt);
  if (Shift >= DstBits)
  if (Shift >= SrcBits && IsZExt)
    return materializeInt(ConstantInt::get(*Context, APInt(RegSize, 0)), RetVT);
  unsigned ImmR = std::min<unsigned>(SrcBits - 1, Shift);
  unsigned ImmS = SrcBits - 1;
  static const unsigned OpcTable[2][2] = {
      {AArch64::SBFMWri, AArch64::SBFMXri},
      {AArch64::UBFMWri, AArch64::UBFMXri}
  unsigned Opc = OpcTable[IsZExt][Is64Bit];
  if (SrcVT.SimpleTy <= MVT::i32 && RetVT == MVT::i64) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), TmpReg)
        .addImm(AArch64::sub_32);
  return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS);
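// emitIntExt is the general integer-extension helper: i1 sources go through
// emiti1Ext, everything else becomes a single UBFM (zext) or SBFM (sext)
// bitfield move, with a SUBREG_TO_REG inserted first when a 32-bit value is
// widened into a 64-bit register.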
Register AArch64FastISel::emitIntExt(MVT SrcVT, Register SrcReg, MVT DestVT,
  assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?");
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) &&
       (DestVT != MVT::i32) && (DestVT != MVT::i64)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) &&
       (SrcVT != MVT::i16) && (SrcVT != MVT::i32)))
    return emiti1Ext(SrcReg, DestVT, IsZExt);
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    if (DestVT == MVT::i64)
      Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
    else
      Opc = IsZExt ? AArch64::UBFMWri : AArch64::SBFMWri;
    assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?");
    Opc = IsZExt ? AArch64::UBFMXri : AArch64::SBFMXri;
  if (DestVT == MVT::i8 || DestVT == MVT::i16)
  else if (DestVT == MVT::i64) {
    Register Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(AArch64::SUBREG_TO_REG), Src64)
        .addImm(AArch64::sub_32);
  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  return fastEmitInst_rii(Opc, RC, SrcReg, 0, Imm);
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRBBroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRWroX:
  case AArch64::LDRBBroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRWroW:

  case AArch64::LDURSBWi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSWi:
  case AArch64::LDRSBWui:
  case AArch64::LDRSHWui:
  case AArch64::LDRSBXui:
  case AArch64::LDRSHXui:
  case AArch64::LDRSWui:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
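// optimizeIntExtLoad folds a zext/sext whose sole operand is a load. If the
// selected load already produces the extended value, the extend is mapped
// onto the existing register (adding only a SUBREG_TO_REG when widening to
// i64) and the now-redundant copy is removed instead of emitting a new
// extend.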
bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
  if (!LI || !LI->hasOneUse())
  MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
  const auto *LoadMI = MI;
  if (LoadMI->getOpcode() == TargetOpcode::COPY &&
      LoadMI->getOperand(1).getSubReg() == AArch64::sub_32) {
    Register LoadReg = MI->getOperand(1).getReg();
    LoadMI = MRI.getUniqueVRegDef(LoadReg);
    assert(LoadMI && "Expected valid instruction");
  if (RetVT != MVT::i64 || SrcVT > MVT::i32) {
    updateValueMap(I, Reg);
  Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(AArch64::SUBREG_TO_REG), Reg64)
      .addImm(AArch64::sub_32);
  assert((MI->getOpcode() == TargetOpcode::COPY &&
          MI->getOperand(1).getSubReg() == AArch64::sub_32) &&
         "Expected copy instruction");
  Reg = MI->getOperand(1).getReg();
  removeDeadCode(I, std::next(I));
  updateValueMap(I, Reg);
bool AArch64FastISel::selectIntExt(const Instruction *I) {
         "Unexpected integer extend instruction.");
  if (!isTypeSupported(I->getType(), RetVT))
  if (!isTypeSupported(I->getOperand(0)->getType(), SrcVT))
  if (optimizeIntExtLoad(I, RetVT, SrcVT))
  Register SrcReg = getRegForValue(I->getOperand(0));
  if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
    if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
      Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(AArch64::SUBREG_TO_REG), ResultReg)
          .addImm(AArch64::sub_32);
    updateValueMap(I, SrcReg);
  Register ResultReg = emitIntExt(SrcVT, SrcReg, RetVT, IsZExt);
  updateValueMap(I, ResultReg);
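// AArch64 has no remainder instruction, so srem/urem is lowered as a divide
// followed by MSUB: remainder = dividend - quotient * divisor.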
bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (DestVT != MVT::i64 && DestVT != MVT::i32)
  bool Is64bit = (DestVT == MVT::i64);
  switch (ISDOpcode) {
    DivOpc = Is64bit ? AArch64::SDIVXr : AArch64::SDIVWr;
    DivOpc = Is64bit ? AArch64::UDIVXr : AArch64::UDIVWr;
  unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
  Register Src0Reg = getRegForValue(I->getOperand(0));
  Register Src1Reg = getRegForValue(I->getOperand(1));
  const TargetRegisterClass *RC =
      (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  Register QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
  assert(QuotReg && "Unexpected DIV instruction emission failure.");
  Register ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
  updateValueMap(I, ResultReg);
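// Multiplication by a power of two becomes a left shift (emitLSL_ri). The
// code looks through a zext/sext of the other operand so the extension can
// be folded into the shift; all remaining cases go through emitMul_rr.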
bool AArch64FastISel::selectMul(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT, true))
  const Value *Src0 = I->getOperand(0);
  const Value *Src1 = I->getOperand(1);
  if (C->getValue().isPowerOf2())
  if (C->getValue().isPowerOf2()) {
    uint64_t ShiftVal = C->getValue().logBase2();
    if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) {
      Src0 = ZExt->getOperand(0);
    if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) {
      Src0 = SExt->getOperand(0);
    Register Src0Reg = getRegForValue(Src0);
    Register ResultReg = emitLSL_ri(VT, SrcVT, Src0Reg, ShiftVal, IsZExt);
    updateValueMap(I, ResultReg);
  Register Src0Reg = getRegForValue(I->getOperand(0));
  Register Src1Reg = getRegForValue(I->getOperand(1));
  Register ResultReg = emitMul_rr(VT, Src0Reg, Src1Reg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectShift(const Instruction *I) {
  if (!isTypeSupported(I->getType(), RetVT, true))
    return selectOperator(I, I->getOpcode());
  uint64_t ShiftVal = C->getZExtValue();
  bool IsZExt = I->getOpcode() != Instruction::AShr;
  const Value *Op0 = I->getOperand(0);
  if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), TmpVT)) {
    Op0 = ZExt->getOperand(0);
  if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), TmpVT)) {
    Op0 = SExt->getOperand(0);
  Register Op0Reg = getRegForValue(Op0);
  switch (I->getOpcode()) {
  case Instruction::Shl:
    ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
  case Instruction::AShr:
    ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
  case Instruction::LShr:
    ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt);
  updateValueMap(I, ResultReg);

  Register Op0Reg = getRegForValue(I->getOperand(0));
  Register Op1Reg = getRegForValue(I->getOperand(1));
  switch (I->getOpcode()) {
  case Instruction::Shl:
    ResultReg = emitLSL_rr(RetVT, Op0Reg, Op1Reg);
  case Instruction::AShr:
    ResultReg = emitASR_rr(RetVT, Op0Reg, Op1Reg);
  case Instruction::LShr:
    ResultReg = emitLSR_rr(RetVT, Op0Reg, Op1Reg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectBitCast(const Instruction *I) {
  if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT))
  if (!isTypeLegal(I->getType(), RetVT))
  if (RetVT == MVT::f32 && SrcVT == MVT::i32)
    Opc = AArch64::FMOVWSr;
  else if (RetVT == MVT::f64 && SrcVT == MVT::i64)
    Opc = AArch64::FMOVXDr;
  else if (RetVT == MVT::i32 && SrcVT == MVT::f32)
    Opc = AArch64::FMOVSWr;
  else if (RetVT == MVT::i64 && SrcVT == MVT::f64)
    Opc = AArch64::FMOVDXr;
  const TargetRegisterClass *RC = nullptr;
  case MVT::i32: RC = &AArch64::GPR32RegClass; break;
  case MVT::i64: RC = &AArch64::GPR64RegClass; break;
  case MVT::f32: RC = &AArch64::FPR32RegClass; break;
  case MVT::f64: RC = &AArch64::FPR64RegClass; break;
  Register Op0Reg = getRegForValue(I->getOperand(0));
  Register ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
  updateValueMap(I, ResultReg);
bool AArch64FastISel::selectFRem(const Instruction *I) {
  if (!isTypeLegal(I->getType(), RetVT))
    LC = RTLIB::REM_F32;
    LC = RTLIB::REM_F64;
  Args.reserve(I->getNumOperands());
  for (auto &Arg : I->operands())
    Args.emplace_back(Arg);
  CallLoweringInfo CLI;
  MCContext &Ctx = MF->getContext();
  CLI.setCallee(DL, Ctx, TLI.getLibcallCallingConv(LC), I->getType(),
                TLI.getLibcallName(LC), std::move(Args));
  if (!lowerCallTo(CLI))
  updateValueMap(I, CLI.ResultReg);
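// selectSDiv only handles signed division by a (possibly negated) power of
// two. One early path emits just an arithmetic shift (in the upstream
// sources this corresponds to an 'exact' sdiv); otherwise the dividend is
// biased by 2^Lg2 - 1 when negative (compare against zero + CSEL), shifted
// right arithmetically, and negated via a subtract from the zero register
// when the divisor itself is negative.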
bool AArch64FastISel::selectSDiv(const Instruction *I) {
  if (!isTypeLegal(I->getType(), VT))
  if ((VT != MVT::i32 && VT != MVT::i64) || !C ||
      !(C.isPowerOf2() || C.isNegatedPowerOf2()))
  unsigned Lg2 = C.countr_zero();
  Register Src0Reg = getRegForValue(I->getOperand(0));
    Register ResultReg = emitASR_ri(VT, VT, Src0Reg, Lg2);
    updateValueMap(I, ResultReg);
  int64_t Pow2MinusOne = (1ULL << Lg2) - 1;
  Register AddReg = emitAdd_ri_(VT, Src0Reg, Pow2MinusOne);
  if (!emitICmp_ri(VT, Src0Reg, 0))
  const TargetRegisterClass *RC;
  if (VT == MVT::i64) {
    SelectOpc = AArch64::CSELXr;
    RC = &AArch64::GPR64RegClass;
  } else {
    SelectOpc = AArch64::CSELWr;
    RC = &AArch64::GPR32RegClass;
  }
  Register SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
  Register ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
    ResultReg = emitAddSub_rs(false, VT, ZeroReg, SelectReg,
    ResultReg = emitASR_ri(VT, VT, SelectReg, Lg2);
  updateValueMap(I, ResultReg);
Register AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  MVT PtrVT = TLI.getPointerTy(DL);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, false);
  } else if (IdxVT.bitsGT(PtrVT))
    llvm_unreachable("AArch64 FastISel doesn't support types larger than i64");
bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
  Register N = getRegForValue(I->getOperand(0));
  uint64_t TotalOffs = 0;
  MVT VT = TLI.getPointerTy(DL);
    const Value *Idx = GTI.getOperand();
    if (auto *StTy = GTI.getStructTypeOrNull()) {
      TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
      TotalOffs += GTI.getSequentialElementStride(DL) *
        N = emitAdd_ri_(VT, N, TotalOffs);
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      Register IdxN = getRegForGEPIndex(Idx);
      if (ElementSize != 1) {
        IdxN = emitMul_rr(VT, IdxN, C);
    N = emitAdd_ri_(VT, N, TotalOffs);
  updateValueMap(I, N);
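// At -O0 a cmpxchg is selected to the CMP_SWAP_32/64 pseudo (later expanded
// into an exclusive load/store loop). The i1 success result is recomputed
// here with SUBS + CSINC against the expected value, and both results are
// reported to FastISel as two consecutive virtual registers.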
bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
  assert(TM.getOptLevel() == CodeGenOptLevel::None &&
         "cmpxchg survived AtomicExpand at optlevel > -O0");
  Type *RetTy = RetPairTy->getTypeAtIndex(0U);
  assert(RetPairTy->getTypeAtIndex(1U)->isIntegerTy(1) &&
         "cmpxchg has a non-i1 status result");
  if (!isTypeLegal(RetTy, VT))
  const TargetRegisterClass *ResRC;
  unsigned Opc, CmpOpc;
  if (VT == MVT::i32) {
    Opc = AArch64::CMP_SWAP_32;
    CmpOpc = AArch64::SUBSWrs;
    ResRC = &AArch64::GPR32RegClass;
  } else if (VT == MVT::i64) {
    Opc = AArch64::CMP_SWAP_64;
    CmpOpc = AArch64::SUBSXrs;
    ResRC = &AArch64::GPR64RegClass;
  const MCInstrDesc &II = TII.get(Opc);
  Register AddrReg = getRegForValue(I->getPointerOperand());
  Register DesiredReg = getRegForValue(I->getCompareOperand());
  Register NewReg = getRegForValue(I->getNewValOperand());
  if (!AddrReg || !DesiredReg || !NewReg)
  const Register ResultReg1 = createResultReg(ResRC);
  const Register ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
  const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CmpOpc))
      .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR)
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::CSINCWr))
  assert((ResultReg1 + 1) == ResultReg2 && "Nonconsecutive result registers.");
  updateValueMap(I, ResultReg1, 2);
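// fastSelectInstruction is the per-instruction entry point: it defers to
// SelectionDAG whenever TLI.fallBackToDAGISel asks for it, dispatches each IR
// opcode to the matching select* helper above, and otherwise falls back to
// the target-independent selectOperator.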
bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {
  if (TLI.fallBackToDAGISel(*I))
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
    return selectAddSub(I);
  case Instruction::Mul:
    return selectMul(I);
  case Instruction::SDiv:
    return selectSDiv(I);
  case Instruction::SRem:
  case Instruction::URem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::IndirectBr:
    return selectIndirectBr(I);
  case Instruction::BitCast:
    return selectBitCast(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, true);
  case Instruction::FPToUI:
    return selectFPToInt(I, false);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::SIToFP:
    return selectIntToFP(I, true);
  case Instruction::UIToFP:
    return selectIntToFP(I, false);
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::FCmp:
  case Instruction::ICmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::FRem:
    return selectFRem(I);
  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);
  case Instruction::AtomicCmpXchg:
  return selectOperator(I, I->getOpcode());

  return new AArch64FastISel(FuncInfo, LibInfo);