using namespace LegalizeActions;
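// A SizeAndActionsVec lists (bit width, action) pairs ordered by width, where
// each entry covers all widths up to the next entry. The helper below copies
// such a list into result, inserting Unsupported entries so that the gaps
// between the specified widths are explicitly rejected.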
// FIXME: The following static functions are SizeChangeStrategy functions
// that are meant to temporarily mimic the behaviour of the old legalization
// based on doubling/halving non-legal types as closely as possible.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}
static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {{1, Unsupported},
                                             {8, WidenScalar},
                                             {9, Unsupported},
                                             {16, WidenScalar},
                                             {17, Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}
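// Rough worked example: widen_8_16({{32, Legal}}) yields {1, Unsupported},
// {8, WidenScalar}, {9, Unsupported}, {16, WidenScalar}, {17, Unsupported},
// {32, Legal}, {33, Unsupported}: s8 and s16 get widened, s32 is legal, and
// every other width is rejected.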
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}
ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  if (ST.isThumb1Only()) {
    // Thumb1 is not supported yet.
    computeTables();
    verify(*ST.getRegisterInfo());
    return;
  }
  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalForCartesianProduct({s8, s16, s32}, {s1, s8, s16});

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();
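  // legalForCartesianProduct above marks every (destination, source) pairing
  // from the two type sets as legal, so e.g. %d:_(s32) = G_ZEXT %s:_(s1)
  // needs no further work.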
  getActionDefinitionsBuilder({G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);
  if (ST.hasNEON())
    getActionDefinitionsBuilder({G_ADD, G_SUB})
        .legalFor({s32, s64})
        .minScalar(0, s32);
  else
    getActionDefinitionsBuilder({G_ADD, G_SUB})
        .legalFor({s32})
        .minScalar(0, s32);
  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
      .legalFor({{s32, s32}})
      .minScalar(0, s32)
      .clampScalar(1, s32, s32);
  bool HasHWDivide = (!ST.isThumb() && ST.hasDivideInARMMode()) ||
                     (ST.isThumb() && ST.hasDivideInThumbMode());
  if (HasHWDivide)
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);
  for (unsigned Op : {G_SREM, G_UREM}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (HasHWDivide)
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }
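  // The remainder is handled three ways: with a hardware divider it is
  // lowered inline (roughly a - (a / b) * b); on AEABI targets the Custom
  // code in legalizeCustom below emits a combined __aeabi_{i,ui}divmod call;
  // otherwise it becomes a plain libcall.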
  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, s32}})
      .minScalar(1, s32);
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}})
      .minScalar(0, s32);
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);
  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);
  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({s32, p0}, {s1})
      .minScalar(0, s32);
  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder = getActionDefinitionsBuilder({G_LOAD, G_STORE})
                               .legalForTypesWithMemDesc({{s1, p0, 8, 8},
                                                          {s8, p0, 8, 8},
                                                          {s16, p0, 16, 8},
                                                          {s32, p0, 32, 8},
                                                          {p0, p0, 32, 8}})
                               .unsupportedIfMemSizeNotPow2();
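  // Each mem-desc tuple above reads as {value type, pointer type, size in
  // memory in bits, minimum alignment in bits}; the s64 entry added in the
  // VFP block below only requires 32-bit alignment.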
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI)
          .legalFor({s32, p0})
          .minScalar(0, s32);
  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalFor({{p0, s32}})
      .minScalar(1, s32);
  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});
  if (!ST.useSoftFloat() && ST.hasVFP2Base()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});
    LoadStoreBuilder
        .legalForTypesWithMemDesc({{s64, p0, 64, 32}})
        .maxScalar(0, s32);
    PhiBuilder.legalFor({s64});
    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});
    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});
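    // An s64 fits in a D register, which is why the G_MERGE_VALUES and
    // G_UNMERGE_VALUES pairs above are directly legal here, e.g.
    //   %d:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)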
    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});
    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});
  } else {
    getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
        .libcallFor({s32, s64});
    LoadStoreBuilder.maxScalar(0, s32);
    for (auto Ty : {s32, s64})
      setAction({G_FNEG, Ty}, Lower);
    getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});
    getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1},
                                                                  {s32, s64});
    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();
    getActionDefinitionsBuilder(G_FPEXT).libcallFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .libcallForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .libcallForCartesianProduct({s32, s64}, {s32});
  }

  if (!ST.useSoftFloat() && ST.hasVFP4Base())
    getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64});
  else
    getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64});
  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});
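  // There is no ARM instruction for FP remainder or exponentiation, so G_FREM
  // and G_FPOW always become libcalls (fmod/pow and their float variants),
  // whatever the FPU.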
  // ARMv5T introduced the CLZ instruction, so from v5T on G_CTLZ is legal and
  // the zero-undef variant is lowered to it; without CLZ, the plain G_CTLZ is
  // lowered to the zero-undef variant, which in turn becomes a libcall.
  if (ST.hasV5TOps()) {
    getActionDefinitionsBuilder(G_CTLZ)
        .legalFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  } else {
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .libcallFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  }

  computeTables();
  verify(*ST.getRegisterInfo());
}
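// The two setFCmpLibcalls* methods below populate the FCmp32Libcalls and
// FCmp64Libcalls tables, mapping each FP predicate to one or two runtime
// comparison routines together with the integer predicate (if any) needed to
// interpret each routine's result.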
void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // AEABI comparison helpers (__aeabi_fcmpeq, __aeabi_fcmpun and friends)
  // return a ready-made 0 or 1, so most entries use BAD_ICMP_PREDICATE to
  // mean "just truncate the result". (Full tables elided in this listing.)
}
void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // GNU targets use the __eqsf2/__eqdf2 family instead, whose results must be
  // compared against zero with the integer predicate recorded alongside each
  // libcall. (Full tables elided in this listing.)
}
ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}
bool ARMLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                      MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    Register OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. Create a new virtual register for the quotient and reuse the
    // original instruction's destination for the remainder.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    Register RetRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          OriginalResult};
    auto Status = createLibcall(MIRBuilder, Libcall, {RetRegs, RetTy},
                                {{MI.getOperand(1).getReg(), ArgTy},
                                 {MI.getOperand(2).getReg(), ArgTy}});
    if (Status != LegalizerHelper::Legalized)
      return false;
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();
    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);
    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }
    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);
    SmallVector<Register, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status =
          createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
                        {{MI.getOperand(2).getReg(), ArgTy},
                         {MI.getOperand(3).getReg(), ArgTy}});

      if (Status != LegalizerHelper::Legalized)
        return false;
      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));
      // The libcall result needs to be turned into a proper 1-bit value,
      // taking into account how the different comparison routines report
      // their result.
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // Already a 0 or 1; just truncate it back to 1 bit.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // Compare the returned value against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to an integer constant, preserving the bit pattern.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}
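// A rough end-to-end sketch of the G_FCMP path on a soft-float AEABI target:
//   %r:_(s1) = G_FCMP floatpred(ueq), %a:_(s32), %b:_(s32)
// needs two libcalls, so it becomes calls to __aeabi_fcmpeq and
// __aeabi_fcmpun; each 0-or-1 result is truncated to s1 and the two values
// are OR'd together to produce %r.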