using namespace TargetOpcode;
using namespace LegalizeActions;
using namespace LegalityPredicates;
bool Is64Bit = Subtarget.is64Bit();
bool HasSSE1 = Subtarget.hasSSE1();
bool HasSSE2 = Subtarget.hasSSE2();
bool HasSSE41 = Subtarget.hasSSE41();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX2 = Subtarget.hasAVX2();
bool HasVLX = Subtarget.hasVLX();
bool HasDQI = Subtarget.hasAVX512() && Subtarget.hasDQI();
bool HasBWI = Subtarget.hasAVX512() && Subtarget.hasBWI();
bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();

const LLT sMaxScalar = Subtarget.is64Bit() ? s64 : s32;
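// Widest legal vector type for each element size, keyed on the available
// vector extension (SSE, AVX, or AVX-512).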
const LLT s8MaxVector  = HasAVX512 ? v64s8  : HasAVX ? v32s8  : v16s8;
const LLT s16MaxVector = HasAVX512 ? v32s16 : HasAVX ? v16s16 : v8s16;
const LLT s32MaxVector = HasAVX512 ? v16s32 : HasAVX ? v8s32  : v4s32;
const LLT s64MaxVector = HasAVX512 ? v8s64  : HasAVX ? v4s64  : v2s64;
return typeInSet(0, {p0, s1, s8, s16, s32, s64})(Query) ||

return typeInSet(0, {p0, s8, s16, s32})(Query) ||

.widenScalarToNextPow2(0, 8)
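// G_MERGE_VALUES and G_UNMERGE_VALUES share their rules; the wide ("big")
// type is operand 0 for merges and operand 1 for unmerges.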
for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
  unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
  unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

  switch (Q.Types[BigTyIdx].getSizeInBits()) {

  switch (Q.Types[LitTyIdx].getSizeInBits()) {
if (Is64Bit && typeInSet(0, {s64})(Query))
if (HasSSE2 && typeInSet(0, {v16s8, v8s16, v4s32, v2s64})(Query))
if (HasAVX2 && typeInSet(0, {v32s8, v16s16, v8s32, v4s64})(Query))
if (HasAVX512 && typeInSet(0, {v16s32, v8s64})(Query))
if (HasBWI && typeInSet(0, {v64s8, v32s16})(Query))

.clampMinNumElements(0, s8, 16)
return typePairInSet(0, 1, {{s8, s1}, {s16, s1}, {s32, s1}})(Query) ||
.widenScalarToNextPow2(0, 32)
if (Is64Bit && typeInSet(0, {s64})(Query))
if (HasSSE2 && typeInSet(0, {v8s16})(Query))
if (HasSSE41 && typeInSet(0, {v4s32})(Query))
if (HasAVX2 && typeInSet(0, {v16s16, v8s32})(Query))
if (HasAVX512 && typeInSet(0, {v16s32})(Query))
if (HasDQI && typeInSet(0, {v8s64})(Query))
if (HasDQI && HasVLX && typeInSet(0, {v2s64, v4s64})(Query))
if (HasBWI && typeInSet(0, {v32s16})(Query))

.clampMinNumElements(0, s16, 8)
return typeInSet(0, {s8, s16, s32})(Query) ||
.widenScalarToNextPow2(0, 32)

return typeInSet(0, {s8, s16, s32})(Query) ||
.clampScalar(0, s8, sMaxScalar);

return typePairInSet(0, 1, {{s8, s8}, {s16, s8}, {s32, s8}})(Query) ||
.clampScalar(0, s8, sMaxScalar)
if (Is64Bit && typeInSet(0, {s64})(Query))
if (HasSSE2 && typeInSet(0, {v16s8, v8s16, v4s32, v2s64})(Query))
if (HasAVX && typeInSet(0, {v32s8, v16s16, v8s32, v4s64})(Query))
if (HasAVX512 && typeInSet(0, {v64s8, v32s16, v16s32, v8s64})(Query))

.clampMinNumElements(0, s8, 16)
const std::initializer_list<LLT> IntTypes32 = {s8, s16, s32, p0};
const std::initializer_list<LLT> IntTypes64 = {s8, s16, s32, s64, p0};

.clampScalar(0, s8, s8)
return Query.Types[0] == s32 ||
       (Subtarget.is64Bit() && Query.Types[0] == s64);
.widenScalarToNextPow2(0, 32)
return Subtarget.hasPOPCNT() &&
.widenScalarToNextPow2(1, 16)

return Subtarget.hasLZCNT() &&
.widenScalarToNextPow2(1, 16)

return (Query.Opcode == G_CTTZ_ZERO_UNDEF || Subtarget.hasBMI()) &&
.widenScalarToNextPow2(1, 16)
return typeInSet(0, {s8, s16, s32, p0})(Query) ||
       (UseX87 && typeIs(0, s80)(Query)) ||
       (Is64Bit && typeIs(0, s64)(Query)) ||
       (HasSSE1 && typeInSet(0, {v16s8, v8s16, v4s32, v2s64})(Query)) ||
       (HasAVX && typeInSet(0, {v32s8, v16s16, v8s32, v4s64})(Query)) ||
       (HasAVX512 && typeInSet(0, {v64s8, v32s16, v16s32, v8s64})(Query));
.clampMinNumElements(0, s8, 16)
.clampMaxNumElements(0, s16, HasAVX512 ? 32 : (HasAVX ? 16 : 8))
.clampMaxNumElements(0, s32, HasAVX512 ? 16 : (HasAVX ? 8 : 4))
.clampMaxNumElements(0, s64, HasAVX512 ? 8 : (HasAVX ? 4 : 2))
.widenScalarToNextPow2(0, 32)
const std::initializer_list<LLT> PtrTypes32 = {s1, s8, s16, s32};
const std::initializer_list<LLT> PtrTypes64 = {s1, s8, s16, s32, s64};

.maxScalar(0, sMaxScalar)

.widenScalarToNextPow2(1, 32)
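// Loads and stores: progressively wider memory types (scalar, then 128-,
// 256-, and 512-bit vectors) become legal as wider vector units are available.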
for (unsigned Op : {G_LOAD, G_STORE}) {

  Action.legalForTypesWithMemDesc({{s8, p0, s1, 1},
                                   {v4s8, p0, v4s8, 1}});

  Action.legalForTypesWithMemDesc({{s64, p0, s8, 1},
                                   {v2s32, p0, v2s32, 1}});

  Action.legalForTypesWithMemDesc({{v4s32, p0, v4s32, 1}});

  Action.legalForTypesWithMemDesc({{v16s8, p0, v16s8, 1},
                                   {v8s16, p0, v8s16, 1},
                                   {v2s64, p0, v2s64, 1},
                                   {v2p0, p0, v2p0, 1}});

  Action.legalForTypesWithMemDesc({{v32s8, p0, v32s8, 1},
                                   {v16s16, p0, v16s16, 1},
                                   {v8s32, p0, v8s32, 1},
                                   {v4s64, p0, v4s64, 1},
                                   {v4p0, p0, v4p0, 1}});

  Action.legalForTypesWithMemDesc({{v64s8, p0, v64s8, 1},
                                   {v32s16, p0, v32s16, 1},
                                   {v16s32, p0, v16s32, 1},
                                   {v8s64, p0, v8s64, 1}});

  Action.widenScalarToNextPow2(0, 8)
      .clampScalar(0, s8, sMaxScalar)
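// Sign- and zero-extending loads from narrower in-memory types.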
for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {

  Action.legalForTypesWithMemDesc({{s16, p0, s8, 1},

  Action.legalForTypesWithMemDesc({{s64, p0, s8, 1},
return typeInSet(0, {s8, s16, s32})(Query) ||
       (Query.Opcode == G_ANYEXT && Query.Types[0] == s128) ||
       (Is64Bit && Query.Types[0] == s64);
.widenScalarToNextPow2(0, 8)
return (typeInSet(0, {s32, s64})(Query)) ||

return (typeInSet(0, {s32, s64})(Query)) ||
       (HasSSE1 && typeInSet(0, {v4s32})(Query)) ||
       (HasSSE2 && typeInSet(0, {v2s64})(Query)) ||
       (HasAVX && typeInSet(0, {v8s32, v4s64})(Query)) ||
       (HasAVX512 && typeInSet(0, {v16s32, v8s64})(Query)) ||
return (HasSSE1 && typePairInSet(0, 1, {{s8, s32}})(Query)) ||
.clampScalar(0, s8, s8)

return (HasSSE2 && typePairInSet(0, 1, {{s64, s32}})(Query)) ||

return (HasSSE2 && typePairInSet(0, 1, {{s32, s64}})(Query)) ||

.clampScalar(1, s32, sMaxScalar)

.clampScalar(1, s32, HasSSE2 ? s64 : s32)
return (HasSSE1 && typeInSet(0, {v4s32})(Query)) ||
       (HasSSE2 && typeInSet(0, {v2s64, v8s16, v16s8})(Query)) ||
       (HasAVX && typeInSet(0, {v4s64, v8s32, v16s16, v32s8})(Query)) ||
       (HasAVX512 && typeInSet(0, {v8s64, v16s32, v32s16, v64s8})(Query));
.clampNumElements(0, v16s8, s8MaxVector)
unsigned SubIdx = Query.Opcode == G_EXTRACT ? 0 : 1;
unsigned FullIdx = Query.Opcode == G_EXTRACT ? 1 : 0;

{v2s64, v4s64}})(Query)) ||
{v4s64, v8s64}})(Query));

{v2s64, v4s64}})(Query)) ||
{v4s64, v8s64}})(Query));
.legalFor({{s8, s32}, {s16, s32}, {s32, s32}, {s64, s32}, {p0, s32}})
.widenScalarToNextPow2(0, 8)

G_STACKRESTORE}).lower();

.legalFor({s8, s16, s32, s64, p0})
.widenScalarToNextPow2(0, 8)
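// Custom legalization entry point: dispatch on the generic opcode.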
switch (MI.getOpcode()) {
case TargetOpcode::G_BUILD_VECTOR:
  return legalizeBuildVector(MI, MRI, Helper);
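// legalizeBuildVector: replace a G_BUILD_VECTOR whose sources are all
// constants (or undef) with a load from a constant-pool entry.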
const auto &BuildVector = cast<GBuildVector>(MI);
Register Dst = BuildVector.getReg(0);
LLT DstTy = MRI.getType(Dst);

for (unsigned i = 0; i < BuildVector.getNumSources(); ++i) {
  Register Source = BuildVector.getSourceReg(i);

  CstIdxs.emplace_back(ConstantInt::get(Ctx, ValueAndReg->Value));

  CstIdxs.emplace_back(ConstantFP::get(Ctx, FPValueAndReg->Value));

  if (getOpcodeDef<GImplicitDef>(Source, MRI)) {

unsigned AddrSpace = DL.getDefaultGlobalsAddressSpace();

MI.eraseFromParent();