40 for (
auto Reg : RegList) {
45 const size_t RequiredGprsUponSplit = 2;
46 if (AvailableRegs.
size() < RequiredGprsUponSplit)
50 for (
unsigned I = 0;
I < RequiredGprsUponSplit;
I++) {
57 assert(
Reg &&
"Expecting a register will be available");
69 static const MCPhysReg RegListZMM[] = {X86::ZMM0, X86::ZMM1, X86::ZMM2,
70 X86::ZMM3, X86::ZMM4, X86::ZMM5};
75 static const MCPhysReg RegListYMM[] = {X86::YMM0, X86::YMM1, X86::YMM2,
76 X86::YMM3, X86::YMM4, X86::YMM5};
80 static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
81 X86::XMM3, X86::XMM4, X86::XMM5};
86 static const MCPhysReg RegListGPR[] = {X86::RCX, X86::RDX, X86::R8, X86::R9};
97 bool Is64bit = static_cast<const X86Subtarget &>(
101 for (
auto Reg : RegList) {
105 assert(AssigedReg ==
Reg &&
"Expecting a valid register allocation");
118 "an available register.");
133 if (ArgFlags.
isHva())
167 if (
TRI->regsOverlap(
Reg, X86::XMM4) ||
168 TRI->regsOverlap(
Reg, X86::XMM5))
171 if (!ArgFlags.
isHva()) {
180 return ArgFlags.
isHva();
193 if (ArgFlags.
isHva())
207 if (ArgFlags.
isHva())
232 "stackmap and patchpoint intrinsics.");
243 static const unsigned NumRegs =
sizeof(RegList) /
sizeof(RegList[0]);
260 if (PendingMembers.
empty()) {
278 bool UseRegs = PendingMembers.
size() <=
std::min(2U, NumRegs - FirstFree);
280 for (
auto &It : PendingMembers) {
282 It.convertToReg(State.
AllocateReg(RegList[FirstFree++]));
288 PendingMembers.clear();
303 unsigned SlotSize = Is64Bit ? 8 : 4;
305 if (ArgCount == 1 && ValNo == 0) {
309 }
else if (ArgCount == 2 && ValNo == 0) {
314 }
else if (ArgCount == 2 && ValNo == 1) {
326 if (Is64Bit && ArgCount == 2)
344 #include "X86GenCallingConv.inc"
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
const_iterator end(StringRef path)
Get end iterator over path.
static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
This class represents lattice values for constants.
LLVM_NODISCARD bool empty() const
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
bool isVector() const
Return true if this is a vector value type.
void push_back(const T &Elt)
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Vectorcall calling convention has special handling for vector types or HVA for 64 bit arch.
bool isAllocated(MCRegister Reg) const
isAllocated - Return true if the specified register (or an alias) is allocated.
unsigned const TargetRegisterInfo * TRI
static bool CC_X86_64_Pointer(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
MachineFunction & getMachineFunction() const
Function & getFunction()
Return the LLVM function that this machine code represents.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element POD-ness.
void addLoc(const CCValAssign &V)
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
static bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &, CCValAssign::LocInfo &, ISD::ArgFlagsTy &, CCState &)
SmallVectorImpl< CCValAssign > & getPendingLocs()
bool isSecArgPass() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
static bool CC_X86_Intr(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
X86 interrupt handlers can only take one or two stack arguments, but if there are two arguments,...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily all virtual registers.
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
unsigned AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
bool IsShadowAllocatedReg(MCRegister Reg) const
A shadow allocated register is a register that was allocated but wasn't added to the location list (Locs).
static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
CCState - This class holds information needed while lowering arguments and return values.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static ArrayRef< MCPhysReg > CC_X86_64_VectorCallGetGPRs()
bool is512BitVector() const
Return true if this is a 512-bit vector type.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, unsigned Offset, MVT LocVT, LocInfo HTP)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
Vectorcall calling convention has special handling for vector types or HVA for 32 bit arch.
static bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
When regcall calling convention compiled to 32 bit arch, special treatment is required for 64 bit masks.
static bool is64Bit(const char *name)
static ArrayRef< MCPhysReg > CC_X86_VectorCallGetSSEs(const MVT &ValVT)