33#include "llvm/IR/IntrinsicsXCore.h"
42#define DEBUG_TYPE "xcore-lower"
// XCoreTargetLowering::LowerOperation (excerpt)
  switch (Op.getOpcode()) {
  // ...
  case ISD::LOAD:
    return LowerLOAD(Op, DAG);
  // ...
  case ISD::SUB:
    return ExpandADDSUB(Op.getNode(), DAG);
  // ...
  }

// XCoreTargetLowering::ReplaceNodeResults (excerpt)
  switch (N->getOpcode()) {
  // ...
    Results.push_back(ExpandADDSUB(N, DAG));
  // ...
  }
// XCoreTargetLowering::getGlobalAddressWrapper (excerpt)
  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  // ...

// IsSmallObject (excerpt)
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  // ...

// XCoreTargetLowering::LowerGlobalAddress (excerpt)
  // Fold only positive offsets that are a multiple of the word size.
  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
  // ...
  GA = getGlobalAddressWrapper(GA, GV, DAG);
  // Handle the remaining 0-3 bytes of the offset with a separate add.
  if (Offset != FoldedOffset) {
    // ...
  }
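// --- Illustrative sketch (not part of the original file) ---------------------
// The folding above keeps the target global address word-aligned and emits the
// 0-3 byte remainder as a separate ADD. A standalone model of the arithmetic
// (hypothetical helper names):
#include <algorithm>
#include <cstdint>

static int64_t sketchFoldedOffset(int64_t Offset) {
  return std::max(Offset & ~int64_t(3), int64_t(0)); // word-aligned part
}

static int64_t sketchOffsetRemainder(int64_t Offset) {
  return Offset - sketchFoldedOffset(Offset); // e.g. Offset 10 -> 8 + 2
}
// -----------------------------------------------------------------------------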
// XCoreTargetLowering::LowerBlockAddress (excerpt)
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  // ...

// XCoreTargetLowering::LowerConstantPool (excerpt)
  EVT PtrVT = Op.getValueType();
  // ...
  if (CP->isMachineConstantPoolEntry()) {
    // ... DAG.getTargetConstantPool(..., CP->getAlign(), CP->getOffset());
  }
  // ...
// XCoreTargetLowering::LowerBR_JT (excerpt)
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  // ...
  unsigned JTI = JT->getIndex();
  // ...
  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    // Small jump tables are emitted with XCoreISD::BR_JT.
    // ...
  }
  assert((NumEntries >> 31) == 0);
  // Larger tables use XCoreISD::BR_JT32 with a scaled index.
// XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset (excerpt)
SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    /* ... */) const {
  if ((Offset & 0x3) == 0) {
    // Word-aligned offset: a single word load suffices.
    // ...
  }
  // Otherwise use a pair of consecutive word-aligned loads plus shifting.
  int32_t LowOffset = HighOffset - 4;
  // ...
  if (/* Base is a global */ dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    // ...
  }
  // ...
}

// XCoreTargetLowering::LowerLOAD (excerpt)
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  // If the target accepts this access at its actual alignment, keep the
  // generic lowering.
  if (allowsMemoryAccessForAlignment(/* ... */,
                                     LD->getMemoryVT(), *LD->getMemOperand()))
    // ...
  if (!LD->isVolatile()) {
    // Base plus constant offset from a word-aligned base: reuse the helper.
    // ...
    Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
    return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                  /* ... */);
    // ...
    return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                  /* ... */);
  }
  if (LD->getAlign() == Align(2)) {
    // Assemble the word from two halfword extloads: Low | (High << 16).
    // ... getExtLoad(..., MVT::i16, Align(2),
    //                LD->getMemOperand()->getFlags());
    // ... getExtLoad(..., LD->getPointerInfo().getWithOffset(2), MVT::i16,
    //                Align(2), LD->getMemOperand()->getFlags());
    // ...
  }
  // Otherwise fall back to a runtime library call.
  Args.push_back(Entry);
  // ...
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(/* ... */);
  // ...
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
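// --- Illustrative sketch (not part of the original file) ---------------------
// Standalone model of the Align(2) path above, assuming a little-endian target
// as on XCore: the 32-bit value is rebuilt from two halfword loads as
// Low | (High << 16).
#include <cstdint>
#include <cstring>

static uint32_t sketchLoadWordAlign2(const unsigned char *P) {
  uint16_t Low, High;
  std::memcpy(&Low, P, 2);      // extload i16 at offset 0
  std::memcpy(&High, P + 2, 2); // extload i16 at offset 2
  return (uint32_t)Low | ((uint32_t)High << 16);
}
// -----------------------------------------------------------------------------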
// XCoreTargetLowering::LowerSTORE (excerpt)
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  // If the target accepts this access at its actual alignment, keep the
  // generic lowering.
  if (allowsMemoryAccessForAlignment(/* ... */,
                                     ST->getMemoryVT(), *ST->getMemOperand()))
    // ...
  if (ST->getAlign() == Align(2)) {
    // Split the word into two truncating halfword stores (low, then high at
    // offset 2).
    // ... getTruncStore(Chain, dl, High, HighAddr,
    //                   ST->getPointerInfo().getWithOffset(2), ...);
    // ...
  }
  // Otherwise fall back to a runtime library call.
  Args.push_back(Entry);
  // ...
  Args.push_back(Entry);
  // ...
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(/* ... */);
  // ...
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
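// --- Illustrative sketch (not part of the original file) ---------------------
// Standalone model of the Align(2) store path above (little-endian): the low
// 16 bits go to offset 0 and the high 16 bits to offset 2.
#include <cstdint>
#include <cstring>

static void sketchStoreWordAlign2(unsigned char *P, uint32_t V) {
  uint16_t Low = (uint16_t)(V & 0xFFFF); // truncstore i16 at offset 0
  uint16_t High = (uint16_t)(V >> 16);   // truncstore i16 at offset 2
  std::memcpy(P, &Low, 2);
  std::memcpy(P + 2, &High, 2);
}
// -----------------------------------------------------------------------------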
539 "Unexpected operand to lower!");
556 "Unexpected operand to lower!");
576 SDValue &Addend1,
bool requireIntermediatesHaveOneUse)
593 if (requireIntermediatesHaveOneUse && !AddOp.
hasOneUse())
597 if (requireIntermediatesHaveOneUse && !OtherOp.
hasOneUse())
633 if (
N->getOperand(0).getOpcode() ==
ISD::MUL) {
634 Mul =
N->getOperand(0);
636 }
else if (
N->getOperand(1).getOpcode() ==
ISD::MUL) {
637 Mul =
N->getOperand(1);
643 SDValue LL, RL, AddendL, AddendH;
664 if (LHSSB > 32 && RHSSB > 32) {
693 "Unknown operand to lower!");
696 if (
SDValue Result = TryExpandADDWithMul(
N, DAG))
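// --- Illustrative sketch (not part of the original file) ---------------------
// The i64 ADD/SUB expansion works on 32-bit halves, threading a carry (or
// borrow) from the low half into the high half in the spirit of the
// XCoreISD::LADD/LSUB node pair. Standalone model of the ADD case:
#include <cstdint>

static uint64_t sketchAdd64ViaHalves(uint32_t LHSL, uint32_t LHSH,
                                     uint32_t RHSL, uint32_t RHSH) {
  uint64_t LowSum = (uint64_t)LHSL + RHSL;
  uint32_t Lo = (uint32_t)LowSum;
  uint32_t Carry = (uint32_t)(LowSum >> 32); // carry-out of the low add
  uint32_t Hi = LHSH + RHSH + Carry;         // the high add consumes the carry
  return ((uint64_t)Hi << 32) | Lo;
}
// -----------------------------------------------------------------------------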
// XCoreTargetLowering::LowerVAARG (excerpt)
  EVT VT = Node->getValueType(0);
  // ...
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();

// XCoreTargetLowering::LowerFRAMEADDR (excerpt)
  // Depths > 0 are not supported.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    // ...

// XCoreTargetLowering::LowerRETURNADDR (excerpt)
  // Depths > 0 are not supported.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    // ...

// XCoreTargetLowering::LowerEH_RETURN (excerpt)
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

// XCoreTargetLowering::LowerADJUST_TRAMPOLINE (excerpt)
  return Op.getOperand(0);

// XCoreTargetLowering::LowerINIT_TRAMPOLINE (excerpt)
  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

// XCoreTargetLowering::LowerINTRINSIC_WO_CHAIN (excerpt)
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // ...
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    // ... (builds the CRC8 node from Op.getOperand(1), Op.getOperand(2),
    //      Op.getOperand(3))
939 "setInsertFencesForAtomic(true) expects unordered / monotonic");
941 if (
N->getAlign() <
Align(4))
944 N->getChain(),
N->getBasePtr(),
N->getPointerInfo(),
945 N->getAlign(),
N->getMemOperand()->getFlags(),
946 N->getAAInfo(),
N->getRanges());
949 if (
N->getAlign() <
Align(2))
952 N->getBasePtr(),
N->getPointerInfo(),
MVT::i16,
953 N->getAlign(),
N->getMemOperand()->getFlags(),
958 N->getBasePtr(),
N->getPointerInfo(),
MVT::i8,
959 N->getAlign(),
N->getMemOperand()->getFlags(),
970 "setInsertFencesForAtomic(true) expects unordered / monotonic");
972 if (
N->getAlign() <
Align(4))
975 N->getPointerInfo(),
N->getAlign(),
976 N->getMemOperand()->getFlags(),
N->getAAInfo());
979 if (
N->getAlign() <
Align(2))
982 N->getBasePtr(),
N->getPointerInfo(),
MVT::i16,
983 N->getAlign(),
N->getMemOperand()->getFlags(),
988 N->getBasePtr(),
N->getPointerInfo(),
MVT::i8,
989 N->getAlign(),
N->getMemOperand()->getFlags(),
// Return any extra MachineMemOperand flags for the IR instruction I (atomic
// accesses are marked MOVolatile so later combines treat them conservatively).
MachineMemOperand::Flags
XCoreTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  if (auto *SI = dyn_cast<StoreInst>(&I))
    // ...
  if (auto *LI = dyn_cast<LoadInst>(&I))
    // ...
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    // ...
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    // ...
}
#include "XCoreGenCallingConv.inc"
// XCoreTargetLowering::LowerCall (excerpt)
  // Supported calling conventions are all lowered via LowerCCCCallTo.
  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                        Outs, OutVals, Ins, dl, DAG, InVals);

// LowerCallResult - Lower the result values of a call into the appropriate
// copies out of physical registers / memory locations. (excerpt)
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
  }
  // Copy results that were returned in the caller's stack area.
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    // ...
    InVals[index] = load;
  }
  if (!MemOpChains.empty())
    // ...
// LowerCCCCallTo - lower a call according to the XCore calling convention:
// arguments go in registers or on the stack, bracketed by CALLSEQ_START and
// CALLSEQ_END. (excerpt)
SDValue XCoreTargetLowering::LowerCCCCallTo(/* ... */) const {
  // ...
  // The ABI requires one stack slot to be available to the callee on entry
  // (used for saving lr).
  CCInfo.AllocateStack(4, Align(4));
  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
  // ...
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  // ...
  // Walk the register/memloc assignments, inserting copies and stores.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // ...
  }
  // Transform all of the outgoing-argument store nodes into one TokenFactor.
  if (!MemOpChains.empty())
    // ...
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and glue operands, copying the outgoing arguments into registers.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    // ...
  }
  // ...
  // Add the argument registers to the call's operand list so that they are
  // known to be live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    // ... DAG.getRegister(RegsToPass[i].first,
    //                     RegsToPass[i].second.getValueType())
// XCoreTargetLowering::LowerFormalArguments (excerpt)
SDValue XCoreTargetLowering::LowerFormalArguments(/* ... */) const {
  // ...
  return LowerCCCArguments(Chain, CallConv, isVarArg,
                           Ins, dl, DAG, InVals);
}

// LowerCCCArguments - transform physical registers into virtual registers and
// generate loads for arguments passed on the stack. (excerpt)
SDValue XCoreTargetLowering::LowerCCCArguments(/* ... */) const {
  // ...
  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
  // ...
  unsigned LRSaveSize = StackSlotSize;
  // ...
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // Arguments passed in registers:
      // ...
      errs() << "LowerFormalArguments Unhandled argument type: "
      // ...
      Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
      // ...
    // Arguments passed on the stack:
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
        // ...
      }
      // ...
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    // ...
  }

  if (isVarArg) {
    // Spill the remaining integer argument registers to the stack so that
    // va_arg can find them.
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < std::size(ArgRegs)) {
      // Save the remaining registers, storing higher register numbers at a
      // higher address.
      for (int i = std::size(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // ...
        if (i == (int)FirstVAReg) {
          // ... (this slot becomes the varargs frame index)
        } else {
          offset -= StackSlotSize;
          // ...
        }
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        // ...
      }
    }
    // ...
  }

  if (!CFRegNode.empty())
    // ...

  // Copy byval aggregate arguments into freshly created stack objects.
  for (const ArgDataPair &ArgDI : ArgData) {
    if (ArgDI.Flags.isByVal() && ArgDI.Flags.getByValSize()) {
      unsigned Size = ArgDI.Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI.Flags.getNonZeroByValAlign());
      // ...
    }
    // ...
  }

  if (!MemOps.empty()) {
    // ...
  }
// XCoreTargetLowering::CanLowerReturn (excerpt)
bool XCoreTargetLowering::CanLowerReturn(/* ... */) const {
  // ...
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    // ...
  // Returning values on the stack is not supported for variadic functions.
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    // ...
}

// XCoreTargetLowering::LowerReturn (excerpt)
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
  // ...
  // Return values that do not fit in registers are written to stack slots
  // reserved by the caller.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
    // ...
    // ... DAG.getStore(Chain, dl, OutVals[i], FIN, ...);
    // ...
  }
  if (!MemOpChains.empty())
    // ...
  // Copy the register return values into the expected registers, gluing them
  // to the return node.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
  }
  // ...
  RetOps.push_back(Flag);
// XCoreTargetLowering::EmitInstrWithCustomInserter (excerpt)
  assert((MI.getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");
  // ...
  // Build the usual select diamond: a conditional branch around copy0MBB,
  // with both paths joining in sinkMBB where a PHI picks the result.
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  // ...
  MI.eraseFromParent(); // The pseudo instruction is gone now.
// XCoreTargetLowering::PerformDAGCombine (excerpt)
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  // ...
  switch (N->getOpcode()) {
  // ...
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      // These instructions ignore the high bits of the value operand, so the
      // demanded bits can be narrowed.
      // ... TargetLoweringOpt TLO(DAG, ..., !DCI.isBeforeLegalizeOps());
      // ...
        DCI.CommitTargetLoweringOpt(TLO);
      break;
    }
    case Intrinsic::xcore_setpt: {
      // ... TargetLoweringOpt TLO(DAG, ..., !DCI.isBeforeLegalizeOps());
      // ...
        DCI.CommitTargetLoweringOpt(TLO);
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    // fold (ladd x, 0, y) -> 0, add x, y iff the carry is unused and y has
    // only the low bit set.
    if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
      // ...
      if ((Known.Zero & Mask) == Mask) {
        // ...
      }
    }
    break;
  }
  case XCoreISD::LSUB: {
    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set.
    // ...
      if ((Known.Zero & Mask) == Mask) {
        // ...
      }
    // fold (lsub x, 0, y) -> 0, sub x, y iff the borrow is unused and y has
    // only the low bit set.
    if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
      // ...
      if ((Known.Zero & Mask) == Mask) {
        // ...
      }
    }
    break;
  }
  case XCoreISD::LMUL: {
    // Canonicalize a multiplicative constant to the RHS.
    if ((N0C && !N1C) ||
        // ...
    // lmul(x, 0, a, b)
    if (N1C && N1C->isZero()) {
      // If the high result is unused, fold to add(a, b).
      if (N->hasNUsesOfValue(0, 0)) {
        // ...
      }
      // ...
    }
    break;
  }
  case ISD::ADD: {
    // Fold expressions such as add(add(mul(x,y),a),b) into an LMUL node.
    SDValue Mul0, Mul1, Addend0, Addend1;
    // ... (builds XCoreISD::LMUL from Mul0, Mul1, Addend0, Addend1)
    // ... (the 64-bit variant operates on the low halves Addend0L, Addend1L)
    // ...
  }
  case ISD::STORE: {
    // Replace an unaligned store of an unaligned load with a memmove.
    if (!DCI.isBeforeLegalize() ||
        allowsMemoryAccessForAlignment(/* ... */
                                       *ST->getMemOperand()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    // ...
    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    Align Alignment = ST->getAlign();

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlign() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          /* ... */) {
        // ...
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
                              /* ... */ Alignment, false, isTail,
                              ST->getPointerInfo(), LD->getPointerInfo());
      }
    }
    break;
  }
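// --- Illustrative sketch (not part of the original file) ---------------------
// Seen from C, the combine above collapses a single-use "load then store" of
// the same unaligned memory type into a byte-wise move of StoreBits / 8 bytes.
#include <cstring>

static void sketchUnalignedWordCopy(void *Dst, const void *Src) {
  std::memmove(Dst, Src, 4); // a 32-bit store fed by a 32-bit load, one use
}
// -----------------------------------------------------------------------------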
// XCoreTargetLowering::computeKnownBitsForTargetNode (excerpt)
void XCoreTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, /* ... */ const APInt &DemandedElts, /* ... */
    unsigned Depth) const {
  // ...
  switch (Op.getOpcode()) {
  // ...
    // The carry/borrow result (value 1) of LADD/LSUB is known to be 0 or 1.
    if (Op.getResNo() == 1) {
      // ...
    }
  // ...
    unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // ...
    case Intrinsic::xcore_getts:
      // ... (only the low 16 bits can be set)
    case Intrinsic::xcore_int:
    case Intrinsic::xcore_inct:
      // ... (only the low 8 bits can be set)
    case Intrinsic::xcore_testct:
      // ... (result is 0 or 1)
    case Intrinsic::xcore_testwct:
      // ... (result is in the range 0 to 4)
  // ...
}
static bool isImmUs(int64_t val) { return (val >= 0 && val <= 11); }
static bool isImmUs2(int64_t val) { return (val % 2 == 0 && isImmUs(val / 2)); }
static bool isImmUs4(int64_t val) { return (val % 4 == 0 && isImmUs(val / 4)); }

// XCoreTargetLowering::isLegalAddressingMode (excerpt)
  unsigned Size = DL.getTypeAllocSize(Ty);
  // ...
  // reg + imm addressing; the legal immediate range depends on access size:
  if (AM.Scale == 0) {
    // ... (byte accesses: isImmUs(AM.BaseOffs))
  }
  // ...
  if (AM.Scale == 0) {
    // ... (16-bit accesses: isImmUs2(AM.BaseOffs))
  }
  // ...
  if (AM.Scale == 0) {
    // ... (word and larger accesses: isImmUs4(AM.BaseOffs))
  }
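// --- Illustrative sketch (not part of the original file) ---------------------
// Standalone restatement of the reg + immediate ranges checked above: the legal
// unscaled offset depends on the access size (hypothetical helper).
#include <cstdint>

static bool sketchLegalRegPlusImm(int64_t Offset, unsigned AccessSize) {
  auto immUs = [](int64_t v) { return v >= 0 && v <= 11; };
  switch (AccessSize) {
  case 1:         return immUs(Offset);                        // isImmUs
  case 2: case 3: return Offset % 2 == 0 && immUs(Offset / 2); // isImmUs2
  default:        return Offset % 4 == 0 && immUs(Offset / 4); // isImmUs4
  }
}
// -----------------------------------------------------------------------------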
// XCoreTargetLowering::getRegForInlineAsmConstraint (excerpt)
std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(/* ... */) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r': // general-purpose registers
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // ...
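// --- Illustrative sketch (not part of the original file) ---------------------
// How the 'r' constraint above might be exercised from source level, assuming
// a GCC/Clang-style inline-asm toolchain targeting XCore and its three-operand
// "add" mnemonic: all 'r' operands are allocated from GRRegs.
static int sketchAddViaInlineAsm(int A, int B) {
  int Res;
  __asm__("add %0, %1, %2" : "=r"(Res) : "r"(A), "r"(B));
  return Res;
}
// -----------------------------------------------------------------------------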