Excerpts from XCoreISelLowering.cpp, the XCore target's SelectionDAG lowering (XCoreTargetLowering).
#include "llvm/IR/IntrinsicsXCore.h"
// ...
#define DEBUG_TYPE "xcore-lower"

// LowerOperation: dispatch for custom-lowered opcodes.
  switch (Op.getOpcode()) {
  // ...
  case ISD::SUB:
    return ExpandADDSUB(Op.getNode(), DAG);
  // ...

// ReplaceNodeResults: rebuild results for nodes with illegal result types.
  switch (N->getOpcode()) {
  // ...
    Results.push_back(ExpandADDSUB(N, DAG));
  // ...

// Global-address lowering: only positive, word-aligned offsets are folded
// into the address.
  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  // ...
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  // ...
  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
  // ...
  GA = getGlobalAddressWrapper(GA, GV, DAG);
  // ...
  if (Offset != FoldedOffset) {
  // ...

// Block-address and constant-pool lowering.
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  // ...
  EVT PtrVT = Op.getValueType();
  // ...
  if (CP->isMachineConstantPoolEntry()) {
    // ... CP->getAlign(), CP->getOffset());
  // ...

// BR_JT lowering: small jump tables can use the short branch form.
  unsigned JTI = JT->getIndex();
  // ...
  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    // ...
  }
  assert((NumEntries >> 31) == 0);
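
The BR_JT fragment above only takes the short-branch path when the jump table has at most 32 entries (entries are emitted inline, per the EK_Inline encoding mentioned in this file). As a rough source-level illustration — whether a given switch actually becomes a jump table depends on the DAG builder's heuristics — a dense switch like the following is the kind of input that reaches this path:

// Hypothetical example only: a dense switch that may be lowered via BR_JT.
int classify(int v) {
  switch (v) {
  case 0: return 10;
  case 1: return 11;
  case 2: return 13;
  case 3: return 17;
  case 4: return 19;
  default: return -1;
  }
}
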
SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    // ...
  if ((Offset & 0x3) == 0) {
    // A single aligned word load is enough.
    // ...
  }
  // Otherwise use a pair of consecutive word-aligned loads plus shifts.
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  // ...
  if (/* ... */ dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
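
For the unaligned-offset path above, the value is assembled from the two aligned words that straddle the target address. A minimal sketch of that splice in plain C++ (hypothetical helper, assuming a little-endian target and an offset that is not a multiple of 4):

#include <cstdint>

// Sketch only: combine the two aligned words around a misaligned offset.
// Assumes offset > 0 and (offset & 3) != 0.
uint32_t loadWordStraddling(const uint32_t *wordBase, int32_t offset) {
  int32_t highOffset = (offset + 3) & ~3;   // alignTo(offset, 4)
  int32_t lowOffset = highOffset - 4;
  uint32_t low = wordBase[lowOffset / 4];
  uint32_t high = wordBase[highOffset / 4];
  unsigned lowShift = (offset - lowOffset) * 8;
  return (low >> lowShift) | (high << (32 - lowShift));
}
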
413 "Unexpected extension type");
417 LD->getMemoryVT(), *
LD->getMemOperand()))
424 if (!
LD->isVolatile()) {
430 Offset = cast<ConstantSDNode>(
BasePtr->getOperand(1))->getSExtValue();
431 return lowerLoadWordFromAlignedBasePlusOffset(
DL, Chain, NewBasePtr,
438 return lowerLoadWordFromAlignedBasePlusOffset(
DL, Chain, NewBasePtr,
443 if (
LD->getAlign() ==
Align(2)) {
446 LD->getMemOperand()->getFlags());
451 LD->getPointerInfo().getWithOffset(2),
MVT::i16,
452 Align(2),
LD->getMemOperand()->getFlags());
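
The Align(2) path above builds the word from two zero-extended halfword loads. A rough C++ equivalent, assuming little-endian byte order (XCore is little-endian) and a pointer known to be 2-byte aligned:

#include <cstdint>

// Sketch of the two-halfword expansion: low half ORed with the high half
// shifted into place.
uint32_t loadWordAlign2(const uint16_t *p) {
  uint32_t low = p[0];   // 16-bit load at offset 0, zero-extended
  uint32_t high = p[1];  // 16-bit load at offset 2, zero-extended
  return low | (high << 16);
}
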
// LowerLOAD fallback: emit a libcall for the misaligned load.
  TargetLowering::ArgListEntry Entry;
  // ...
  Args.push_back(Entry);
  // ...
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      // ...
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  // ...

// LowerSTORE: the same structure for misaligned stores.
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  // ...
  // ... ST->getMemoryVT(), *ST->getMemOperand()))
  // ...
  if (ST->getAlign() == Align(2)) {
    // Expand into two truncating halfword stores.
    // ... Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
  }
  // ...
  // Fallback: libcall for the misaligned store.
  TargetLowering::ArgListEntry Entry;
  // ...
  Args.push_back(Entry);
  // ...
  Args.push_back(Entry);
  // ...
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      // ...
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;

// SMUL_LOHI / UMUL_LOHI lowering asserts:
  // ... "Unexpected operand to lower!");
  // ... "Unexpected operand to lower!");

// isADDADDMUL: match a tree equivalent to add(add(mul(x, y), a), b).
static bool isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1,
                        SDValue &Addend0, SDValue &Addend1,
                        bool requireIntermediatesHaveOneUse)
  // ...
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    // ...
  if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
    // ...

// TryExpandADDWithMul: pick out the multiply operand of the add.
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    // ...
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    // ...
  }
  // ...
  SDValue LL, RL, AddendL, AddendH;
  // ...
  if (LHSSB > 32 && RHSSB > 32) {
    // ...

// ExpandADDSUB: expand an i64 add/sub into 32-bit pieces.
  // ... "Unknown operand to lower!");
  // ...
  if (SDValue Result = TryExpandADDWithMul(N, DAG))
    return Result;
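
ExpandADDSUB splits a 64-bit add or subtract into 32-bit halves joined by a carry or borrow (the XCore LADD/LSUB operations). A minimal sketch of the add case, using a hypothetical helper name:

#include <cstdint>

// Sketch: 64-bit add built from 32-bit halves, carry propagated from the
// low half into the high half (conceptually what LADD provides).
uint64_t add64(uint32_t lhsLo, uint32_t lhsHi, uint32_t rhsLo, uint32_t rhsHi) {
  uint64_t lo = (uint64_t)lhsLo + rhsLo;   // low add, keep the carry
  uint32_t carry = (uint32_t)(lo >> 32);
  uint32_t hi = lhsHi + rhsHi + carry;     // high add plus carry
  return ((uint64_t)hi << 32) | (uint32_t)lo;
}
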
// VAARG lowering.
  EVT VT = Node->getValueType(0);
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  // ...
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  // ...

// FRAMEADDR / RETURNADDR lowering: only depth 0 is supported.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();
  // ...
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();
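
Only depth 0 is handled by the checks above; a non-zero depth makes the lowering bail out. A source-level illustration using the standard builtins:

// Depth-0 queries are the supported case; __builtin_frame_address(1) and
// deeper would not be lowered by this path.
void *currentFrame() { return __builtin_frame_address(0); }
void *currentReturnAddress() { return __builtin_return_address(0); }
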
// EH_RETURN lowering: the handler address is passed in R3.
  unsigned HandlerReg = XCore::R3;
  // ...

// ADJUST_TRAMPOLINE / INIT_TRAMPOLINE lowering.
  return Op.getOperand(0);
  // ...
  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  // ...

// INTRINSIC_WO_CHAIN lowering.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  // ...
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    // ...
    // ... Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  // ...

// ATOMIC_LOAD lowering: unordered / monotonic atomic loads become ordinary
// loads once the natural alignment has been checked.
  // ... "setInsertFencesForAtomic(true) expects unordered / monotonic");
  // i32 case:
  if (N->getAlign() < Align(4))
    // ...
  return DAG.getLoad(/* ... */ N->getChain(), N->getBasePtr(),
                     N->getPointerInfo(), N->getAlign(),
                     N->getMemOperand()->getFlags(), N->getAAInfo(),
                     N->getRanges());
  // i16 case:
  if (N->getAlign() < Align(2))
    // ...
  return DAG.getExtLoad(/* ... */ N->getBasePtr(), N->getPointerInfo(),
                        MVT::i16, N->getAlign(),
                        N->getMemOperand()->getFlags(), /* ... */);
  // i8 case:
  return DAG.getExtLoad(/* ... */ N->getBasePtr(), N->getPointerInfo(),
                        MVT::i8, N->getAlign(),
                        N->getMemOperand()->getFlags(), /* ... */);
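
At the source level, the loads this path handles are relaxed (monotonic) or unordered atomics on naturally aligned objects, for example:

#include <atomic>

// A relaxed atomic word load; with the required alignment it can be
// lowered to an ordinary word load rather than a runtime call.
int loadRelaxed(const std::atomic<int> &counter) {
  return counter.load(std::memory_order_relaxed);
}
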
971 "setInsertFencesForAtomic(true) expects unordered / monotonic");
973 if (
N->getAlign() <
Align(4))
976 N->getPointerInfo(),
N->getAlign(),
977 N->getMemOperand()->getFlags(),
N->getAAInfo());
980 if (
N->getAlign() <
Align(2))
983 N->getBasePtr(),
N->getPointerInfo(),
MVT::i16,
984 N->getAlign(),
N->getMemOperand()->getFlags(),
989 N->getBasePtr(),
N->getPointerInfo(),
MVT::i8,
990 N->getAlign(),
N->getMemOperand()->getFlags(),
MachineMemOperand::Flags
XCoreTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  // Atomic accesses get MOVolatile so the plain loads/stores they were
  // converted to are still treated conservatively.
  if (auto *SI = dyn_cast<StoreInst>(&I))
    // ...
  if (auto *LI = dyn_cast<LoadInst>(&I))
    // ...
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    // ...
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    // ...
#include "XCoreGenCallingConv.inc"

// LowerCall dispatches to the CCC helper:
  return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                        Outs, OutVals, Ins, dl, DAG, InVals);

// LowerCallResult: copy return values out of registers and stack slots.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
    InVals.push_back(Chain.getValue(0));
    // ...
  }
  // ...
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    // ...
    MemOpChains.push_back(load.getValue(1));
    // ...
  }
  if (!MemOpChains.empty())
    // ...
SDValue XCoreTargetLowering::LowerCCCCallTo(
    // ...
  // The ABI reserves one stack slot for the callee to save LR.
  CCInfo.AllocateStack(4, Align(4));
  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
  // ...
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
  // ...
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  // ...
  // Split the outgoing arguments into register and memory operands.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // ...
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    // ...
  }
  if (!MemOpChains.empty())
    // ...
  // Build the glued chain of copies into the argument registers.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    // ... RegsToPass[i].second, InFlag);
  }
  // ...
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  // ...
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    // ... RegsToPass[i].second.getValueType()));
  // ...
  Ops.push_back(InFlag);
SDValue XCoreTargetLowering::LowerFormalArguments(
    // ...
  return LowerCCCArguments(Chain, CallConv, isVarArg,
                           Ins, dl, DAG, InVals);

SDValue XCoreTargetLowering::LowerCCCArguments(
    // ...
  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
  // ...
  unsigned LRSaveSize = StackSlotSize;
  // ...
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    // ...
      errs() << "LowerFormalArguments Unhandled argument type: "
      // ...
    if (ObjSize > StackSlotSize) {
      errs() << "LowerFormalArguments Unhandled argument type: "
      // ...
    }
    // ...
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }
  // ...
  // For variadic functions, spill the remaining argument registers to the
  // stack so va_arg can find every argument in memory.
  static const MCPhysReg ArgRegs[] = {
    XCore::R0, XCore::R1, XCore::R2, XCore::R3
  };
  unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
  // ...
  if (i == (int)FirstVAReg) {
    // ...
  }
  offset -= StackSlotSize;
  // ...
  MemOps.push_back(Store);
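
A variadic function is what exercises that register-spill loop: the portion of R0..R3 not used by the fixed arguments is written to the stack so the va_arg sequence can walk all arguments contiguously. For example:

#include <cstdarg>

// The fixed argument arrives in R0; remaining variadic arguments passed in
// R1..R3 are spilled next to the stack-passed ones by the code above.
int sumInts(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int);
  va_end(ap);
  return total;
}
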
  if (!CFRegNode.empty())
    // ...

  // Aggregates passed byval are copied into a fresh stack object; the
  // callee then works on the copy.
  for (/* ... */ ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI->Flags.getNonZeroByValAlign());
      // ...
      InVals.push_back(FIN);
      // ...
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    // ...
// CanLowerReturn: stack-returned values are not supported for variadic
// functions.
bool XCoreTargetLowering::
    // ...
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;

// LowerReturn: return values go in registers and, when they spill, into
// caller-provided return slots on the stack.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
  // ...
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);
    // ...
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        /* ... */));
  }
  if (!MemOpChains.empty())
    // ...
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // ...
  }
  // ...
  RetOps.push_back(Flag);
1526 "Unexpected instr type to insert");
1545 F->insert(It, copy0MBB);
1546 F->insert(It, sinkMBB);
1554 BB->addSuccessor(copy0MBB);
1555 BB->addSuccessor(sinkMBB);
1567 BB->addSuccessor(sinkMBB);
1573 BuildMI(*
BB,
BB->begin(), dl,
TII.get(XCore::PHI),
MI.getOperand(0).getReg())
1579 MI.eraseFromParent();
// PerformDAGCombine.
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  // ...
  switch (N->getOpcode()) {
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      // Narrow the demanded bits of the value operand.
      // ... !DCI.isBeforeLegalizeOps());
      // ...
      DCI.CommitTargetLoweringOpt(TLO);
      // ...
    }
    case Intrinsic::xcore_setpt: {
      // ... !DCI.isBeforeLegalizeOps());
      // ...
      DCI.CommitTargetLoweringOpt(TLO);
      // ...
    }
    }
    break;

  // LADD / LSUB combines: simplify when an operand is known zero and the
  // carry result is unused.
  if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
    // ...
  }
  // ...
  if (N1C && N1C->isZero() && N->hasNUsesOfValue(0, 1)) {
    // ...
  }

  // LMUL combine: canonicalize constants to the RHS and simplify multiplies
  // by zero.
  if ((N0C && !N1C) ||
      // ...
  if (N1C && N1C->isZero()) {
    // ...
    if (N->hasNUsesOfValue(0, 0)) {
      // ...

  // ADD combine: fuse add(add(mul(x, y), a), b) into a long multiply-add.
  SDValue Mul0, Mul1, Addend0, Addend1;
  // ... Mul1, Addend0, Addend1);
  // ...
  // ... Addend0L, Addend1L);
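
The ADD combine above recognizes expression trees equivalent to mul(x, y) + a + b and turns them into a single LMUL node. In source form the pattern is simply:

#include <cstdint>

// add(add(mul(x, y), a), b): the shape isADDADDMUL matches, provided the
// intermediate multiply and add have no other users.
uint32_t mulAccumulate(uint32_t x, uint32_t y, uint32_t a, uint32_t b) {
  return x * y + a + b;
}
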
  // STORE combine: replace an unaligned store of an unaligned load with a
  // memmove of the same width.
  if (!DCI.isBeforeLegalize() ||
      allowsMemoryAccessForAlignment(/* ... */ *ST->getMemOperand()) ||
      ST->isVolatile() || ST->isIndexed()) {
    break;
  }
  // ...
  unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
  assert((StoreBits % 8) == 0 &&
         "Store size in bits must be a multiple of 8");
  // ...
  if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
      LD->getAlign() == Alignment &&
      !LD->isVolatile() && !LD->isIndexed() &&
      // ...
    return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
                          /* ... */ Alignment, false, isTail,
                          ST->getPointerInfo(), LD->getPointerInfo());
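
At the source level the pattern is a value loaded from one possibly misaligned location and stored straight to another, e.g. a word copy written through memcpy into a temporary; recognizing it lets the backend emit one memmove instead of expanding both sides into byte or halfword sequences:

#include <cstring>

// Store-of-load pattern: the loaded word has a single use (the store), so
// the combine can rewrite the pair as a 4-byte memmove.
void copyWord(void *dst, const void *src) {
  int tmp;
  std::memcpy(&tmp, src, sizeof tmp);   // possibly misaligned load
  std::memcpy(dst, &tmp, sizeof tmp);   // possibly misaligned store
}
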
void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  // ...
  switch (Op.getOpcode()) {
  // ...
  // The second result of LADD / LSUB is the carry or borrow; only its low
  // bit can be set.
    if (Op.getResNo() == 1) {
      // ...
    }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    switch (IntNo) {
    case Intrinsic::xcore_getts:
      // ...
    case Intrinsic::xcore_int:
    case Intrinsic::xcore_inct:
      // ...
    case Intrinsic::xcore_testct:
      // ...
    case Intrinsic::xcore_testwct:
      // ...

// Immediate range helper, e.g. isImmUs: unsigned immediates 0..11.
  return (val >= 0 && val <= 11);

// isLegalAddressingMode: base plus a small immediate (scaled by the access
// size) or base plus register.
  unsigned Size = DL.getTypeAllocSize(Ty);
  // ...
  if (AM.Scale == 0) {
    // ...
  if (AM.Scale == 0) {
    // ...
  if (AM.Scale == 0) {
    // ...

// getRegForInlineAsmConstraint: the single-letter 'r' constraint maps to
// the general purpose register class.
std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ...
      return std::make_pair(0U, &XCore::GRRegsRegClass);
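
For reference, this is the kind of inline assembly that exercises the 'r' constraint handled above; the operands are placed in GRRegs. The mnemonic here is only illustrative:

// 'r' constrains both operands and the result to general purpose registers
// (GRRegsRegClass above). The instruction string is illustrative.
int addRegs(int a, int b) {
  int result;
  __asm__("add %0, %1, %2" : "=r"(result) : "r"(a), "r"(b));
  return result;
}
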