#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
    cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math", cl::Hidden,
    cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
    cl::init(5), cl::desc("Set minimum jump tables"));

static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden,
    cl::init(6), cl::desc("Max #stores to inline memcpy"));

static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
    cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"));

static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden,
    cl::init(6), cl::desc("Max #stores to inline memmove"));

static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
    cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"));

static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset", cl::Hidden,
    cl::init(8), cl::desc("Max #stores to inline memset"));

static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
    cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"));

static cl::opt<bool> AlignLoads("hexagon-align-loads", cl::Hidden,
    cl::init(false),
    cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool> DisableArgsMinAlignment(
    "hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false),
    cl::desc("Disable minimum alignment of 1 for "
             "arguments passed by value on stack"));
class HexagonCCState : public CCState {
  unsigned NumNamedVarArgParams = 0;

public:
  HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &Locs, LLVMContext &C,
                 unsigned NumNamedArgs)
      : CCState(CC, IsVarArg, MF, Locs, C),
        NumNamedVarArgParams(NumNamedArgs) {}

  unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = std::size(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // Skip one register if the first unallocated register has an odd number.
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

  // This hook only skips registers; it never allocates one for the current
  // argument itself.
  return false;
}
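// With CC_SkipOdd wired into the calling convention, a 64-bit value never
// straddles an odd/even boundary: for arguments (i32, i64) the i32 goes in
// R0, R1 is skipped, and the i64 is passed in the even-aligned R3:2 pair.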
#include "HexagonGenCallingConv.inc"

// CreateCopyOfByValArgument - Make a copy of an aggregate at address
// specified by "Src" to address "Dst".
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                       Flags.getNonZeroByValAlign(), /*isVolatile=*/false,
                       /*AlwaysInline=*/false, /*isTailCall=*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}

  return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
      .Case("r0", Hexagon::R0)
      .Case("r1", Hexagon::R1)
      .Case("r2", Hexagon::R2)
      .Case("r3", Hexagon::R3)
      .Case("r4", Hexagon::R4)
      .Case("r5", Hexagon::R5)
      .Case("r6", Hexagon::R6)
      .Case("r7", Hexagon::R7)
      .Case("r8", Hexagon::R8)
      .Case("r9", Hexagon::R9)
      .Case("r10", Hexagon::R10)
      .Case("r11", Hexagon::R11)
      .Case("r12", Hexagon::R12)
      .Case("r13", Hexagon::R13)
      .Case("r14", Hexagon::R14)
      .Case("r15", Hexagon::R15)
      .Case("r16", Hexagon::R16)
      .Case("r17", Hexagon::R17)
      .Case("r18", Hexagon::R18)
      .Case("r19", Hexagon::R19)
      .Case("r20", Hexagon::R20)
      .Case("r21", Hexagon::R21)
      .Case("r22", Hexagon::R22)
      .Case("r23", Hexagon::R23)
      .Case("r24", Hexagon::R24)
      .Case("r25", Hexagon::R25)
      .Case("r26", Hexagon::R26)
      .Case("r27", Hexagon::R27)
      .Case("r28", Hexagon::R28)
      .Case("r29", Hexagon::R29)
      .Case("r30", Hexagon::R30)
      .Case("r31", Hexagon::R31)
      .Case("r1:0", Hexagon::D0)
      .Case("r3:2", Hexagon::D1)
      .Case("r5:4", Hexagon::D2)
      .Case("r7:6", Hexagon::D3)
      .Case("r9:8", Hexagon::D4)
      .Case("r11:10", Hexagon::D5)
      .Case("r13:12", Hexagon::D6)
      .Case("r15:14", Hexagon::D7)
      .Case("r17:16", Hexagon::D8)
      .Case("r19:18", Hexagon::D9)
      .Case("r21:20", Hexagon::D10)
      .Case("r23:22", Hexagon::D11)
      .Case("r25:24", Hexagon::D12)
      .Case("r27:26", Hexagon::D13)
      .Case("r29:28", Hexagon::D14)
      .Case("r31:30", Hexagon::D15)
      .Case("sp", Hexagon::R29)
      .Case("fp", Hexagon::R30)
      .Case("lr", Hexagon::R31)
      .Case("p0", Hexagon::P0)
      .Case("p1", Hexagon::P1)
      .Case("p2", Hexagon::P2)
      .Case("p3", Hexagon::P3)
      .Case("sa0", Hexagon::SA0)
      .Case("lc0", Hexagon::LC0)
      .Case("sa1", Hexagon::SA1)
      .Case("lc1", Hexagon::LC1)
      .Case("m0", Hexagon::M0)
      .Case("m1", Hexagon::M1)
      .Case("usr", Hexagon::USR)
      .Case("ugp", Hexagon::UGP)
      .Case("cs0", Hexagon::CS0)
      .Case("cs1", Hexagon::CS1)
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    if (RVLocs[i].getValVT() == MVT::i1) {
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);

                      RVLocs[i].getValVT(), Glue);
  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();

  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),

  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
  CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

                   IsVarArg, IsStructRet, StructAttrFlag, Outs,
               : "Argument must be passed on stack. "
                 "Not eligible for Tail Call\n"));

  unsigned NumBytes = CCInfo.getStackSize();
  bool NeedsArgAlign = false;
  Align LargestAlignSeen;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    NeedsArgAlign |= ArgAlign;
                                     StackPtr.getValueType());
    LargestAlignSeen = std::max(
    if (Flags.isByVal()) {

  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
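// When HVX vector arguments go on the stack of a V60+ function, the call
// lowering tracks the largest argument alignment it has seen and raises the
// frame's alignment to the HVX spill alignment, so the vector stores built
// below are not misaligned.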
  if (!MemOpChains.empty())
  for (const auto &R : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);

  for (const auto &R : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);

          dyn_cast<ExternalSymbolSDNode>(Callee)) {

  for (const auto &R : RegsToPass)

  assert(Mask && "Missing call preserved mask for calling convention");

  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||

  Base = Op->getOperand(0);
  Offset = Op->getOperand(1);
  if (!isa<ConstantSDNode>(Offset.getNode()))
    return false;

  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
  unsigned LR = HRI.getRARegister();

  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
    --NumOps;  // Ignore the flag operand.

  unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();

  for (; NumVals; --NumVals, ++i) {
    Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
    HMFI.setHasClobberLR(true);
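// LowerINLINEASM walks the asm node's register operands; if any of them is
// LR (R31, the return-address register), setHasClobberLR(true) tells frame
// lowering that the prologue/epilogue must save and restore LR around the
// inline asm.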
  SDValue Chain = Op.getOperand(0);

  SDValue Chain = Op.getOperand(0);

  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::hexagon_prefetch) {

  SDValue Chain = Op.getOperand(0);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

    A = HFI.getStackAlign().value();

  dbgs() << __func__ << " Align: " << A << " Size: ";
  Size.getNode()->dump(&DAG);
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,

  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
static unsigned NextSingleReg(const TargetRegisterClass &RC, unsigned Reg) {
  switch (RC.getID()) {
  case Hexagon::IntRegsRegClassID:
    return Reg - Hexagon::R0 + 1;
  case Hexagon::DoubleRegsRegClassID:
    return (Reg - Hexagon::D0 + 1) * 2;
  case Hexagon::HvxVRRegClassID:
    return Reg - Hexagon::V0 + 1;
  case Hexagon::HvxWRRegClassID:
    return (Reg - Hexagon::W0 + 1) * 2;
  }
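// NextSingleReg converts the register just allocated into the index of the
// next free single register: a 32-bit argument in R2 yields index 3, while a
// 64-bit argument in D1 (the R3:2 pair) yields index 4. LowerFormalArguments
// uses this to track FirstVarArgSavedReg for the varargs save area.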
  HFL.FirstVarArgSavedReg = 0;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    bool ByVal = Flags.isByVal();
          (!ByVal || (ByVal && Flags.getByValSize() > 8));

      HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());

      unsigned ObjSize = Flags.isByVal()
                             ? Flags.getByValSize()

      if (Flags.isByVal()) {

  for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
    MRI.addLiveIn(Hexagon::R0+i);
  HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);

  int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
  bool RequiresPadding = (NumVarArgRegs & 1);
  int RegSaveAreaSizePlusPadding = RequiresPadding
                                       ? (NumVarArgRegs + 1) * 4
                                       : NumVarArgRegs * 4;

  if (RegSaveAreaSizePlusPadding > 0) {
    if (!(RegAreaStart % 8))
      RegAreaStart = (RegAreaStart + 7) & -8;

    int RegSaveAreaFrameIndex =
    HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

    int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
    HMFI.setVarArgsFrameIndex(FI);

    HMFI.setRegSavedAreaStartFrameIndex(FI);
    HMFI.setVarArgsFrameIndex(FI);

  HMFI.setVarArgsFrameIndex(FI);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue FIN = Op.getOperand(1);

  SDValue SavedRegAreaStartFrameIndex =
  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
                 SavedRegAreaStartFrameIndex,

  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
                       false, false, false,
  const SDLoc &dl(Op);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {

  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
        EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
        return ThisBW >= OrigBW;
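// isSExtFree reports whether sign-extending N costs nothing extra, e.g. when
// N is already a sign_extend_inreg whose own width covers the original type
// (and, presumably, when N is a load, since Hexagon has sign-extending
// loads). LowerSETCC uses it to prefer a signed compare for i8/i16 operands.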
  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
  SDValue PredOp = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  const SDLoc &dl(Op);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {

  EVT ValTy = Op.getValueType();

  bool isVTi1Type = false;
  if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
    if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
      unsigned VecLen = CV->getNumOperands();
      assert(isPowerOf2_32(VecLen) &&
             "conversion only supported for pow2 VectorSize");
      for (unsigned i = 0; i < VecLen; ++i)

  else if (isVTi1Type)

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();

  EVT VT = Op.getValueType();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT VT = Op.getValueType();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

                                  unsigned char OperandFlags) const {

  assert(Mask && "Missing call preserved mask for calling convention");

  if (IsPositionIndependent) {

                      Hexagon::R0, Flags);
  for (unsigned LegalIntOp :

  for (unsigned IntExpOp :

  for (unsigned FPExpOp :

  static const unsigned VectExpOps[] = {

  for (unsigned VectExpOp : VectExpOps)

  if (VT.getVectorElementType() != MVT::i32) {

  for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
                       MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {

    if (NativeVT.getVectorElementType() != MVT::i1) {

  for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {

  for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
                 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {

  for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {

  for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
                 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {

  initializeHVXLowering();

  setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
  setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,
  auto *CA = dyn_cast<ConstantSDNode>(Ptr);

  unsigned Addr = CA->getZExtValue();

  if (HaveAlign >= NeedAlign)

  DiagnosticInfoMisalignedTrap(StringRef M)

    return DI->getKind() == DK_MisalignedTrap;

    << " has alignment " << HaveAlign.value()
    << ", but the memory access requires " << NeedAlign.value();

  O << ". The instruction has been replaced with a trap.";
  const SDLoc &dl(Op);
  auto *LS = cast<LSBaseSDNode>(Op.getNode());
  assert(!LS->isIndexed() && "Not expecting indexed ops on constant address");

static bool isBrevLdIntrinsic(const Value *Inst) {
  unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
  return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
          ID == Intrinsic::hexagon_L2_loadri_pbr ||
          ID == Intrinsic::hexagon_L2_loadrh_pbr ||
          ID == Intrinsic::hexagon_L2_loadruh_pbr ||
          ID == Intrinsic::hexagon_L2_loadrb_pbr ||
          ID == Intrinsic::hexagon_L2_loadrub_pbr);
}
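// The *_pbr intrinsics are Hexagon's bit-reversed post-increment loads (used
// for FFT-style access patterns). getBrevLdObject/returnEdge below walk back
// through casts, GEPs and PHI back-edges to recover the base object such a
// load iterates over, so getTgtMemIntrinsic can attach the right pointer.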
    V = cast<Operator>(V)->getOperand(0);

    V = cast<Instruction>(V)->getOperand(0);

    if (Blk == Parent) {

    BaseVal = BackEdgeVal;
  } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));

  if (IntrBaseVal == BackEdgeVal)

  assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");

  Value *IntrBaseVal = V;

  } while (BaseVal != V);

  if (const PHINode *PN = dyn_cast<PHINode>(V))
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::hexagon_L2_loadrd_pbr:
  case Intrinsic::hexagon_L2_loadri_pbr:
  case Intrinsic::hexagon_L2_loadrh_pbr:
  case Intrinsic::hexagon_L2_loadruh_pbr:
  case Intrinsic::hexagon_L2_loadrb_pbr:
  case Intrinsic::hexagon_L2_loadrub_pbr: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    auto &Cont = I.getCalledFunction()->getParent()->getContext();

    Type *ElTy =
        I.getCalledFunction()->getReturnType()->getStructElementType(0);

    Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));
  case Intrinsic::hexagon_V6_vgathermw:
  case Intrinsic::hexagon_V6_vgathermw_128B:
  case Intrinsic::hexagon_V6_vgathermh:
  case Intrinsic::hexagon_V6_vgathermh_128B:
  case Intrinsic::hexagon_V6_vgathermhw:
  case Intrinsic::hexagon_V6_vgathermhw_128B:
  case Intrinsic::hexagon_V6_vgathermwq:
  case Intrinsic::hexagon_V6_vgathermwq_128B:
  case Intrinsic::hexagon_V6_vgathermhq:
  case Intrinsic::hexagon_V6_vgathermhq_128B:
  case Intrinsic::hexagon_V6_vgathermhwq:
  case Intrinsic::hexagon_V6_vgathermhwq_128B: {
    const Module &M = *I.getParent()->getParent()->getParent();
    Type *VecTy = I.getArgOperand(1)->getType();
    Info.ptrVal = I.getArgOperand(0);
    Info.align =
        MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
  return X.getValueType().isScalarInteger();

                                               unsigned DefinedValues) const {

                                               unsigned Index) const {

  unsigned Action = getPreferredHvxVectorAction(VT);

  if (ElemTy == MVT::i1)

  unsigned Action = getCustomHvxOperationAction(Op);

std::pair<SDValue, int>
HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
  if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
    return { Addr.getOperand(0), CN->getSExtValue() };
  const auto *SVN = cast<ShuffleVectorSDNode>(Op);
  assert(AM.size() <= 8 && "Unexpected shuffle mask");
  unsigned VecLen = AM.size();
         "HVX shuffles should be legal");

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  const SDLoc &dl(Op);

  if (ty(Op0) != VecTy || ty(Op1) != VecTy)

  if (AM[F] >= int(VecLen)) {

  for (int M : Mask) {
      for (unsigned j = 0; j != ElemBytes; ++j)
      for (unsigned j = 0; j != ElemBytes; ++j)

  for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {

  if (ByteMask.size() == 4) {
    if (MaskIdx == (0x03020100 | MaskUnd))
    if (MaskIdx == (0x00010203 | MaskUnd)) {

    SDValue Concat10 =
        getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
    if (MaskIdx == (0x06040200 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
    if (MaskIdx == (0x07050301 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);

    SDValue Concat01 =
        getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
    if (MaskIdx == (0x02000604 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
    if (MaskIdx == (0x03010705 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
  }

  if (ByteMask.size() == 8) {
    if (MaskIdx == (0x0706050403020100ull | MaskUnd))
    if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {

    if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
      VectorPair P = opSplit(Op0, dl, DAG);
      return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
    }

    if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
  }
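// The byte shuffle mask is packed into MaskIdx one byte per output element,
// least-significant byte first, with MaskUnd holding a 0xff byte for each
// undef position. E.g. 0x0d0c050409080100 selects source bytes
// {0,1,8,9,4,5,12,13}: the even halfwords of the two inputs interleaved,
// which is exactly what S2_shuffeh produces.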
  switch (Op.getOpcode()) {
    if (SDValue S = cast<BuildVectorSDNode>(Op)->getSplatValue())

    return Op.getOperand(0);

  switch (Op.getOpcode()) {

  if (SDValue Sp = getSplatValue(Op.getOperand(1), DAG))
    return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), Sp);

  const SDLoc &dl(Op);

  if (SDValue S = getVectorShiftByInt(Op, DAG))

  MVT ResTy = ty(Res);

  auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) {

  return ShiftPartI8(Opc, Val, Amt);

  auto [LoV, HiV] = opSplit(Val, dl, DAG);
      {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
  if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))

  SDValue InpV = Op.getOperand(0);
  MVT InpTy = ty(InpV);

  const SDLoc &dl(Op);

  if (InpTy == MVT::i8) {
    if (ResTy == MVT::v8i1) {
      return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
  bool AllConst = true;

  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
    if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
      const ConstantInt *CI = CN->getConstantIntValue();
    } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
      const ConstantFP *CF = CN->getConstantFPValue();
  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

  if (!isUndef(Elem[First]))

    return getZero(dl, VecTy, DAG);

  if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
    uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
                 Consts[1]->getZExtValue() << 16;

    if (ElemTy == MVT::f16) {

    SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);

  if (ElemTy == MVT::i8) {
    int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
                (Consts[1]->getZExtValue() & 0xFF) << 8 |
                (Consts[2]->getZExtValue() & 0xFF) << 16 |
                Consts[3]->getZExtValue() << 24;
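// A v4i8 built from constants is materialized as one 32-bit immediate with
// element 0 in the low byte: {1, 2, 3, 4} packs to 0x04030201, which a
// single transfer-immediate moves into a register.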
  bool IsSplat = true;
  for (unsigned i = First+1; i != Num; ++i) {
    if (Elem[i] == Elem[First] || isUndef(Elem[i]))

  for (unsigned i = 0; i != 4; ++i) {

  SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);

  dbgs() << "VecTy: " << VecTy << '\n';

  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);

  if (!isUndef(Elem[First]))

    return getZero(dl, VecTy, DAG);

  if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
    bool IsSplat = true;
    for (unsigned i = First+1; i != Num; ++i) {
      if (Elem[i] == Elem[First] || isUndef(Elem[i]))

  for (unsigned i = 0; i != Num; ++i)
    Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);

      : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
      : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
  return getCombine(H, L, dl, VecTy, DAG);
  MVT VecTy = ty(VecV);

    return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);

  assert((VecWidth % ElemWidth) == 0);
  assert(VecWidth == 32 || VecWidth == 64);

  MVT ScalarTy = tyScalar(VecTy);

  if (auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) {
    unsigned Off = IdxN->getZExtValue() * ElemWidth;
    if (VecWidth == 64 && ValWidth == 32) {
      assert(Off == 0 || Off == 32);
      ExtV = Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
    } else if (Off == 0 && (ValWidth % 8) == 0) {

                   {VecV, WidthV, OffV});

    if (ty(IdxV) != MVT::i32)

                   {VecV, WidthV, OffV});
HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
  MVT VecTy = ty(VecV);
         "Vector elements should equal vector width size");
  assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);

  if (auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) {

  if (ValWidth == 1) {
    SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);

  unsigned Scale = VecWidth / ValWidth;

  assert(ty(IdxV) == MVT::i32);
  unsigned VecRep = 8 / VecWidth;

  T1 = LoHalf(T1, DAG);
  T1 = expandPredicate(T1, dl, DAG);
  MVT VecTy = ty(VecV);

    return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);

  assert(VecWidth == 32 || VecWidth == 64);
  assert((VecWidth % ValWidth) == 0);

    unsigned W = C->getZExtValue() * ValWidth;
                   {VecV, ValV, WidthV, OffV});

    if (ty(IdxV) != MVT::i32)
                   {VecV, ValV, WidthV, OffV});

HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,
  MVT VecTy = ty(VecV);

  if (ValTy == MVT::i1) {
    SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
    return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Ins}, DAG);

  for (unsigned R = Scale; R > 1; R /= 2) {
    ValR = contractPredicate(ValR, dl, DAG);
    ValR = getCombine(DAG.getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
  assert(ty(Vec32).getSizeInBits() == 32);

HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
  assert(ty(Vec64).getSizeInBits() == 64);

                             {0, 2, 4, 6, 1, 3, 5, 7});
  return extractVector(S, DAG.getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
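// contractPredicate halves a predicate-as-bytes value: the shuffle mask
// {0, 2, 4, 6, 1, 3, 5, 7} gathers the even-indexed bytes of the 64-bit
// input into the low half, and the v4i8 result is extracted from offset 0.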
  MVT ValTy = ty(Val);

  if (ValLen == ResLen)

  const SDLoc &dl(Val);

  assert(ResLen % ValLen == 0);

  for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)

  MVT ElemTy = ty(Hi);

  const SDLoc &dl(Op);

  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)

    return buildVector32(Ops, dl, VecTy, DAG);

    return buildVector64(Ops, dl, VecTy, DAG);

  if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {

    bool All0 = true, All1 = true;

    auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
    if (CN == nullptr) {
      All0 = All1 = false;

    SDValue Z = getZero(dl, MVT::i32, DAG);

    for (unsigned i = 0; i != 8; ++i) {
      Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);

    for (unsigned i = 0, e = A.size()/2; i != e; ++i)

    return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);

  const SDLoc &dl(Op);

  assert(Op.getNumOperands() == 2);
  return getCombine(Op.getOperand(1), Op.getOperand(0), dl, VecTy, DAG);
  if (ElemTy == MVT::i1) {
    assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
    MVT OpTy = ty(Op.getOperand(0));

    assert(Scale == Op.getNumOperands() && Scale > 1);

    for (SDValue P : Op.getNode()->op_values()) {

      for (unsigned R = Scale; R > 1; R /= 2) {
        W = contractPredicate(W, dl, DAG);
        W = getCombine(DAG.getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);

      Words[IdxW ^ 1].clear();

      for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
        SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
                      {W0, W1, WidthV, WidthV});

    assert(Scale == 2 && Words[IdxW].size() == 2);

    SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);

  SDValue Vec = Op.getOperand(0);

  return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);

  return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
                       ty(Op), ty(Op), DAG);

  return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),

  SDValue ValV = Op.getOperand(1);
  return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
                      SDLoc(Op), ty(ValV), DAG);
  const SDLoc &dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());

  bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;

    LN = cast<LoadSDNode>(NL.getNode());

  if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG))
    return replaceMemWithUndef(Op, DAG);

    SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);

  const SDLoc &dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());

  if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
    SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);

    SN = cast<StoreSDNode>(NS.getNode());

  if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG))
    return replaceMemWithUndef(Op, DAG);

  if (ClaimAlign < NeedAlign)
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  MVT LoadTy = ty(Op);

  if (HaveAlign >= NeedAlign)

  const SDLoc &dl(Op);

  bool DoDefault = false;

  if (!DoDefault && (2 * HaveAlign) == NeedAlign) {

  unsigned LoadLen = NeedAlign;

  auto BO = getBaseAndOffset(Base);
  unsigned BaseOpc = BO.first.getOpcode();

  if (BO.second % LoadLen != 0) {
                         DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
    BO.second -= BO.second % LoadLen;

      MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
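// This is the -hexagon-align-loads rewrite: when a load's known alignment is
// exactly half of what its type requires, the base is rounded down to a
// LoadLen boundary, two naturally aligned loads of LoadLen bytes are issued
// (note the doubled 2 * LoadLen memory operand covering both), and the
// requested value is then reassembled from the two halves.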
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
  auto *CY = dyn_cast<ConstantSDNode>(Y);

  const SDLoc &dl(Op);
  SDVTList VTs = Op.getNode()->getVTList();

  unsigned Opc = Op.getOpcode();

  assert(VY != 0 && "This should have been folded");

  const SDLoc &dl(Op);
  unsigned Opc = Op.getOpcode();
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);

  EVT CarryTy = C.getValueType();
                     { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
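// Hexagon implements subtract-with-borrow on top of its add-with-carry
// instruction, whose carry convention is the inverse of ISD::USUBO_CARRY's
// borrow, so the incoming carry operand is logically negated when forming
// the target node (and the produced carry presumably has to be interpreted
// the same inverted way).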
  SDValue Chain = Op.getOperand(0);
  SDValue Handler = Op.getOperand(2);

  unsigned OffsetReg = Hexagon::R28;

  unsigned Opc = Op.getOpcode();

  if (isHvxOperation(Op.getNode(), DAG)) {
    if (SDValue V = LowerHvxOperation(Op, DAG))
      return V;

  Op.getNode()->dumpr(&DAG);
  errs() << "Error: check for a non-legal type in this operation\n";
  if (isHvxOperation(N, DAG)) {
    LowerHvxOperationWrapper(N, Results, DAG);

  unsigned Opc = N->getOpcode();

    Results.push_back(opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG));

  if (isHvxOperation(N, DAG)) {
    ReplaceHvxNodeResults(N, Results, DAG);

  switch (N->getOpcode()) {

    if (N->getValueType(0) == MVT::i8) {
      if (N->getOperand(0).getValueType() == MVT::v8i1) {
        SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
                             N->getOperand(0), DAG);
  if (isHvxOperation(N, DCI.DAG)) {
    if (SDValue V = PerformHvxDAGCombine(N, DCI))
      return V;

  const SDLoc &dl(Op);
  unsigned Opc = Op.getOpcode();

  SDValue Op0 = Op.getOperand(0);

    EVT TruncTy = Op.getValueType();

  switch (P.getOpcode()) {
      return getZero(dl, ty(Op), DCI.DAG);

                     Op.getOperand(2), Op.getOperand(1));

  SDValue Op0 = Op.getOperand(0);
  MVT TruncTy = ty(Op);

  if (ty(Elem0) == TruncTy)
  if (ty(Elem0).bitsGT(TruncTy))

  auto fold0 = [&, this](SDValue Op) {
    if (ty(Op) != MVT::i64)

    SDValue Shl = Op.getOperand(0);
    SDValue Zxt = Op.getOperand(1);

    auto *Amt = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
    if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
      unsigned A = Amt->getZExtValue();
  int Idx = cast<JumpTableSDNode>(Table)->getIndex();

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

std::pair<unsigned, const TargetRegisterClass*>

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
        return {0u, nullptr};
      return {0u, &Hexagon::IntRegsRegClass};
      return {0u, &Hexagon::DoubleRegsRegClass};
        return {0u, nullptr};
      return {0u, &Hexagon::ModRegsRegClass};
        return {0u, nullptr};
      return {0u, &Hexagon::HvxQRRegClass};
        return {0u, nullptr};
        return {0u, &Hexagon::HvxVRRegClass};
        return {0u, &Hexagon::HvxVRRegClass};
        return {0u, &Hexagon::HvxWRRegClass};
        return {0u, &Hexagon::HvxWRRegClass};
        return {0u, nullptr};

                                    bool ForCodeSize) const {

  int Scale = AM.Scale;

  return Imm >= -512 && Imm <= 511;
                                                   bool IsCalleeStructRet,
                                                   bool IsCallerStructRet,

  bool CCMatch = CallerCC == CalleeCC;

  if (!isa<GlobalAddressSDNode>(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {

  if (IsCalleeStructRet || IsCallerStructRet)

  if (Op.size() >= 8 && Op.isAligned(Align(8)))
    return MVT::i64;
  if (Op.size() >= 4 && Op.isAligned(Align(4)))
    return MVT::i32;
  if (Op.size() >= 2 && Op.isAligned(Align(2)))
    return MVT::i16;

    return allowsHvxMemoryAccess(SVT, Flags, Fast);

                                    unsigned *Fast) const {
    return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
std::pair<const TargetRegisterClass*, uint8_t>

    return std::make_pair(&Hexagon::HvxQRRegClass, 1);
    return std::make_pair(&Hexagon::HvxVRRegClass, 1);
    return std::make_pair(&Hexagon::HvxWRRegClass, 1);

  auto *L = cast<LoadSDNode>(Load);
  std::pair<SDValue, int> BO = getBaseAndOffset(L->getBasePtr());

  const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());

    AdjustHvxInstrPostInstrSelection(MI, Node);
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;

  auto PtrTy = cast<PointerType>(Addr->getType());
      Builder.getIntNTy(SZ)->getPointerTo(PtrTy->getAddressSpace());

  return Builder.CreateBitCast(Call, ValueTy);
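// emitLoadLinked maps LLVM's load-linked hook onto Hexagon's locked loads
// (L2_loadw_locked / L4_loadd_locked), and emitStoreConditional below maps
// onto S2_storew_locked / S4_stored_locked. A sketch of the loop the
// AtomicExpand pass builds from these hooks for a 32-bit atomicrmw add
// (hypothetical IR, intrinsic signatures abbreviated):
//
//   retry:
//     %old = call i32 @llvm.hexagon.L2.loadw.locked(ptr %p)
//     %new = add i32 %old, %val
//     %ok  = call i32 @llvm.hexagon.S2.storew.locked(ptr %p, i32 %new)
//     %c   = icmp ne i32 %ok, 0
//     br i1 %c, label %done, label %retry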
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;

  unsigned AS = Addr->getType()->getPointerAddressSpace();
  Val = Builder.CreateBitCast(Val, CastTy);

  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64