47#include "llvm/IR/IntrinsicsHexagon.h"
69#define DEBUG_TYPE "hexagon-lowering"
73 cl::desc(
"Control jump table emission on Hexagon target"));
77 cl::desc(
"Enable Hexagon SDNode scheduling"));
80 cl::desc(
"Enable Fast Math processing"));
84 cl::desc(
"Set minimum jump tables"));
88 cl::desc(
"Max #stores to inline memcpy"));
92 cl::desc(
"Max #stores to inline memcpy"));
96 cl::desc(
"Max #stores to inline memmove"));
101 cl::desc(
"Max #stores to inline memmove"));
105 cl::desc(
"Max #stores to inline memset"));
109 cl::desc(
"Max #stores to inline memset"));
113 cl::desc(
"Rewrite unaligned loads as a pair of aligned loads"));
118 cl::desc(
"Disable minimum alignment of 1 for "
119 "arguments passed by value on stack"));
123 class HexagonCCState :
public CCState {
124 unsigned NumNamedVarArgParams = 0;
129 unsigned NumNamedArgs)
131 NumNamedVarArgParams(NumNamedArgs) {}
132 unsigned getNumNamedVarArgParams()
const {
return NumNamedVarArgParams; }
144 Hexagon::R0, Hexagon::R1, Hexagon::R2,
145 Hexagon::R3, Hexagon::R4, Hexagon::R5
147 const unsigned NumArgRegs = std::size(ArgRegs);
151 if (RegNum != NumArgRegs && RegNum % 2 == 1)
160#include "HexagonGenCallingConv.inc"
179 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
190 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
193 return CCInfo.
CheckReturn(Outs, RetCC_Hexagon_HVX);
223 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
272 .
Case(
"r0", Hexagon::R0)
273 .
Case(
"r1", Hexagon::R1)
274 .
Case(
"r2", Hexagon::R2)
275 .
Case(
"r3", Hexagon::R3)
276 .
Case(
"r4", Hexagon::R4)
277 .
Case(
"r5", Hexagon::R5)
278 .
Case(
"r6", Hexagon::R6)
279 .
Case(
"r7", Hexagon::R7)
280 .
Case(
"r8", Hexagon::R8)
281 .
Case(
"r9", Hexagon::R9)
282 .
Case(
"r10", Hexagon::R10)
283 .
Case(
"r11", Hexagon::R11)
284 .
Case(
"r12", Hexagon::R12)
285 .
Case(
"r13", Hexagon::R13)
286 .
Case(
"r14", Hexagon::R14)
287 .
Case(
"r15", Hexagon::R15)
288 .
Case(
"r16", Hexagon::R16)
289 .
Case(
"r17", Hexagon::R17)
290 .
Case(
"r18", Hexagon::R18)
291 .
Case(
"r19", Hexagon::R19)
292 .
Case(
"r20", Hexagon::R20)
293 .
Case(
"r21", Hexagon::R21)
294 .
Case(
"r22", Hexagon::R22)
295 .
Case(
"r23", Hexagon::R23)
296 .
Case(
"r24", Hexagon::R24)
297 .
Case(
"r25", Hexagon::R25)
298 .
Case(
"r26", Hexagon::R26)
299 .
Case(
"r27", Hexagon::R27)
300 .
Case(
"r28", Hexagon::R28)
301 .
Case(
"r29", Hexagon::R29)
302 .
Case(
"r30", Hexagon::R30)
303 .
Case(
"r31", Hexagon::R31)
304 .
Case(
"r1:0", Hexagon::D0)
305 .
Case(
"r3:2", Hexagon::D1)
306 .
Case(
"r5:4", Hexagon::D2)
307 .
Case(
"r7:6", Hexagon::D3)
308 .
Case(
"r9:8", Hexagon::D4)
309 .
Case(
"r11:10", Hexagon::D5)
310 .
Case(
"r13:12", Hexagon::D6)
311 .
Case(
"r15:14", Hexagon::D7)
312 .
Case(
"r17:16", Hexagon::D8)
313 .
Case(
"r19:18", Hexagon::D9)
314 .
Case(
"r21:20", Hexagon::D10)
315 .
Case(
"r23:22", Hexagon::D11)
316 .
Case(
"r25:24", Hexagon::D12)
317 .
Case(
"r27:26", Hexagon::D13)
318 .
Case(
"r29:28", Hexagon::D14)
319 .
Case(
"r31:30", Hexagon::D15)
320 .
Case(
"sp", Hexagon::R29)
321 .
Case(
"fp", Hexagon::R30)
322 .
Case(
"lr", Hexagon::R31)
323 .
Case(
"p0", Hexagon::P0)
324 .
Case(
"p1", Hexagon::P1)
325 .
Case(
"p2", Hexagon::P2)
326 .
Case(
"p3", Hexagon::P3)
327 .
Case(
"sa0", Hexagon::SA0)
328 .
Case(
"lc0", Hexagon::LC0)
329 .
Case(
"sa1", Hexagon::SA1)
330 .
Case(
"lc1", Hexagon::LC1)
331 .
Case(
"m0", Hexagon::M0)
332 .
Case(
"m1", Hexagon::M1)
333 .
Case(
"usr", Hexagon::USR)
334 .
Case(
"ugp", Hexagon::UGP)
335 .
Case(
"cs0", Hexagon::CS0)
336 .
Case(
"cs1", Hexagon::CS1)
366 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
368 if (RVLocs[i].getValVT() == MVT::i1) {
378 Register PredR =
MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
390 RVLocs[i].getValVT(), Glue);
416 bool IsStructRet = Outs.
empty() ?
false : Outs[0].Flags.isSRet();
430 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.
getContext(),
434 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
436 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
438 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
443 IsVarArg, IsStructRet, StructAttrFlag, Outs,
452 :
"Argument must be passed on stack. "
453 "Not eligible for Tail Call\n"));
456 unsigned NumBytes = CCInfo.getStackSize();
464 bool NeedsArgAlign =
false;
465 Align LargestAlignSeen;
467 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
473 NeedsArgAlign |= ArgAlign;
499 StackPtr.getValueType());
502 LargestAlignSeen = std::max(
504 if (Flags.isByVal()) {
524 if (NeedsArgAlign && Subtarget.
hasV60Ops()) {
525 LLVM_DEBUG(
dbgs() <<
"Function needs byte stack align due to call args\n");
526 Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
527 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
532 if (!MemOpChains.
empty())
546 for (
const auto &R : RegsToPass) {
547 Chain = DAG.
getCopyToReg(Chain, dl, R.first, R.second, Glue);
562 for (
const auto &R : RegsToPass) {
563 Chain = DAG.
getCopyToReg(Chain, dl, R.first, R.second, Glue);
578 dyn_cast<ExternalSymbolSDNode>(Callee)) {
590 for (
const auto &R : RegsToPass)
594 assert(Mask &&
"Missing call preserved mask for calling convention");
611 Chain = DAG.
getNode(OpCode, dl, NodeTys, Ops);
621 InVals, OutVals, Callee);
636 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
637 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
638 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
639 VT == MVT::v4i16 || VT == MVT::v8i8 ||
648 if (!isa<ConstantSDNode>(
Offset.getNode()))
652 int32_t V = cast<ConstantSDNode>(
Offset.getNode())->getSExtValue();
668 unsigned LR = HRI.getRARegister();
675 if (
Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
680 unsigned NumVals = Flags.getNumOperandRegisters();
683 switch (Flags.getKind()) {
694 for (; NumVals; --NumVals, ++i) {
695 Register Reg = cast<RegisterSDNode>(
Op.getOperand(i))->getReg();
698 HMFI.setHasClobberLR(
true);
750 unsigned IntNo =
Op.getConstantOperandVal(1);
752 if (IntNo == Intrinsic::hexagon_prefetch) {
770 assert(AlignConst &&
"Non-constant Align in LowerDYNAMIC_STACKALLOC");
776 A = HFI.getStackAlign().value();
779 dbgs () << __func__ <<
" Align: " <<
A <<
" Size: ";
780 Size.getNode()->dump(&DAG);
805 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
810 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
812 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
814 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
823 switch (RC.
getID()) {
824 case Hexagon::IntRegsRegClassID:
825 return Reg - Hexagon::R0 + 1;
826 case Hexagon::DoubleRegsRegClassID:
827 return (Reg - Hexagon::D0 + 1) * 2;
828 case Hexagon::HvxVRRegClassID:
829 return Reg - Hexagon::V0 + 1;
830 case Hexagon::HvxWRRegClassID:
831 return (Reg - Hexagon::W0 + 1) * 2;
838 HFL.FirstVarArgSavedReg = 0;
841 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
844 bool ByVal = Flags.isByVal();
850 if (VA.
isRegLoc() && ByVal && Flags.getByValSize() <= 8)
854 (!ByVal || (ByVal && Flags.getByValSize() > 8));
883 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.
getLocReg());
889 unsigned ObjSize = Flags.isByVal()
890 ? Flags.getByValSize()
898 if (Flags.isByVal()) {
912 for (
int i = HFL.FirstVarArgSavedReg; i < 6; i++)
913 MRI.addLiveIn(Hexagon::R0+i);
917 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
921 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
922 bool RequiresPadding = (NumVarArgRegs & 1);
923 int RegSaveAreaSizePlusPadding = RequiresPadding
924 ? (NumVarArgRegs + 1) * 4
927 if (RegSaveAreaSizePlusPadding > 0) {
930 if (!(RegAreaStart % 8))
931 RegAreaStart = (RegAreaStart + 7) & -8;
933 int RegSaveAreaFrameIndex =
935 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
938 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
940 HMFI.setVarArgsFrameIndex(FI);
946 HMFI.setRegSavedAreaStartFrameIndex(FI);
947 HMFI.setVarArgsFrameIndex(FI);
956 HMFI.setVarArgsFrameIndex(FI);
969 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
989 SDValue SavedRegAreaStartFrameIndex =
990 DAG.
getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
994 if (HFL.FirstVarArgSavedReg & 1)
995 SavedRegAreaStartFrameIndex =
1004 SavedRegAreaStartFrameIndex,
1036 const Value *DestSV = cast<SrcValueSDNode>(
Op.getOperand(3))->getValue();
1037 const Value *SrcSV = cast<SrcValueSDNode>(
Op.getOperand(4))->getValue();
1043 false,
false,
nullptr, std::nullopt,
1055 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1072 auto isSExtFree = [
this](
SDValue N) {
1073 switch (
N.getOpcode()) {
1079 EVT OrigTy = cast<VTSDNode>(
Op.getOperand(1))->getVT();
1085 return ThisBW >= OrigBW;
1094 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1096 bool IsNegative =
C &&
C->getAPIntValue().isNegative();
1097 if (IsNegative || isSExtFree(
LHS) || isSExtFree(
RHS))
1109 SDValue Op1 =
Op.getOperand(1), Op2 =
Op.getOperand(2);
1113 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1131 EVT ValTy =
Op.getValueType();
1134 bool isVTi1Type =
false;
1135 if (
auto *CV = dyn_cast<ConstantVector>(CPN->
getConstVal())) {
1136 if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
1139 unsigned VecLen = CV->getNumOperands();
1141 "conversion only supported for pow2 VectorSize");
1142 for (
unsigned i = 0; i < VecLen; ++i)
1158 else if (isVTi1Type)
1164 assert(cast<ConstantPoolSDNode>(
T)->getTargetFlags() == TF &&
1165 "Inconsistent target flag encountered");
1167 if (IsPositionIndependent)
1174 EVT VT =
Op.getValueType();
1175 int Idx = cast<JumpTableSDNode>(
Op)->getIndex();
1195 EVT VT =
Op.getValueType();
1197 unsigned Depth =
Op.getConstantOperandVal(0);
1217 EVT VT =
Op.getValueType();
1219 unsigned Depth =
Op.getConstantOperandVal(0);
1237 auto *GAN = cast<GlobalAddressSDNode>(
Op);
1239 auto *GV = GAN->getGlobal();
1240 int64_t
Offset = GAN->getOffset();
1248 if (GO && Subtarget.
useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1270 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
1296 unsigned char OperandFlags)
const {
1315 assert(Mask &&
"Missing call preserved mask for calling convention");
1350 if (IsPositionIndependent) {
1422 Hexagon::R0, Flags);
1559 for (
unsigned LegalIntOp :
1599 for (
unsigned IntExpOp :
1608 for (
unsigned FPExpOp :
1644 static const unsigned VectExpOps[] = {
1670 for (
unsigned VectExpOp : VectExpOps)
1684 if (VT.getVectorElementType() != MVT::i32) {
1708 for (
MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1709 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1724 if (NativeVT.getVectorElementType() != MVT::i1) {
1731 for (
MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
1742 for (
MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1743 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1749 for (
MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1755 for (
MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
1767 for (
MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1820 for (
MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1821 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1849 initializeHVXLowering();
1869 setLibcallName(RTLIB::FPTOUINT_F32_I128,
"__hexagon_fixunssfti");
1870 setLibcallName(RTLIB::FPTOUINT_F64_I128,
"__hexagon_fixunsdfti");
1965HexagonTargetLowering::validateConstPtrAlignment(
SDValue Ptr,
Align NeedAlign,
1967 auto *CA = dyn_cast<ConstantSDNode>(
Ptr);
1970 unsigned Addr = CA->getZExtValue();
1973 if (HaveAlign >= NeedAlign)
1979 DiagnosticInfoMisalignedTrap(
StringRef M)
1985 return DI->
getKind() == DK_MisalignedTrap;
1993 <<
" has alignment " << HaveAlign.
value()
1994 <<
", but the memory access requires " << NeedAlign.
value();
1997 O <<
". The instruction has been replaced with a trap.";
2007 auto *
LS = cast<LSBaseSDNode>(
Op.getNode());
2008 assert(!
LS->isIndexed() &&
"Not expecting indexed ops on constant address");
2020 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
2021 return (
ID == Intrinsic::hexagon_L2_loadrd_pbr ||
2022 ID == Intrinsic::hexagon_L2_loadri_pbr ||
2023 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
2024 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
2025 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
2026 ID == Intrinsic::hexagon_L2_loadrub_pbr);
2035 V = cast<Operator>(V)->getOperand(0);
2037 V = cast<Instruction>(V)->getOperand(0);
2050 if (Blk == Parent) {
2055 BaseVal = BackEdgeVal;
2057 }
while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
2060 if (IntrBaseVal == BackEdgeVal)
2067 assert(
Idx >= 0 &&
"Unexpected index to incoming argument in PHI");
2075 Value *IntrBaseVal = V;
2082 }
while (BaseVal != V);
2085 if (
const PHINode *PN = dyn_cast<PHINode>(V))
2099 unsigned Intrinsic)
const {
2100 switch (Intrinsic) {
2101 case Intrinsic::hexagon_L2_loadrd_pbr:
2102 case Intrinsic::hexagon_L2_loadri_pbr:
2103 case Intrinsic::hexagon_L2_loadrh_pbr:
2104 case Intrinsic::hexagon_L2_loadruh_pbr:
2105 case Intrinsic::hexagon_L2_loadrb_pbr:
2106 case Intrinsic::hexagon_L2_loadrub_pbr: {
2108 auto &
DL =
I.getDataLayout();
2109 auto &Cont =
I.getCalledFunction()->getParent()->getContext();
2113 Type *ElTy =
I.getCalledFunction()->getReturnType()->getStructElementType(0);
2120 Info.align =
DL.getABITypeAlign(
Info.memVT.getTypeForEVT(Cont));
2124 case Intrinsic::hexagon_V6_vgathermw:
2125 case Intrinsic::hexagon_V6_vgathermw_128B:
2126 case Intrinsic::hexagon_V6_vgathermh:
2127 case Intrinsic::hexagon_V6_vgathermh_128B:
2128 case Intrinsic::hexagon_V6_vgathermhw:
2129 case Intrinsic::hexagon_V6_vgathermhw_128B:
2130 case Intrinsic::hexagon_V6_vgathermwq:
2131 case Intrinsic::hexagon_V6_vgathermwq_128B:
2132 case Intrinsic::hexagon_V6_vgathermhq:
2133 case Intrinsic::hexagon_V6_vgathermhq_128B:
2134 case Intrinsic::hexagon_V6_vgathermhwq:
2135 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2136 const Module &M = *
I.getParent()->getParent()->getParent();
2138 Type *VecTy =
I.getArgOperand(1)->getType();
2140 Info.ptrVal =
I.getArgOperand(0);
2143 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2156 return X.getValueType().isScalarInteger();
2176 unsigned DefinedValues)
const {
2181 unsigned Index)
const {
2213 unsigned Action = getPreferredHvxVectorAction(VT);
2219 if (ElemTy == MVT::i1)
2233 unsigned Action = getCustomHvxOperationAction(
Op);
2240std::pair<SDValue, int>
2241HexagonTargetLowering::getBaseAndOffset(
SDValue Addr)
const {
2244 if (
auto *CN = dyn_cast<const ConstantSDNode>(Op1.
getNode()))
2245 return {
Addr.getOperand(0), CN->getSExtValue() };
2255 const auto *SVN = cast<ShuffleVectorSDNode>(
Op);
2257 assert(AM.
size() <= 8 &&
"Unexpected shuffle mask");
2258 unsigned VecLen = AM.
size();
2262 "HVX shuffles should be legal");
2272 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2281 if (AM[
F] >=
int(VecLen)) {
2289 for (
int M : Mask) {
2291 for (
unsigned j = 0; j != ElemBytes; ++j)
2294 for (
unsigned j = 0; j != ElemBytes; ++j)
2307 for (
unsigned i = 0, e = ByteMask.
size(); i != e; ++i) {
2315 if (ByteMask.
size() == 4) {
2317 if (MaskIdx == (0x03020100 | MaskUnd))
2320 if (MaskIdx == (0x00010203 | MaskUnd)) {
2328 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
2329 if (MaskIdx == (0x06040200 | MaskUnd))
2330 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2331 if (MaskIdx == (0x07050301 | MaskUnd))
2332 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2335 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
2336 if (MaskIdx == (0x02000604 | MaskUnd))
2337 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2338 if (MaskIdx == (0x03010705 | MaskUnd))
2339 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2342 if (ByteMask.
size() == 8) {
2344 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2347 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2354 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2355 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2356 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2357 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2358 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2359 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2360 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2361 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2362 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2363 VectorPair
P = opSplit(Op0, dl, DAG);
2364 return getInstr(Hexagon::S2_packhl, dl, VecTy, {
P.second,
P.first}, DAG);
2368 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2369 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2370 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2371 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2379 switch (
Op.getOpcode()) {
2381 if (
SDValue S = cast<BuildVectorSDNode>(
Op)->getSplatValue())
2385 return Op.getOperand(0);
2395 switch (
Op.getOpcode()) {
2409 if (
SDValue Sp = getSplatValue(
Op.getOperand(1), DAG))
2422 if (
SDValue S = getVectorShiftByInt(
Op, DAG))
2436 MVT ResTy = ty(Res);
2444 auto ShiftPartI8 = [&dl, &DAG,
this](
unsigned Opc,
SDValue V,
SDValue A) {
2454 return ShiftPartI8(Opc, Val, Amt);
2456 auto [LoV, HiV] = opSplit(Val, dl, DAG);
2458 {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
2463 if (isa<ConstantSDNode>(
Op.getOperand(1).getNode()))
2472 MVT InpTy = ty(InpV);
2477 if (InpTy == MVT::i8) {
2478 if (ResTy == MVT::v8i1) {
2481 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2496 bool AllConst =
true;
2498 for (
unsigned i = 0, e = Values.
size(); i != e; ++i) {
2501 Consts[i] = ConstantInt::get(IntTy, 0);
2505 if (
auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2506 const ConstantInt *CI = CN->getConstantIntValue();
2508 }
else if (
auto *CN = dyn_cast<ConstantFPSDNode>(
V.getNode())) {
2509 const ConstantFP *CF = CN->getConstantFPValue();
2511 Consts[i] = ConstantInt::get(IntTy,
A.getZExtValue());
2526 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2530 if (!isUndef(Elem[
First]))
2538 return getZero(dl, VecTy, DAG);
2540 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2545 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2546 Consts[1]->getZExtValue() << 16;
2550 if (ElemTy == MVT::f16) {
2557 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);
2561 if (ElemTy == MVT::i8) {
2564 int32_t
V = (Consts[0]->getZExtValue() & 0xFF) |
2565 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2566 (Consts[2]->getZExtValue() & 0xFF) << 16 |
2567 Consts[3]->getZExtValue() << 24;
2572 bool IsSplat =
true;
2573 for (
unsigned i =
First+1; i != Num; ++i) {
2574 if (Elem[i] == Elem[
First] || isUndef(Elem[i]))
2590 for (
unsigned i = 0; i != 4; ++i) {
2600 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2605 dbgs() <<
"VecTy: " << VecTy <<
'\n';
2617 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2621 if (!isUndef(Elem[
First]))
2629 return getZero(dl, VecTy, DAG);
2632 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2633 bool IsSplat =
true;
2634 for (
unsigned i =
First+1; i != Num; ++i) {
2635 if (Elem[i] == Elem[
First] || isUndef(Elem[i]))
2654 for (
unsigned i = 0; i != Num; ++i)
2655 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() &
Mask);
2664 : buildVector32(Elem.
take_front(Num/2), dl, HalfTy, DAG);
2667 : buildVector32(Elem.
drop_front(Num/2), dl, HalfTy, DAG);
2668 return getCombine(
H, L, dl, VecTy, DAG);
2675 MVT VecTy = ty(VecV);
2679 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
2684 assert((VecWidth % ElemWidth) == 0);
2685 assert(VecWidth == 32 || VecWidth == 64);
2688 MVT ScalarTy = tyScalar(VecTy);
2694 if (
auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) {
2695 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2696 if (VecWidth == 64 && ValWidth == 32) {
2697 assert(Off == 0 || Off == 32);
2698 ExtV =
Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
2699 }
else if (Off == 0 && (ValWidth % 8) == 0) {
2706 {VecV, WidthV, OffV});
2709 if (ty(IdxV) != MVT::i32)
2714 {VecV, WidthV, OffV});
2724HexagonTargetLowering::extractVectorPred(
SDValue VecV,
SDValue IdxV,
2729 MVT VecTy = ty(VecV);
2733 "Vector elements should equal vector width size");
2734 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2745 if (ValWidth == 1) {
2746 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2756 unsigned Scale = VecWidth / ValWidth;
2760 assert(ty(IdxV) == MVT::i32);
2761 unsigned VecRep = 8 / VecWidth;
2769 T1 = LoHalf(T1, DAG);
2770 T1 = expandPredicate(T1, dl, DAG);
2781 MVT VecTy = ty(VecV);
2783 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
2787 assert(VecWidth == 32 || VecWidth == 64);
2788 assert((VecWidth % ValWidth) == 0);
2804 unsigned W =
C->getZExtValue() * ValWidth;
2807 {VecV, ValV, WidthV, OffV});
2809 if (ty(IdxV) != MVT::i32)
2813 {VecV, ValV, WidthV, OffV});
2820HexagonTargetLowering::insertVectorPred(
SDValue VecV,
SDValue ValV,
2823 MVT VecTy = ty(VecV);
2826 if (ValTy == MVT::i1) {
2827 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2833 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {
Ins}, DAG);
2844 for (
unsigned R = Scale;
R > 1;
R /= 2) {
2845 ValR = contractPredicate(ValR, dl, DAG);
2846 ValR = getCombine(DAG.
getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
2858HexagonTargetLowering::expandPredicate(
SDValue Vec32,
const SDLoc &dl,
2860 assert(ty(Vec32).getSizeInBits() == 32);
2869HexagonTargetLowering::contractPredicate(
SDValue Vec64,
const SDLoc &dl,
2871 assert(ty(Vec64).getSizeInBits() == 64);
2877 {0, 2, 4, 6, 1, 3, 5, 7});
2878 return extractVector(S, DAG.
getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
2902 MVT ValTy = ty(Val);
2907 if (ValLen == ResLen)
2910 const SDLoc &dl(Val);
2912 assert(ResLen % ValLen == 0);
2915 for (
unsigned i = 1, e = ResLen / ValLen; i <
e; ++i)
2924 MVT ElemTy = ty(
Hi);
2953 return buildVector32(Ops, dl, VecTy, DAG);
2955 return buildVector64(Ops, dl, VecTy, DAG);
2957 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2959 bool All0 =
true, All1 =
true;
2961 auto *CN = dyn_cast<ConstantSDNode>(
P.getNode());
2962 if (CN ==
nullptr) {
2963 All0 = All1 =
false;
2979 SDValue Z = getZero(dl, MVT::i32, DAG);
2982 for (
unsigned i = 0; i != 8; ++i) {
2984 Rs[i] = DAG.
getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2987 for (
unsigned i = 0, e =
A.size()/2; i != e; ++i)
2991 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
3004 return getCombine(
Op.getOperand(1),
Op.getOperand(0), dl, VecTy, DAG);
3008 if (ElemTy == MVT::i1) {
3009 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
3010 MVT OpTy = ty(
Op.getOperand(0));
3023 for (
SDValue P :
Op.getNode()->op_values()) {
3025 for (
unsigned R = Scale; R > 1; R /= 2) {
3026 W = contractPredicate(W, dl, DAG);
3027 W = getCombine(DAG.
getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
3035 Words[IdxW ^ 1].
clear();
3037 for (
unsigned i = 0, e = Words[IdxW].
size(); i != e; i += 2) {
3038 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
3041 {W0, W1, WidthV, WidthV});
3049 assert(Scale == 2 && Words[IdxW].
size() == 2);
3051 SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
3063 return extractVector(Vec,
Op.getOperand(1),
SDLoc(
Op), ElemTy, ty(
Op), DAG);
3069 return extractVector(
Op.getOperand(0),
Op.getOperand(1),
SDLoc(
Op),
3070 ty(
Op), ty(
Op), DAG);
3076 return insertVector(
Op.getOperand(0),
Op.getOperand(1),
Op.getOperand(2),
3084 return insertVector(
Op.getOperand(0), ValV,
Op.getOperand(2),
3109 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
3116 LN = cast<LoadSDNode>(
NL.getNode());
3120 if (!validateConstPtrAlignment(LN->
getBasePtr(), ClaimAlign, dl, DAG))
3121 return replaceMemWithUndef(
Op, DAG);
3127 SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
3146 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
3148 SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
3155 SN = cast<StoreSDNode>(NS.getNode());
3159 if (!validateConstPtrAlignment(SN->
getBasePtr(), ClaimAlign, dl, DAG))
3160 return replaceMemWithUndef(
Op, DAG);
3164 if (ClaimAlign < NeedAlign)
3173 MVT LoadTy = ty(
Op);
3176 if (HaveAlign >= NeedAlign)
3185 bool DoDefault =
false;
3196 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
3215 unsigned LoadLen = NeedAlign;
3218 auto BO = getBaseAndOffset(
Base);
3219 unsigned BaseOpc = BO.first.getOpcode();
3223 if (BO.second % LoadLen != 0) {
3225 DAG.
getConstant(BO.second % LoadLen, dl, MVT::i32));
3226 BO.second -= BO.second % LoadLen;
3241 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen,
Align(LoadLen),
3242 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
3243 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
3260 auto *CY = dyn_cast<ConstantSDNode>(
Y);
3268 unsigned Opc =
Op.getOpcode();
3272 assert(VY != 0 &&
"This should have been folded");
3297 unsigned Opc =
Op.getOpcode();
3304 EVT CarryTy =
C.getValueType();
3306 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
3325 unsigned OffsetReg = Hexagon::R28;
3341 unsigned Opc =
Op.getOpcode();
3347 if (isHvxOperation(
Op.getNode(), DAG)) {
3349 if (
SDValue V = LowerHvxOperation(
Op, DAG))
3356 Op.getNode()->dumpr(&DAG);
3358 errs() <<
"Error: check for a non-legal type in this operation\n";
3412 if (isHvxOperation(
N, DAG)) {
3413 LowerHvxOperationWrapper(
N,
Results, DAG);
3419 unsigned Opc =
N->getOpcode();
3443 if (isHvxOperation(
N, DAG)) {
3444 ReplaceHvxNodeResults(
N,
Results, DAG);
3450 switch (
N->getOpcode()) {
3457 if (
N->getValueType(0) == MVT::i8) {
3458 if (
N->getOperand(0).getValueType() == MVT::v8i1) {
3459 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3460 N->getOperand(0), DAG);
3472 if (isHvxOperation(
N, DCI.
DAG)) {
3473 if (
SDValue V = PerformHvxDAGCombine(
N, DCI))
3480 unsigned Opc =
Op.getOpcode();
3486 EVT TruncTy =
Op.getValueType();
3502 switch (
P.getOpcode()) {
3506 return getZero(dl, ty(
Op), DCI.
DAG);
3519 Op.getOperand(2),
Op.getOperand(1));
3527 MVT TruncTy = ty(
Op);
3530 if (ty(Elem0) == TruncTy)
3533 if (ty(Elem0).bitsGT(TruncTy))
3540 if (ty(
Op) != MVT::i64)
3551 auto *Amt = dyn_cast<ConstantSDNode>(Shl.
getOperand(1));
3552 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
3553 unsigned A = Amt->getZExtValue();
3575 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3587 if (Constraint.
size() == 1) {
3588 switch (Constraint[0]) {
3603std::pair<unsigned, const TargetRegisterClass*>
3607 if (Constraint.
size() == 1) {
3608 switch (Constraint[0]) {
3612 return {0u,
nullptr};
3618 return {0u, &Hexagon::IntRegsRegClass};
3621 return {0u, &Hexagon::DoubleRegsRegClass};
3626 return {0u,
nullptr};
3627 return {0u, &Hexagon::ModRegsRegClass};
3631 return {0u,
nullptr};
3634 return {0u, &Hexagon::HvxQRRegClass};
3640 return {0u,
nullptr};
3642 return {0u, &Hexagon::HvxVRRegClass};
3645 return {0u, &Hexagon::HvxVRRegClass};
3646 return {0u, &Hexagon::HvxWRRegClass};
3648 return {0u, &Hexagon::HvxWRRegClass};
3652 return {0u,
nullptr};
3663 bool ForCodeSize)
const {
3693 int Scale = AM.
Scale;
3717 return Imm >= -512 && Imm <= 511;
3727 bool IsCalleeStructRet,
3728 bool IsCallerStructRet,
3735 bool CCMatch = CallerCC == CalleeCC;
3743 if (!isa<GlobalAddressSDNode>(Callee) &&
3744 !isa<ExternalSymbolSDNode>(Callee)) {
3764 if (IsCalleeStructRet || IsCallerStructRet)
3787 if (
Op.size() >= 8 &&
Op.isAligned(
Align(8)))
3789 if (
Op.size() >= 4 &&
Op.isAligned(
Align(4)))
3791 if (
Op.size() >= 2 &&
Op.isAligned(
Align(2)))
3801 return allowsHvxMemoryAccess(SVT, Flags,
Fast);
3803 Context,
DL, VT, AddrSpace, Alignment, Flags,
Fast);
3808 unsigned *
Fast)
const {
3811 return allowsHvxMisalignedMemoryAccesses(SVT, Flags,
Fast);
3817std::pair<const TargetRegisterClass*, uint8_t>
3825 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3827 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3829 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3841 auto *L = cast<LoadSDNode>(Load);
3842 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3848 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3856 AdjustHvxInstrPostInstrSelection(
MI, Node);
3865 assert((SZ == 32 || SZ == 64) &&
"Only 32/64-bit atomic loads supported");
3866 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3867 : Intrinsic::hexagon_L4_loadd_locked;
3886 assert((SZ == 32 || SZ == 64) &&
"Only 32/64-bit atomic stores supported");
3887 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3888 : Intrinsic::hexagon_S4_stored_locked;
3910 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
unsigned const MachineRegisterInfo * MRI
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memcpy"))
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
static bool isBrevLdIntrinsic(const Value *Inst)
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"))
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memmove"))
static Value * getBrevLdObject(Value *V)
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"))
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))
static cl::opt< bool > EnableFastMath("ffast-math", cl::Hidden, cl::desc("Enable Fast Math processing"))
#define Hexagon_PointerSize
#define HEXAGON_LRFP_SIZE
#define HEXAGON_GOT_SYM_NAME
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
const APInt & getValue() const
Return the constant as an APInt value reference.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
int64_t getSExtValue() const
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
This is the base abstract class for diagnostic reporting in the backend.
Interface for custom diagnostic printing.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
const GlobalObject * getAliaseeObject() const
bool isValidAutoIncImm(const EVT VT, const int Offset) const
Hexagon target-specific information for each MachineFunction.
int getVarArgsFrameIndex()
void setFirstNamedArgFrameIndex(int v)
void setHasEHReturn(bool H=true)
Register getStackRegister() const
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const HexagonInstrInfo * getInstrInfo() const override
const HexagonFrameLowering * getFrameLowering() const override
bool useSmallData() const
const HexagonRegisterInfo * getRegisterInfo() const override
bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const
Align getTypeAlignment(MVT Ty) const
unsigned getVectorLength() const
bool useHVX128BOps() const
bool isEnvironmentMusl() const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isTargetCanonicalConstantNode(SDValue Op) const override
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is a legal icmp immediate,...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT, unsigned ReturnReg, unsigned char OperandGlues) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
SDValue LowerCallResult(SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Return true if the target supports a memory access of this type for the given address space and align...
SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
LegalizeAction getCustomOperationAction(SDNode &Op) const override
How to legalize this custom operation?
SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame),...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, EVT) const override
Return true if an FMA operation is faster than a pair of mul and add instructions.
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const
HexagonTargetObjectFile * getObjFileLowering() const override
bool isGlobalInSmallSection(const GlobalObject *GO, const TargetMachine &TM) const
Return true if this global value should be placed into small data/bss section.
Common base class shared among various IRBuilders.
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
BasicBlock * GetInsertBlock() const
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
unsigned getNumFixedObjects() const
Return the number of fixed objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to ...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ UndefinedBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ MO_PCREL
MO_PCREL - On a symbol operand, indicates a PC-relative relocation Used for computing a global addres...
@ MO_GOT
MO_GOT - Indicates a GOT-relative relocation.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ GLOBAL_OFFSET_TABLE
The address of the GOT.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
int getNextAvailablePluginDiagnosticKind()
Get the next available kind ID for a plugin diagnostic.
unsigned M0(unsigned Val)
constexpr unsigned BitWidth
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
unsigned Log2(Align A)
Returns the log2 of the alignment.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const