21#include "llvm/IR/IntrinsicsHexagon.h"
32 cl::desc(
"Lower threshold (in bytes) for widening to HVX vectors"));
// Vector MVTs that are natively legal for each HVX configuration.
// "V" arrays list single-vector-register types, "W" arrays list
// vector-register-pair (double-width) types.
//   - 64-byte mode (512-bit registers):  LegalV64 / LegalW64
//   - 128-byte mode (1024-bit registers): LegalV128 / LegalW128
// Each array covers the i8/i16/i32 element widths at the full register
// (or register-pair) width, e.g. v64i8 == 64 x 8 bits == 512 bits.
// NOTE(review): the leading "34".."37" on these lines are fused
// original-line-number artifacts from extraction, not program tokens.
34static const MVT LegalV64[] = { MVT::v64i8, MVT::v32i16, MVT::v16i32 };
35static const MVT LegalW64[] = { MVT::v128i8, MVT::v64i16, MVT::v32i32 };
36static const MVT LegalV128[] = { MVT::v128i8, MVT::v64i16, MVT::v32i32 };
37static const MVT LegalW128[] = { MVT::v256i8, MVT::v128i16, MVT::v64i32 };
42 switch (ElemTy.SimpleTy) {
44 return std::make_tuple(5, 15, 10);
46 return std::make_tuple(8, 127, 23);
48 return std::make_tuple(11, 1023, 52);
56HexagonTargetLowering::initializeHVXLowering() {
57 if (Subtarget.useHVX64BOps()) {
75 }
else if (Subtarget.useHVX128BOps()) {
85 if (Subtarget.useHVXV68Ops() && Subtarget.useHVXFloatingPoint()) {
95 bool Use64b = Subtarget.useHVX64BOps();
98 MVT ByteV = Use64b ? MVT::v64i8 : MVT::v128i8;
99 MVT WordV = Use64b ? MVT::v16i32 : MVT::v32i32;
100 MVT ByteW = Use64b ? MVT::v128i8 : MVT::v256i8;
102 auto setPromoteTo = [
this] (
unsigned Opc, MVT FromTy, MVT ToTy) {
120 if (Subtarget.useHVX128BOps())
122 if (Subtarget.useHVX128BOps() && Subtarget.useHVXV68Ops() &&
123 Subtarget.useHVXFloatingPoint()) {
125 static const MVT FloatV[] = { MVT::v64f16, MVT::v32f32 };
126 static const MVT FloatW[] = { MVT::v128f16, MVT::v64f32 };
128 for (MVT
T : FloatV) {
163 for (MVT
P : FloatW) {
185 if (Subtarget.useHVXQFloatOps()) {
188 }
else if (Subtarget.useHVXIEEEFPOps()) {
194 for (MVT
T : LegalV) {
221 if (
T.getScalarType() != MVT::i32) {
230 if (
T.getScalarType() != MVT::i32) {
258 if (Subtarget.useHVXFloatingPoint()) {
275 for (MVT
T : LegalW) {
330 if (
T.getScalarType() != MVT::i32) {
335 if (Subtarget.useHVXFloatingPoint()) {
378 for (MVT
T : LegalW) {
395 for (MVT
T : LegalV) {
410 for (MVT
T: {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, MVT::v16i32})
413 for (MVT
T: {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32})
418 unsigned HwLen = Subtarget.getVectorLength();
419 for (MVT ElemTy : Subtarget.getHVXElementTypes()) {
420 if (ElemTy == MVT::i1)
422 int ElemWidth = ElemTy.getFixedSizeInBits();
423 int MaxElems = (8*HwLen) / ElemWidth;
424 for (
int N = 2;
N < MaxElems;
N *= 2) {
435 if (Subtarget.useHVXFloatingPoint()) {
457HexagonTargetLowering::getPreferredHvxVectorAction(
MVT VecTy)
const {
460 unsigned HwLen = Subtarget.getVectorLength();
463 if (ElemTy == MVT::i1 && VecLen > HwLen)
469 if (ElemTy == MVT::i1) {
484 unsigned HwWidth = 8*HwLen;
485 if (VecWidth > 2*HwWidth)
491 if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
500HexagonTargetLowering::getCustomHvxOperationAction(
SDNode &
Op)
const {
501 unsigned Opc =
Op.getOpcode();
521HexagonTargetLowering::typeJoin(
const TypePair &Tys)
const {
522 assert(Tys.first.getVectorElementType() == Tys.second.getVectorElementType());
526 Tys.second.getVectorNumElements());
529HexagonTargetLowering::TypePair
530HexagonTargetLowering::typeSplit(
MVT VecTy)
const {
533 assert((NumElem % 2) == 0 &&
"Expecting even-sized vector type");
535 return { HalfTy, HalfTy };
539HexagonTargetLowering::typeExtElem(
MVT VecTy,
unsigned Factor)
const {
546HexagonTargetLowering::typeTruncElem(
MVT VecTy,
unsigned Factor)
const {
553HexagonTargetLowering::opCastElem(
SDValue Vec,
MVT ElemTy,
562HexagonTargetLowering::opJoin(
const VectorPair &
Ops,
const SDLoc &dl,
568HexagonTargetLowering::VectorPair
569HexagonTargetLowering::opSplit(
SDValue Vec,
const SDLoc &dl,
571 TypePair Tys = typeSplit(ty(Vec));
574 return DAG.
SplitVector(Vec, dl, Tys.first, Tys.second);
578HexagonTargetLowering::isHvxSingleTy(
MVT Ty)
const {
579 return Subtarget.isHVXVectorType(Ty) &&
584HexagonTargetLowering::isHvxPairTy(
MVT Ty)
const {
585 return Subtarget.isHVXVectorType(Ty) &&
590HexagonTargetLowering::isHvxBoolTy(
MVT Ty)
const {
591 return Subtarget.isHVXVectorType(Ty,
true) &&
595bool HexagonTargetLowering::allowsHvxMemoryAccess(
603 if (!Subtarget.isHVXVectorType(VecTy,
false))
610bool HexagonTargetLowering::allowsHvxMisalignedMemoryAccesses(
612 if (!Subtarget.isHVXVectorType(VecTy))
620void HexagonTargetLowering::AdjustHvxInstrPostInstrSelection(
622 unsigned Opc =
MI.getOpcode();
623 const TargetInstrInfo &
TII = *Subtarget.getInstrInfo();
624 MachineBasicBlock &MB = *
MI.getParent();
628 auto At =
MI.getIterator();
631 case Hexagon::PS_vsplatib:
632 if (Subtarget.useHVXV62Ops()) {
635 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
637 .
add(
MI.getOperand(1));
639 BuildMI(MB, At,
DL,
TII.get(Hexagon::V6_lvsplatb), OutV)
644 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
645 const MachineOperand &InpOp =
MI.getOperand(1);
647 uint32_t
V = InpOp.
getImm() & 0xFF;
649 .
addImm(V << 24 | V << 16 | V << 8 | V);
655 case Hexagon::PS_vsplatrb:
656 if (Subtarget.useHVXV62Ops()) {
659 BuildMI(MB, At,
DL,
TII.get(Hexagon::V6_lvsplatb), OutV)
660 .
add(
MI.getOperand(1));
662 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
663 const MachineOperand &InpOp =
MI.getOperand(1);
664 BuildMI(MB, At,
DL,
TII.get(Hexagon::S2_vsplatrb), SplatV)
667 BuildMI(MB, At,
DL,
TII.get(Hexagon::V6_lvsplatw), OutV)
672 case Hexagon::PS_vsplatih:
673 if (Subtarget.useHVXV62Ops()) {
676 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
678 .
add(
MI.getOperand(1));
680 BuildMI(MB, At,
DL,
TII.get(Hexagon::V6_lvsplath), OutV)
685 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
686 const MachineOperand &InpOp =
MI.getOperand(1);
688 uint32_t
V = InpOp.
getImm() & 0xFFFF;
696 case Hexagon::PS_vsplatrh:
697 if (Subtarget.useHVXV62Ops()) {
700 BuildMI(MB, At,
DL,
TII.get(Hexagon::V6_lvsplath), OutV)
701 .
add(
MI.getOperand(1));
705 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
706 const MachineOperand &InpOp =
MI.getOperand(1);
707 BuildMI(MB, At,
DL,
TII.get(Hexagon::A2_combine_ll), SplatV)
715 case Hexagon::PS_vsplatiw:
716 case Hexagon::PS_vsplatrw:
717 if (
Opc == Hexagon::PS_vsplatiw) {
719 Register SplatV =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
721 .
add(
MI.getOperand(1));
722 MI.getOperand(1).ChangeToRegister(SplatV,
false);
725 MI.setDesc(
TII.get(Hexagon::V6_lvsplatw));
731HexagonTargetLowering::convertToByteIndex(
SDValue ElemIdx,
MVT ElemTy,
741 const SDLoc &dl(ElemIdx);
747HexagonTargetLowering::getIndexInWord32(
SDValue Idx,
MVT ElemTy,
750 assert(ElemWidth >= 8 && ElemWidth <= 32);
754 if (ty(Idx) != MVT::i32)
756 const SDLoc &dl(Idx);
763HexagonTargetLowering::getByteShuffle(
const SDLoc &dl,
SDValue Op0,
770 if (ElemTy == MVT::i8)
774 MVT ResTy = tyVector(OpTy, MVT::i8);
777 SmallVector<int,128> ByteMask;
780 for (
unsigned I = 0;
I != ElemSize; ++
I)
783 int NewM =
M*ElemSize;
784 for (
unsigned I = 0;
I != ElemSize; ++
I)
790 opCastElem(Op1, MVT::i8, DAG), ByteMask);
797 unsigned VecLen = Values.
size();
801 unsigned HwLen = Subtarget.getVectorLength();
803 unsigned ElemSize = ElemWidth / 8;
804 assert(ElemSize*VecLen == HwLen);
808 !(Subtarget.useHVXFloatingPoint() &&
810 assert((ElemSize == 1 || ElemSize == 2) &&
"Invalid element size");
811 unsigned OpsPerWord = (ElemSize == 1) ? 4 : 2;
813 for (
unsigned i = 0; i != VecLen; i += OpsPerWord) {
814 SDValue W = buildVector32(Values.
slice(i, OpsPerWord), dl, PartVT, DAG);
822 unsigned NumValues = Values.size();
825 for (
unsigned i = 0; i != NumValues; ++i) {
826 if (Values[i].isUndef())
829 if (!SplatV.getNode())
831 else if (SplatV != Values[i])
839 unsigned NumWords = Words.
size();
841 bool IsSplat =
isSplat(Words, SplatV);
842 if (IsSplat && isUndef(SplatV))
847 return getZero(dl, VecTy, DAG);
856 bool AllConst = getBuildVectorConstInts(Values, VecTy, DAG, Consts);
859 (Constant**)Consts.end());
861 Align Alignment(HwLen);
875 auto IsBuildFromExtracts = [
this,&Values] (
SDValue &SrcVec,
876 SmallVectorImpl<int> &SrcIdx) {
880 SrcIdx.push_back(-1);
893 int I =
C->getSExtValue();
894 assert(
I >= 0 &&
"Negative element index");
901 SmallVector<int,128> ExtIdx;
903 if (IsBuildFromExtracts(ExtVec, ExtIdx)) {
904 MVT ExtTy = ty(ExtVec);
906 if (ExtLen == VecLen || ExtLen == 2*VecLen) {
910 SmallVector<int,128>
Mask;
911 BitVector
Used(ExtLen);
913 for (
int M : ExtIdx) {
923 for (
unsigned I = 0;
I != ExtLen; ++
I) {
924 if (
Mask.size() == ExtLen)
932 return ExtLen == VecLen ? S : LoHalf(S, DAG);
940 assert(4*Words.
size() == Subtarget.getVectorLength());
943 for (
unsigned i = 0; i != NumWords; ++i) {
945 if (Words[i].isUndef())
947 for (
unsigned j = i;
j != NumWords; ++
j)
948 if (Words[i] == Words[j])
951 if (VecHist[i] > VecHist[n])
955 SDValue HalfV = getZero(dl, VecTy, DAG);
956 if (VecHist[n] > 1) {
959 {HalfV, SplatV, DAG.
getConstant(HwLen/2, dl, MVT::i32)});
971 for (
unsigned i = 0; i != NumWords/2; ++i) {
973 if (Words[i] != Words[n] || VecHist[n] <= 1) {
980 if (Words[i+NumWords/2] != Words[n] || VecHist[n] <= 1) {
984 {HalfV1, Words[i+NumWords/2]});
1007HexagonTargetLowering::createHvxPrefixPred(
SDValue PredV,
const SDLoc &dl,
1008 unsigned BitBytes,
bool ZeroFill,
SelectionDAG &DAG)
const {
1009 MVT PredTy = ty(PredV);
1010 unsigned HwLen = Subtarget.getVectorLength();
1013 if (Subtarget.isHVXVectorType(PredTy,
true)) {
1023 SmallVector<int,128>
Mask(HwLen);
1028 for (
unsigned i = 0; i != HwLen; ++i) {
1029 unsigned Num = i % Scale;
1030 unsigned Off = i / Scale;
1039 assert(BlockLen < HwLen &&
"vsetq(v1) prerequisite");
1041 SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
1048 assert(PredTy == MVT::v2i1 || PredTy == MVT::v4i1 || PredTy == MVT::v8i1);
1060 while (Bytes < BitBytes) {
1062 Words[IdxW].
clear();
1065 for (
const SDValue &W : Words[IdxW ^ 1]) {
1066 SDValue T = expandPredicate(W, dl, DAG);
1071 for (
const SDValue &W : Words[IdxW ^ 1]) {
1079 assert(Bytes == BitBytes);
1081 SDValue Vec = ZeroFill ? getZero(dl, ByteTy, DAG) : DAG.getUNDEF(ByteTy);
1083 for (
const SDValue &W : Words[IdxW]) {
1097 unsigned VecLen = Values.
size();
1098 unsigned HwLen = Subtarget.getVectorLength();
1099 assert(VecLen <= HwLen || VecLen == 8*HwLen);
1101 bool AllT =
true, AllF =
true;
1105 return !
N->isZero();
1114 if (VecLen <= HwLen) {
1118 assert(HwLen % VecLen == 0);
1119 unsigned BitBytes = HwLen / VecLen;
1126 for (
unsigned B = 0;
B != BitBytes; ++
B)
1133 for (
unsigned I = 0;
I != VecLen;
I += 8) {
1136 for (;
B != 8; ++
B) {
1137 if (!Values[
I+
B].isUndef())
1150 assert(Values[
I+
B].isUndef() || Values[
I+
B] ==
F);
1160 SDValue ByteVec = buildHvxVectorReg(Bytes, dl, ByteTy, DAG);
1165HexagonTargetLowering::extractHvxElementReg(
SDValue VecV,
SDValue IdxV,
1170 assert(ElemWidth >= 8 && ElemWidth <= 32);
1173 SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG);
1176 if (ElemTy == MVT::i32)
1182 SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG);
1185 return extractVector(ExVec, SubIdx, dl, ElemTy, MVT::i32, DAG);
1189HexagonTargetLowering::extractHvxElementPred(
SDValue VecV,
SDValue IdxV,
1192 assert(ResTy == MVT::i1);
1194 unsigned HwLen = Subtarget.getVectorLength();
1198 unsigned Scale = HwLen / ty(VecV).getVectorNumElements();
1202 SDValue ExtB = extractHvxElementReg(ByteVec, IdxV, dl, MVT::i32, DAG);
1204 return getInstr(Hexagon::C2_cmpgtui, dl, MVT::i1, {ExtB,
Zero}, DAG);
1208HexagonTargetLowering::insertHvxElementReg(
SDValue VecV,
SDValue IdxV,
1213 assert(ElemWidth >= 8 && ElemWidth <= 32);
1218 MVT VecTy = ty(VecV);
1219 unsigned HwLen = Subtarget.getVectorLength();
1231 SDValue ByteIdx = convertToByteIndex(IdxV, ElemTy, DAG);
1232 if (ElemTy == MVT::i32)
1233 return InsertWord(VecV, ValV, ByteIdx);
1239 SDValue Ext = extractHvxElementReg(opCastElem(VecV, MVT::i32, DAG), WordIdx,
1244 SDValue SubIdx = getIndexInWord32(IdxV, ElemTy, DAG);
1245 MVT SubVecTy = tyVector(ty(Ext), ElemTy);
1247 ValV, SubIdx, dl, ElemTy, DAG);
1250 return InsertWord(VecV, Ins, ByteIdx);
1254HexagonTargetLowering::insertHvxElementPred(
SDValue VecV,
SDValue IdxV,
1256 unsigned HwLen = Subtarget.getVectorLength();
1260 unsigned Scale = HwLen / ty(VecV).getVectorNumElements();
1265 SDValue InsV = insertHvxElementReg(ByteVec, IdxV, ValV, dl, DAG);
1270HexagonTargetLowering::extractHvxSubvectorReg(
SDValue OrigOp,
SDValue VecV,
1272 MVT VecTy = ty(VecV);
1273 unsigned HwLen = Subtarget.getVectorLength();
1281 if (isHvxPairTy(VecTy)) {
1282 unsigned SubIdx = Hexagon::vsub_lo;
1283 if (Idx * ElemWidth >= 8 * HwLen) {
1284 SubIdx = Hexagon::vsub_hi;
1288 VecTy = typeSplit(VecTy).first;
1298 MVT WordTy = tyVector(VecTy, MVT::i32);
1300 unsigned WordIdx = (Idx*ElemWidth) / 32;
1303 SDValue W0 = extractHvxElementReg(WordVec, W0Idx, dl, MVT::i32, DAG);
1308 SDValue W1 = extractHvxElementReg(WordVec, W1Idx, dl, MVT::i32, DAG);
1309 SDValue WW = getCombine(W1, W0, dl, MVT::i64, DAG);
1314HexagonTargetLowering::extractHvxSubvectorPred(
SDValue VecV,
SDValue IdxV,
1316 MVT VecTy = ty(VecV);
1317 unsigned HwLen = Subtarget.getVectorLength();
1325 unsigned Offset = Idx * BitBytes;
1327 SmallVector<int,128>
Mask;
1329 if (Subtarget.isHVXVectorType(ResTy,
true)) {
1336 for (
unsigned i = 0; i != HwLen/Rep; ++i) {
1337 for (
unsigned j = 0;
j != Rep; ++
j)
1354 unsigned Rep = 8 / ResLen;
1357 for (
unsigned r = 0; r != HwLen/ResLen; ++r) {
1359 for (
unsigned i = 0; i != ResLen; ++i) {
1360 for (
unsigned j = 0;
j != Rep; ++
j)
1372 SDValue Vec64 = getCombine(W1, W0, dl, MVT::v8i8, DAG);
1373 return getInstr(Hexagon::A4_vcmpbgtui, dl, ResTy,
1378HexagonTargetLowering::insertHvxSubvectorReg(
SDValue VecV,
SDValue SubV,
1380 MVT VecTy = ty(VecV);
1381 MVT SubTy = ty(SubV);
1382 unsigned HwLen = Subtarget.getVectorLength();
1386 bool IsPair = isHvxPairTy(VecTy);
1394 V0 = LoHalf(VecV, DAG);
1395 V1 = HiHalf(VecV, DAG);
1400 if (isHvxSingleTy(SubTy)) {
1402 unsigned Idx = CN->getZExtValue();
1404 unsigned SubIdx = (Idx == 0) ? Hexagon::vsub_lo : Hexagon::vsub_hi;
1427 if (!IdxN || !IdxN->isZero()) {
1435 unsigned RolBase = HwLen;
1450 if (RolBase != 4 || !IdxN || !IdxN->isZero()) {
1465HexagonTargetLowering::insertHvxSubvectorPred(
SDValue VecV,
SDValue SubV,
1467 MVT VecTy = ty(VecV);
1468 MVT SubTy = ty(SubV);
1469 assert(Subtarget.isHVXVectorType(VecTy,
true));
1474 unsigned HwLen = Subtarget.getVectorLength();
1475 assert(HwLen % VecLen == 0 &&
"Unexpected vector type");
1478 unsigned BitBytes = HwLen / VecLen;
1479 unsigned BlockLen = HwLen / Scale;
1483 SDValue ByteSub = createHvxPrefixPred(SubV, dl, BitBytes,
false, DAG);
1487 if (!IdxN || !IdxN->isZero()) {
1496 assert(BlockLen < HwLen &&
"vsetq(v1) prerequisite");
1498 SDValue Q = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
1500 ByteVec = getInstr(Hexagon::V6_vmux, dl, ByteTy, {Q, ByteSub, ByteVec}, DAG);
1502 if (!IdxN || !IdxN->isZero()) {
1511HexagonTargetLowering::extendHvxVectorPred(
SDValue VecV,
const SDLoc &dl,
1516 assert(Subtarget.isHVXVectorType(ResTy));
1523 SDValue False = getZero(dl, ResTy, DAG);
1524 return DAG.
getSelect(dl, ResTy, VecV, True, False);
1528HexagonTargetLowering::compressHvxPred(
SDValue VecQ,
const SDLoc &dl,
1536 unsigned HwLen = Subtarget.getVectorLength();
1538 MVT PredTy = ty(VecQ);
1540 assert(HwLen % PredLen == 0);
1547 for (
unsigned i = 0; i != HwLen/8; ++i) {
1548 for (
unsigned j = 0;
j != 8; ++
j)
1549 Tmp.
push_back(ConstantInt::get(Int8Ty, 1ull << j));
1552 Align Alignment(HwLen);
1561 getZero(dl, VecTy, DAG));
1567 SDValue Vrmpy = getInstr(Hexagon::V6_vrmpyub, dl, ByteTy, {Sel, All1}, DAG);
1569 SDValue Rot = getInstr(Hexagon::V6_valignbi, dl, ByteTy,
1576 SmallVector<int,128>
Mask;
1577 for (
unsigned i = 0; i != HwLen; ++i)
1578 Mask.push_back((8*i) % HwLen + i/(HwLen/8));
1588 MVT InpTy = ty(VecV);
1596 return InpWidth < ResWidth
1597 ? DAG.
getNode(ISD::FP_EXTEND, dl, ResTy, VecV)
1604 if (InpWidth < ResWidth) {
1606 return DAG.
getNode(ExtOpc, dl, ResTy, VecV);
1614HexagonTargetLowering::extractSubvector(
SDValue Vec,
MVT SubTy,
unsigned SubIdx,
1618 const SDLoc &dl(Vec);
1627 const SDLoc &dl(
Op);
1632 for (
unsigned i = 0; i !=
Size; ++i)
1633 Ops.push_back(
Op.getOperand(i));
1636 return buildHvxVectorPred(
Ops, dl, VecTy, DAG);
1643 for (
unsigned i = 0; i !=
Size; i++)
1647 tyVector(VecTy, MVT::i16), NewOps);
1648 return DAG.
getBitcast(tyVector(VecTy, MVT::f16), T0);
1654 if (VecTy.
getSizeInBits() == 16 * Subtarget.getVectorLength()) {
1656 MVT SingleTy = typeSplit(VecTy).first;
1657 SDValue V0 = buildHvxVectorReg(
A.take_front(
Size / 2), dl, SingleTy, DAG);
1658 SDValue V1 = buildHvxVectorReg(
A.drop_front(
Size / 2), dl, SingleTy, DAG);
1662 return buildHvxVectorReg(
Ops, dl, VecTy, DAG);
1668 const SDLoc &dl(
Op);
1670 MVT ArgTy = ty(
Op.getOperand(0));
1672 if (ArgTy == MVT::f16) {
1690 const SDLoc &dl(
Op);
1707 MVT NTy = typeLegalize(Ty, DAG);
1711 V.getOperand(0),
V.getOperand(1)),
1716 switch (
V.getOpcode()) {
1724 V =
V.getOperand(0);
1735 unsigned HwLen = Subtarget.getVectorLength();
1742 if (Subtarget.isHVXVectorType(ty(Op0),
true)) {
1750 MVT HalfTy = typeSplit(VecTy).first;
1752 Ops.take_front(NumOp/2));
1754 Ops.take_back(NumOp/2));
1763 for (
SDValue V :
Op.getNode()->op_values()) {
1764 SDValue P = createHvxPrefixPred(V, dl, BitBytes,
true, DAG);
1768 unsigned InpLen = ty(
Op.getOperand(0)).getVectorNumElements();
1771 SDValue Res = getZero(dl, ByteTy, DAG);
1772 for (
unsigned i = 0, e = Prefixes.
size(); i != e; ++i) {
1785 const SDLoc &dl(
Op);
1787 if (ElemTy == MVT::i1)
1788 return extractHvxElementPred(VecV, IdxV, dl, ty(
Op), DAG);
1790 return extractHvxElementReg(VecV, IdxV, dl, ty(
Op), DAG);
1796 const SDLoc &dl(
Op);
1802 if (ElemTy == MVT::i1)
1803 return insertHvxElementPred(VecV, IdxV, ValV, dl, DAG);
1805 if (ElemTy == MVT::f16) {
1807 tyVector(VecTy, MVT::i16),
1808 DAG.
getBitcast(tyVector(VecTy, MVT::i16), VecV),
1810 return DAG.
getBitcast(tyVector(VecTy, MVT::f16), T0);
1813 return insertHvxElementReg(VecV, IdxV, ValV, dl, DAG);
1820 MVT SrcTy = ty(SrcV);
1826 const SDLoc &dl(
Op);
1829 if (ElemTy == MVT::i1)
1830 return extractHvxSubvectorPred(SrcV, IdxV, dl, DstTy, DAG);
1832 return extractHvxSubvectorReg(
Op, SrcV, IdxV, dl, DstTy, DAG);
1843 const SDLoc &dl(
Op);
1844 MVT VecTy = ty(VecV);
1846 if (ElemTy == MVT::i1)
1847 return insertHvxSubvectorPred(VecV, ValV, IdxV, dl, DAG);
1849 return insertHvxSubvectorReg(VecV, ValV, IdxV, dl, DAG);
1861 if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
1862 return LowerHvxSignExt(
Op, DAG);
1871 if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
1872 return extendHvxVectorPred(InpV, SDLoc(
Op), ty(
Op),
false, DAG);
1881 if (ElemTy == MVT::i1 && Subtarget.isHVXVectorType(ResTy))
1882 return extendHvxVectorPred(InpV, SDLoc(
Op), ty(
Op),
true, DAG);
1890 const SDLoc &dl(
Op);
1893 assert(ResTy == ty(InpV));
1918 const SDLoc &dl(
Op);
1925 SDVTList ResTys = DAG.
getVTList(ResTy, ResTy);
1926 unsigned Opc =
Op.getOpcode();
1942 const SDLoc &dl(
Op);
1943 unsigned Opc =
Op.getOpcode();
1948 if (
auto HiVal =
Op.getValue(1); HiVal.use_empty()) {
1960 if (Subtarget.useHVXV62Ops())
1961 return emitHvxMulLoHiV62(Vu, SignedVu, Vv, SignedVv, dl, DAG);
1966 if (
auto LoVal =
Op.getValue(0); LoVal.use_empty()) {
1967 SDValue Hi = emitHvxMulHsV60(Vu, Vv, dl, DAG);
1973 return emitHvxMulLoHiV60(Vu, SignedVu, Vv, SignedVv, dl, DAG);
1980 MVT ValTy = ty(Val);
1981 const SDLoc &dl(
Op);
1984 unsigned HwLen = Subtarget.getVectorLength();
1986 SDValue VQ = compressHvxPred(Val, dl, WordTy, DAG);
2001 for (
unsigned i = 0; i !=
BitWidth/32; ++i) {
2003 VQ, DAG.
getConstant(i, dl, MVT::i32), dl, MVT::i32, DAG);
2008 for (
unsigned i = 0, e = Words.
size(); i < e; i += 2) {
2009 SDValue C = getCombine(Words[i+1], Words[i], dl, MVT::i64, DAG);
2022 if (ResTy == MVT::v32i1 &&
2023 (ValTy == MVT::i32 || ValTy == MVT::v2i16 || ValTy == MVT::v4i8) &&
2024 Subtarget.useHVX128BOps()) {
2026 if (ValTy == MVT::v2i16 || ValTy == MVT::v4i8)
2027 Val32 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, Val);
2032 for (
unsigned i = 0; i < 32; ++i)
2043 unsigned HwLen = Subtarget.getVectorLength();
2055 for (
unsigned I = 0;
I != HwLen / 8; ++
I) {
2059 for (
unsigned J = 0; J != 8; ++J) {
2067 SDValue I2V = buildHvxVectorReg(Bytes, dl, ConstantVecTy, DAG);
2091 const SDLoc &dl(
Op);
2092 unsigned HwLen = Subtarget.getVectorLength();
2094 assert(HwLen % VecLen == 0);
2095 unsigned ElemSize = HwLen / VecLen;
2107 if (
SDValue S = getVectorShiftByInt(
Op, DAG))
2113HexagonTargetLowering::LowerHvxFunnelShift(
SDValue Op,
2115 unsigned Opc =
Op.getOpcode();
2127 const SDLoc &dl(
Op);
2133 bool UseShifts = ElemTy != MVT::i8;
2134 if (Subtarget.useHVXV65Ops() && ElemTy == MVT::i32)
2137 if (
SDValue SplatV = getSplatValue(S, DAG); SplatV && UseShifts) {
2145 {DAG.
getConstant(ElemWidth, dl, MVT::i32), ModS});
2161 InpTy, dl, DAG.
getConstant(ElemWidth - 1, dl, ElemTy));
2170 const SDLoc &dl(
Op);
2171 unsigned IntNo =
Op.getConstantOperandVal(0);
2179 case Intrinsic::hexagon_V6_pred_typecast:
2180 case Intrinsic::hexagon_V6_pred_typecast_128B: {
2181 MVT ResTy = ty(
Op), InpTy = ty(
Ops[1]);
2182 if (isHvxBoolTy(ResTy) && isHvxBoolTy(InpTy)) {
2189 case Intrinsic::hexagon_V6_vmpyss_parts:
2190 case Intrinsic::hexagon_V6_vmpyss_parts_128B:
2193 case Intrinsic::hexagon_V6_vmpyuu_parts:
2194 case Intrinsic::hexagon_V6_vmpyuu_parts_128B:
2197 case Intrinsic::hexagon_V6_vmpyus_parts:
2198 case Intrinsic::hexagon_V6_vmpyus_parts_128B: {
2209 const SDLoc &dl(
Op);
2210 unsigned HwLen = Subtarget.getVectorLength();
2214 SDValue Chain = MaskN->getChain();
2218 unsigned Opc =
Op->getOpcode();
2221 if (
Opc == ISD::MLOAD) {
2235 unsigned StoreOpc = Hexagon::V6_vS32b_qpred_ai;
2239 if (MaskN->getAlign().value() % HwLen == 0) {
2248 SDValue Z = getZero(dl, ty(V), DAG);
2252 SDValue LoV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {
V,
Z,
A}, DAG);
2253 SDValue HiV = getInstr(Hexagon::V6_vlalignb, dl, ty(V), {
Z,
V,
A}, DAG);
2254 return std::make_pair(LoV, HiV);
2260 VectorPair Tmp = StoreAlign(MaskV,
Base);
2263 VectorPair ValueU = StoreAlign(
Value,
Base);
2267 getInstr(StoreOpc, dl, MVT::Other,
2268 {MaskU.first,
Base, Offset0, ValueU.first, Chain}, DAG);
2270 getInstr(StoreOpc, dl, MVT::Other,
2271 {MaskU.second,
Base, Offset1, ValueU.second, Chain}, DAG);
2281 assert(Subtarget.useHVXQFloatOps());
2283 assert(
Op->getOpcode() == ISD::FP_EXTEND);
2286 MVT ArgTy = ty(
Op.getOperand(0));
2287 const SDLoc &dl(
Op);
2288 assert(VecTy == MVT::v64f32 && ArgTy == MVT::v64f16);
2297 getInstr(Hexagon::V6_vmpy_qf32_hf, dl, VecTy, {F16Vec, Fp16Ones}, DAG);
2299 MVT HalfTy = typeSplit(VecTy).first;
2300 VectorPair Pair = opSplit(VmpyVec, dl, DAG);
2302 getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.first}, DAG);
2304 getInstr(Hexagon::V6_vconv_sf_qf32, dl, HalfTy, {Pair.second}, DAG);
2307 getInstr(Hexagon::V6_vshuffvdd, dl, VecTy,
2320 MVT FpTy = ty(
Op.getOperand(0)).getVectorElementType();
2323 if (Subtarget.useHVXIEEEFPOps()) {
2325 if (FpTy == MVT::f16) {
2327 assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
2329 if (IntTy == MVT::i8 || IntTy == MVT::i16)
2335 return EqualizeFpIntConversion(
Op, DAG);
2337 return ExpandHvxFpToInt(
Op, DAG);
2353 MVT ResTy = ty(PredOp);
2354 const SDLoc &dl(PredOp);
2357 SDNode *RegConst = DAG.
getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, Const);
2358 SDNode *SplatConst = DAG.
getMachineNode(Hexagon::V6_lvsplatw, dl, MVT::v32i32,
2360 SDNode *PredTransfer =
2363 SDNode *PrefixSum = DAG.
getMachineNode(Hexagon::V6_vprefixqw, dl, MVT::v32i32,
2366 Hexagon::V6_lvsplatw, dl, MVT::v32i32,
2371 SDNode *IndexShift =
2377 SDNode *Convert = DAG.
getMachineNode(Hexagon::V6_vconv_sf_w, dl, ResTy,
2402 MVT ResTy = ty(PredOp);
2403 const SDLoc &dl(PredOp);
2413 SDNode *RegConst = DAG.
getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, Const);
2414 SDNode *SplatConst = DAG.
getMachineNode(Hexagon::V6_lvsplatw, dl, MVT::v32i32,
2419 DAG.
getNode(ISD::BITCAST, dl, MVT::i32, HiReg));
2422 DAG.
getNode(ISD::BITCAST, dl, MVT::i32, LoReg));
2424 SDNode *PredTransfer =
2428 SDNode *PrefixSum = DAG.
getMachineNode(Hexagon::V6_vprefixqw, dl, MVT::v32i32,
2435 SDNode *IndexShift_hi =
2438 SDNode *IndexShift_lo =
2442 SDNode *MaskOff_hi =
2445 SDNode *MaskOff_lo =
2468 if (ResTy == MVT::v32f32 && ty(
Op.getOperand(0)) == MVT::v32i1)
2469 return LowerHvxPred32ToFp(
Op, DAG);
2470 if (ResTy == MVT::v64f16 && ty(
Op.getOperand(0)) == MVT::v64i1)
2471 return LowerHvxPred64ToFp(
Op, DAG);
2474 if (Subtarget.useHVXIEEEFPOps()) {
2476 if (FpTy == MVT::f16) {
2478 assert(IntTy == MVT::i8 || IntTy == MVT::i16 || IntTy == MVT::i32);
2480 if (IntTy == MVT::i8 || IntTy == MVT::i16)
2486 return EqualizeFpIntConversion(
Op, DAG);
2488 return ExpandHvxIntToFp(
Op, DAG);
2491HexagonTargetLowering::TypePair
2492HexagonTargetLowering::typeExtendToWider(
MVT Ty0,
MVT Ty1)
const {
2503 unsigned MaxWidth = std::max(Width0, Width1);
2505 auto getScalarWithWidth = [](MVT ScalarTy,
unsigned Width) {
2512 MVT WideETy0 = getScalarWithWidth(ElemTy0, MaxWidth);
2513 MVT WideETy1 = getScalarWithWidth(ElemTy1, MaxWidth);
2517 return {WideETy0, WideETy1};
2528HexagonTargetLowering::TypePair
2529HexagonTargetLowering::typeWidenToWider(
MVT Ty0,
MVT Ty1)
const {
2539 unsigned MaxLen = std::max(Len0, Len1);
2552HexagonTargetLowering::typeWidenToHvx(
MVT Ty)
const {
2553 unsigned HwWidth = 8 * Subtarget.getVectorLength();
2562HexagonTargetLowering::VectorPair
2593HexagonTargetLowering::VectorPair
2594HexagonTargetLowering::emitHvxShiftRightRnd(
SDValue Val,
unsigned Amt,
2599 const SDLoc &dl(Val);
2600 MVT ValTy = ty(Val);
2614 MVT IntTy = tyVector(ValTy, ElemTy);
2626 auto [Tmp0, Ovf] = emitHvxAddWithOverflow(Inp, LowBits, dl,
Signed, DAG);
2645 MVT
PairTy = typeJoin({VecTy, VecTy});
2671 SDValue T0 = getInstr(Hexagon::V6_vmpyewuh, dl, VecTy, {
B,
A}, DAG);
2673 SDValue T1 = getInstr(Hexagon::V6_vasrw, dl, VecTy, {
A,
S16}, DAG);
2681 SDValue P1 = getInstr(Hexagon::V6_vadduhw, dl,
PairTy, {T0, T2}, DAG);
2683 SDValue P2 = getInstr(Hexagon::V6_vaddhw, dl,
PairTy, {T0, T2}, DAG);
2686 SDValue T3 = getInstr(Hexagon::V6_vasrw_acc, dl, VecTy,
2687 {HiHalf(P2, DAG), LoHalf(P1, DAG),
S16}, DAG);
2688 SDValue T4 = getInstr(Hexagon::V6_vasrw, dl, VecTy, {
B,
S16}, DAG);
2699HexagonTargetLowering::emitHvxMulLoHiV60(
SDValue A,
bool SignedA,
SDValue B,
2700 bool SignedB,
const SDLoc &dl,
2703 MVT
PairTy = typeJoin({VecTy, VecTy});
2708 if (SignedA && !SignedB) {
2724 SDValue T0 = getInstr(Hexagon::V6_lvsplatw, dl, VecTy,
2725 {DAG.
getConstant(0x02020202, dl, MVT::i32)}, DAG);
2726 SDValue T1 = getInstr(Hexagon::V6_vdelta, dl, VecTy, {
B, T0}, DAG);
2735 {HiHalf(P1, DAG), LoHalf(P1, DAG)}, DAG);
2738 getInstr(Hexagon::V6_vlsrw, dl, VecTy, {LoHalf(P0, DAG),
S16}, DAG);
2742 SDValue T4 = getInstr(Hexagon::V6_vasrw_acc, dl, VecTy,
2743 {HiHalf(P2, DAG), T3,
S16}, DAG);
2746 Lo = getInstr(Hexagon::V6_vaslw_acc, dl, VecTy,
2747 {LoHalf(P0, DAG), LoHalf(P2, DAG),
S16}, DAG);
2751 assert(SignedB &&
"Signed A and unsigned B should have been inverted");
2758 SDValue X1 = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q1, X0,
A}, DAG);
2759 Hi = getInstr(Hexagon::V6_vsubw, dl, VecTy, {
Hi, X1}, DAG);
2760 }
else if (SignedB) {
2766 Hi = getInstr(Hexagon::V6_vsubwq, dl, VecTy, {Q1,
Hi,
A}, DAG);
2768 assert(!SignedA && !SignedB);
2775HexagonTargetLowering::emitHvxMulLoHiV62(
SDValue A,
bool SignedA,
2780 MVT
PairTy = typeJoin({VecTy, VecTy});
2783 if (SignedA && !SignedB) {
2792 getInstr(Hexagon::V6_vmpyowh_64_acc, dl,
PairTy, {
P0,
A,
B}, DAG);
2797 assert(!SignedA &&
"Signed A and unsigned B should have been inverted");
2809 SDValue T0 = getInstr(Hexagon::V6_vandvqv, dl, VecTy, {Q0,
B}, DAG);
2810 SDValue T1 = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q1, T0,
A}, DAG);
2811 Hi = getInstr(Hexagon::V6_vaddw, dl, VecTy, {
Hi,
T1}, DAG);
2812 }
else if (!SignedA) {
2822 Hi = getInstr(Hexagon::V6_vaddwq, dl, VecTy, {Q0,
Hi,
B}, DAG);
2840 unsigned Opc =
Op.getOpcode();
2845 MVT InpTy = ty(Inp);
2851 const SDLoc &dl(
Op);
2854 auto [WInpTy, WResTy] = typeExtendToWider(InpTy, ResTy);
2863 unsigned Opc =
Op.getOpcode();
2866 const SDLoc &dl(
Op);
2868 MVT InpTy = ty(Op0);
2948 unsigned ElemWidth = 1 + ExpWidth + FracWidth;
2949 assert((1ull << (ExpWidth - 1)) == (1 + ExpBias));
2992 unsigned Opc =
Op.getOpcode();
2995 const SDLoc &dl(
Op);
2997 MVT InpTy = ty(Op0);
3030 unsigned ElemWidth = 1 + ExpWidth + FracWidth;
3040 auto [Frac, Ovf] = emitHvxShiftRightRnd(Frac0, ExpWidth + 1,
false, DAG);
3063 unsigned Opc =
Op.getOpcode();
3080 const SDLoc &dl(
Op);
3081 return DAG.
getNode(TLOpc, dl, ty(
Op),
Op.getOperand(0),
3090 unsigned Opc =
Op.getConstantOperandVal(2);
3094HexagonTargetLowering::VectorPair
3098 const SDLoc &dl(
Op);
3100 auto SplitVTNode = [&DAG,
this](
const VTSDNode *
N) {
3101 MVT Ty = typeSplit(
N->getVT().getSimpleVT()).first;
3103 return std::make_pair(TV, TV);
3108 ty(
A).isVector() ? opSplit(
A, dl, DAG) : std::make_pair(
A,
A);
3110 switch (
Op.getOpcode()) {
3111 case ISD::SIGN_EXTEND_INREG:
3112 case HexagonISD::SSAT:
3113 case HexagonISD::USAT:
3114 if (const auto *N = dyn_cast<const VTSDNode>(A.getNode()))
3115 std::tie(Lo, Hi) = SplitVTNode(N);
3123 MVT HalfTy = typeSplit(ResTy).first;
3133 MVT MemTy = MemN->getMemoryVT().getSimpleVT();
3134 if (!isHvxPairTy(MemTy))
3137 const SDLoc &dl(
Op);
3138 unsigned HwLen = Subtarget.getVectorLength();
3139 MVT SingleTy = typeSplit(MemTy).first;
3140 SDValue Chain = MemN->getChain();
3141 SDValue Base0 = MemN->getBasePtr();
3146 MachineMemOperand *MOp0 =
nullptr, *MOp1 =
nullptr;
3147 if (MachineMemOperand *MMO = MemN->getMemOperand()) {
3149 uint64_t MemSize = (MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE)
3156 if (MemOpc == ISD::LOAD) {
3165 if (MemOpc == ISD::STORE) {
3173 assert(MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE);
3176 assert(MaskN->isUnindexed());
3177 VectorPair Masks = opSplit(MaskN->getMask(), dl, DAG);
3180 if (MemOpc == ISD::MLOAD) {
3196 if (MemOpc == ISD::MSTORE) {
3199 Masks.first, SingleTy, MOp0,
3202 Masks.second, SingleTy, MOp1,
3207 std::string
Name =
"Unexpected operation: " +
Op->getOperationName(&DAG);
3213 const SDLoc &dl(
Op);
3215 assert(LoadN->isUnindexed() &&
"Not widening indexed loads yet");
3216 assert(LoadN->getMemoryVT().getVectorElementType() != MVT::i1 &&
3217 "Not widening loads of i1 yet");
3219 SDValue Chain = LoadN->getChain();
3224 unsigned HwLen = Subtarget.getVectorLength();
3226 assert(ResLen < HwLen &&
"vsetq(v1) prerequisite");
3229 SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
3237 DAG.
getUNDEF(LoadTy), LoadTy, MemOp,
3245 const SDLoc &dl(
Op);
3247 assert(StoreN->isUnindexed() &&
"Not widening indexed stores yet");
3248 assert(StoreN->getMemoryVT().getVectorElementType() != MVT::i1 &&
3249 "Not widening stores of i1 yet");
3251 SDValue Chain = StoreN->getChain();
3255 SDValue Value = opCastElem(StoreN->getValue(), MVT::i8, DAG);
3256 MVT ValueTy = ty(
Value);
3258 unsigned HwLen = Subtarget.getVectorLength();
3261 for (
unsigned Len = ValueLen;
Len < HwLen; ) {
3263 Len = ty(
Value).getVectorNumElements();
3265 assert(ty(
Value).getVectorNumElements() == HwLen);
3267 assert(ValueLen < HwLen &&
"vsetq(v1) prerequisite");
3269 SDValue Mask = getInstr(Hexagon::V6_pred_scalar2, dl, BoolTy,
3272 auto *MemOp = MF.getMachineMemOperand(StoreN->getMemOperand(), 0, HwLen);
3279 const SDLoc &dl(
Op);
3280 SDValue Op0 =
Op.getOperand(0), Op1 =
Op.getOperand(1);
3282 unsigned HwLen = Subtarget.getVectorLength();
3287 if (!Subtarget.isHVXVectorType(WideOpTy,
true))
3290 SDValue WideOp0 = appendUndef(Op0, WideOpTy, DAG);
3291 SDValue WideOp1 = appendUndef(Op1, WideOpTy, DAG);
3295 {WideOp0, WideOp1,
Op.getOperand(2)});
3297 EVT RetTy = typeLegalize(ty(
Op), DAG);
3299 {SetCC, getZero(dl, MVT::i32, DAG)});
3304 unsigned Opc =
Op.getOpcode();
3305 bool IsPairOp = isHvxPairTy(ty(
Op)) ||
3307 return isHvxPairTy(ty(V));
3318 return SplitHvxMemOp(
Op, DAG);
3323 if (ty(
Op).getSizeInBits() == ty(
Op.getOperand(0)).getSizeInBits())
3324 return opJoin(SplitVectorOp(
Op, DAG), SDLoc(
Op), DAG);
3334 case ISD::FMINIMUMNUM:
3335 case ISD::FMAXIMUMNUM:
3354 return opJoin(SplitVectorOp(
Op, DAG), SDLoc(
Op), DAG);
3359 if (ty(
Op.getOperand(0)).getVectorElementType() == MVT::i1)
3360 return opJoin(SplitVectorOp(
Op, DAG), SDLoc(
Op), DAG);
3375 case ISD::BITCAST:
return LowerHvxBitcast(
Op, DAG);
3383 case ISD::SRL:
return LowerHvxShift(
Op, DAG);
3385 case ISD::FSHR:
return LowerHvxFunnelShift(
Op, DAG);
3395 case ISD::MSTORE:
return LowerHvxMaskedOp(
Op, DAG);
3397 case ISD::LOAD:
return SDValue();
3398 case ISD::FP_EXTEND:
return LowerHvxFpExtend(
Op, DAG);
3424 unsigned Opc =
Op.getOpcode();
3444 MVT InpTy = ty(Inp);
3449 assert(InpWidth != ResWidth);
3451 if (InpWidth == 2 * ResWidth || ResWidth == 2 * InpWidth)
3454 const SDLoc &dl(
Op);
3458 auto repeatOp = [&](
unsigned NewWidth,
SDValue Arg) {
3466 return DAG.
getNode(
Opc, dl, Ty, {Arg,
Op.getOperand(1),
Op.getOperand(2)});
3473 if (InpWidth < ResWidth) {
3475 while (InpWidth * 2 <= ResWidth)
3476 S = repeatOp(InpWidth *= 2, S);
3480 while (InpWidth / 2 >= ResWidth)
3481 S = repeatOp(InpWidth /= 2, S);
3489 MVT InpTy = ty(Inp0);
3493 unsigned Opc =
Op.getOpcode();
3495 if (shouldWidenToHvx(InpTy, DAG) || shouldWidenToHvx(ResTy, DAG)) {
3500 auto [WInpTy, WResTy] =
3501 InpWidth < ResWidth ? typeWidenToWider(typeWidenToHvx(InpTy), ResTy)
3502 : typeWidenToWider(InpTy, typeWidenToHvx(ResTy));
3503 SDValue W = appendUndef(Inp0, WInpTy, DAG);
3511 SDValue T = ExpandHvxResizeIntoSteps(S, DAG);
3512 return extractSubvector(
T, typeLegalize(ResTy, DAG), 0, DAG);
3513 }
else if (shouldSplitToHvx(InpWidth < ResWidth ? ResTy : InpTy, DAG)) {
3514 return opJoin(SplitVectorOp(
Op, DAG), SDLoc(
Op), DAG);
3517 return RemoveTLWrapper(
Op, DAG);
3523HexagonTargetLowering::LowerHvxOperationWrapper(
SDNode *
N,
3525 unsigned Opc =
N->getOpcode();
3528 if (
N->getNumOperands() > 0)
3529 Inp0 =
Op.getOperand(0);
3536 if (Subtarget.isHVXElementType(ty(
Op)) &&
3537 Subtarget.isHVXElementType(ty(Inp0))) {
3538 Results.push_back(CreateTLWrapper(
Op, DAG));
3542 if (shouldWidenToHvx(ty(Inp0), DAG)) {
3555 if (isHvxPairTy(ty(
Op))) {
3563 if (isHvxPairTy(ty(
Op->getOperand(1)))) {
3572 if (ty(
Op).getSizeInBits() != ty(Inp0).getSizeInBits()) {
3573 SDValue T = EqualizeFpIntConversion(
Op, DAG);
3581 Results.push_back(LegalizeHvxResize(
Op, DAG));
3589HexagonTargetLowering::ReplaceHvxNodeResults(
SDNode *
N,
3591 unsigned Opc =
N->getOpcode();
3594 if (
N->getNumOperands() > 0)
3595 Inp0 =
Op.getOperand(0);
3602 if (Subtarget.isHVXElementType(ty(
Op)) &&
3603 Subtarget.isHVXElementType(ty(Inp0))) {
3604 Results.push_back(CreateTLWrapper(
Op, DAG));
3608 if (shouldWidenToHvx(ty(
Op), DAG)) {
3614 if (shouldWidenToHvx(ty(
Op), DAG)) {
3623 if (isHvxBoolTy(ty(Inp0))) {
3630 if (ty(
Op).getSizeInBits() != ty(Inp0).getSizeInBits()) {
3631 SDValue T = EqualizeFpIntConversion(
Op, DAG);
3639 Results.push_back(LegalizeHvxResize(
Op, DAG));
3647HexagonTargetLowering::combineTruncateBeforeLegal(
SDValue Op,
3648 DAGCombinerInfo &DCI)
const {
3653 SelectionDAG &DAG = DCI.DAG;
3654 const SDLoc &dl(
Op);
3656 if (
Op.getOperand(0).getOpcode() == ISD::BITCAST)
3661 EVT TruncTy =
Op.getValueType();
3663 EVT SrcTy = Src.getValueType();
3670 if (2 * CastLen != SrcLen)
3673 SmallVector<int, 128>
Mask(SrcLen);
3674 for (
int i = 0; i !=
static_cast<int>(CastLen); ++i) {
3676 Mask[i + CastLen] = 2 * i + 1;
3680 return opSplit(Deal, dl, DAG).first;
3684HexagonTargetLowering::combineConcatVectorsBeforeLegal(
3685 SDValue Op, DAGCombinerInfo &DCI)
const {
3693 SelectionDAG &DAG = DCI.DAG;
3694 const SDLoc &dl(
Op);
3703 SetVector<SDValue> Order;
3709 if (Order.
size() > 2)
3718 SmallVector<int, 128> LongMask;
3719 auto AppendToMask = [&](
SDValue Shuffle) {
3721 ArrayRef<int>
Mask = SV->getMask();
3724 for (
int M : Mask) {
3729 SDValue Src =
static_cast<unsigned>(
M) < InpLen ?
X :
Y;
3730 if (
static_cast<unsigned>(M) >= InpLen)
3733 int OutOffset = Order[0] == Src ? 0 : InpLen;
3750HexagonTargetLowering::PerformHvxDAGCombine(
SDNode *
N, DAGCombinerInfo &DCI)
3753 SelectionDAG &DAG = DCI.DAG;
3755 unsigned Opc =
Op.getOpcode();
3760 return combineTruncateBeforeLegal(
Op, DCI);
3762 return combineConcatVectorsBeforeLegal(
Op, DCI);
3764 if (DCI.isBeforeLegalizeOps())
3790 return getZero(dl, ty(
Op), DAG);
3793 if (isUndef(
Ops[1]))
3811HexagonTargetLowering::shouldSplitToHvx(
MVT Ty,
SelectionDAG &DAG)
const {
3812 if (Subtarget.isHVXVectorType(Ty,
true))
3814 auto Action = getPreferredHvxVectorAction(Ty);
3816 return Subtarget.isHVXVectorType(typeLegalize(Ty, DAG),
true);
3821HexagonTargetLowering::shouldWidenToHvx(
MVT Ty,
SelectionDAG &DAG)
const {
3822 if (Subtarget.isHVXVectorType(Ty,
true))
3824 auto Action = getPreferredHvxVectorAction(Ty);
3826 return Subtarget.isHVXVectorType(typeLegalize(Ty, DAG),
true);
3832 if (!Subtarget.useHVXOps())
3836 auto IsHvxTy = [
this](EVT Ty) {
3837 return Ty.isSimple() && Subtarget.isHVXVectorType(Ty.getSimpleVT(),
true);
3840 return Op.getValueType().isSimple() &&
3841 Subtarget.isHVXVectorType(ty(
Op),
true);
3847 auto IsWidenedToHvx = [
this, &DAG](
SDValue Op) {
3848 if (!
Op.getValueType().isSimple())
3851 return ValTy.
isVector() && shouldWidenToHvx(ValTy, DAG);
3854 for (
int i = 0, e =
N->getNumValues(); i != e; ++i) {
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
const HexagonInstrInfo * TII
static std::tuple< unsigned, unsigned, unsigned > getIEEEProperties(MVT Ty)
static const MVT LegalV128[]
static const MVT LegalW128[]
static const MVT LegalW64[]
static const MVT LegalV64[]
static cl::opt< unsigned > HvxWidenThreshold("hexagon-hvx-widen", cl::Hidden, cl::init(16), cl::desc("Lower threshold (in bytes) for widening to HVX vectors"))
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isSplat(Value *V)
Return true if V is a splat of a value (which is used when multiplying a matrix with a scalar).
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
const SmallVectorImpl< MachineOperand > & Cond
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
uint64_t getNumOperands() const
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &, LLVMContext &C, EVT VT) const override
Return the ValueType of the result of SETCC operations.
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
static MVT getFloatingPointVT(unsigned BitWidth)
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
MVT changeTypeToInteger()
Return the type converted to an equivalently sized integer or vector with integer element type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
unsigned getSubReg() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
size_type size() const
Determine the number of elements in the SetVector.
const value_type & front() const
Return the first element of the SetVector.
const value_type & back() const
Return the last element of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ Undef
Value of the register doesn't matter.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
SmallVectorImpl< T >::const_pointer c_str(SmallVectorImpl< T > &str)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ Or
Bitwise or logical OR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool isVector() const
Return true if this is a vector value type.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.