28 #define DEBUG_TYPE "aarch64-isel"
51 const char *getPassName()
const override {
52 return "AArch64 Instruction Selection";
67 bool SelectInlineAsmMemoryOperand(
const SDValue &Op,
68 unsigned ConstraintID,
69 std::vector<SDValue> &OutOps)
override;
72 SDNode *SelectMULLV64LaneV128(
unsigned IntNo,
SDNode *N);
77 return SelectShiftedRegister(N,
false, Reg, Shift);
80 return SelectShiftedRegister(N,
true, Reg, Shift);
83 return SelectAddrModeIndexed(N, 1, Base, OffImm);
86 return SelectAddrModeIndexed(N, 2, Base, OffImm);
89 return SelectAddrModeIndexed(N, 4, Base, OffImm);
92 return SelectAddrModeIndexed(N, 8, Base, OffImm);
95 return SelectAddrModeIndexed(N, 16, Base, OffImm);
98 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
101 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
104 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
107 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
110 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
116 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
122 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
136 const unsigned SubRegs[]);
138 SDNode *SelectTable(
SDNode *N,
unsigned NumVecs,
unsigned Opc,
bool isExt);
142 SDNode *SelectLoad(
SDNode *N,
unsigned NumVecs,
unsigned Opc,
144 SDNode *SelectPostLoad(
SDNode *N,
unsigned NumVecs,
unsigned Opc,
146 SDNode *SelectLoadLane(
SDNode *N,
unsigned NumVecs,
unsigned Opc);
147 SDNode *SelectPostLoadLane(
SDNode *N,
unsigned NumVecs,
unsigned Opc);
149 SDNode *SelectStore(
SDNode *N,
unsigned NumVecs,
unsigned Opc);
150 SDNode *SelectPostStore(
SDNode *N,
unsigned NumVecs,
unsigned Opc);
151 SDNode *SelectStoreLane(
SDNode *N,
unsigned NumVecs,
unsigned Opc);
152 SDNode *SelectPostStoreLane(
SDNode *N,
unsigned NumVecs,
unsigned Opc);
163 #include "AArch64GenDAGISel.inc"
166 bool SelectShiftedRegister(
SDValue N,
bool AllowROR,
SDValue &Reg,
168 bool SelectAddrModeIndexed(
SDValue N,
unsigned Size,
SDValue &Base,
170 bool SelectAddrModeUnscaled(
SDValue N,
unsigned Size,
SDValue &Base,
178 bool isWorthFolding(
SDValue V)
const;
179 bool SelectExtendedSHL(
SDValue N,
unsigned Size,
bool WantExtend,
182 template<
unsigned RegW
idth>
184 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
187 bool SelectCVTFixedPosOperand(
SDValue N,
SDValue &FixedPos,
unsigned Width);
194 if (
const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
195 Imm = C->getZExtValue();
216 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
217 const SDValue &Op,
unsigned ConstraintID, std::vector<SDValue> &OutOps) {
218 switch(ConstraintID) {
227 OutOps.push_back(Op);
243 if (!isa<ConstantSDNode>(N.
getNode()))
246 uint64_t Immed = cast<ConstantSDNode>(N.
getNode())->getZExtValue();
249 if (Immed >> 12 == 0) {
251 }
else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
259 Val = CurDAG->getTargetConstant(Immed, dl,
MVT::i32);
260 Shift = CurDAG->getTargetConstant(ShVal, dl,
MVT::i32);
266 bool AArch64DAGToDAGISel::SelectNegArithImmed(
SDValue N,
SDValue &Val,
273 if (!isa<ConstantSDNode>(N.
getNode()))
277 uint64_t Immed = cast<ConstantSDNode>(N.
getNode())->getZExtValue();
286 Immed = ~((uint32_t)Immed) + 1;
288 Immed = ~Immed + 1ULL;
289 if (Immed & 0xFFFFFFFFFF000000ULL)
292 Immed &= 0xFFFFFFULL;
293 return SelectArithImmed(CurDAG->getConstant(Immed,
SDLoc(N),
MVT::i32), Val,
315 bool AArch64DAGToDAGISel::isWorthFolding(
SDValue V)
const {
328 bool AArch64DAGToDAGISel::SelectShiftedRegister(
SDValue N,
bool AllowROR,
338 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
342 Shift = CurDAG->getTargetConstant(ShVal,
SDLoc(N),
MVT::i32);
343 return isWorthFolding(N);
357 SrcVT = cast<VTSDNode>(N.
getOperand(1))->getVT();
361 if (!IsLoadStore && SrcVT ==
MVT::i8)
363 else if (!IsLoadStore && SrcVT ==
MVT::i16)
367 assert(SrcVT !=
MVT::i64 &&
"extend from 64-bits?");
373 if (!IsLoadStore && SrcVT ==
MVT::i8)
375 else if (!IsLoadStore && SrcVT ==
MVT::i16)
379 assert(SrcVT !=
MVT::i64 &&
"extend from 64-bits?");
419 LaneIdx = DLidx->
getSExtValue() + EVidx->getSExtValue();
428 SDValue &LaneOp,
int &LaneIdx) {
442 SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(
SDNode *N) {
462 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
464 unsigned MLAOpc = ~0U;
470 MLAOpc = AArch64::MLAv4i16_indexed;
473 MLAOpc = AArch64::MLAv8i16_indexed;
476 MLAOpc = AArch64::MLAv2i32_indexed;
479 MLAOpc = AArch64::MLAv4i32_indexed;
483 return CurDAG->getMachineNode(MLAOpc, dl, N->
getValueType(0), Ops);
486 SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(
unsigned IntNo,
SDNode *N) {
498 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
500 unsigned SMULLOpc = ~0U;
502 if (IntNo == Intrinsic::aarch64_neon_smull) {
507 SMULLOpc = AArch64::SMULLv4i16_indexed;
510 SMULLOpc = AArch64::SMULLv2i32_indexed;
513 }
else if (IntNo == Intrinsic::aarch64_neon_umull) {
518 SMULLOpc = AArch64::UMULLv4i16_indexed;
521 SMULLOpc = AArch64::UMULLv2i32_indexed;
527 return CurDAG->getMachineNode(SMULLOpc, dl, N->
getValueType(0), Ops);
548 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(
SDValue N,
SDValue &Reg,
550 unsigned ShiftVal = 0;
583 return isWorthFolding(N);
610 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(
SDValue N,
unsigned Size,
616 int FI = cast<FrameIndexSDNode>(
N)->getIndex();
617 Base = CurDAG->getTargetFrameIndex(FI, TLI->
getPointerTy(DL));
618 OffImm = CurDAG->getTargetConstant(0, dl,
MVT::i64);
633 if (Alignment == 0 && Ty->
isSized())
636 if (Alignment >= Size)
640 if (CurDAG->isBaseWithConstantOffset(N)) {
642 int64_t RHSC = (int64_t)RHS->getZExtValue();
643 unsigned Scale =
Log2_32(Size);
644 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
647 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
648 Base = CurDAG->getTargetFrameIndex(FI, TLI->
getPointerTy(DL));
650 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl,
MVT::i64);
658 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
666 OffImm = CurDAG->getTargetConstant(0, dl,
MVT::i64);
675 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(
SDValue N,
unsigned Size,
678 if (!CurDAG->isBaseWithConstantOffset(N))
681 int64_t RHSC = RHS->getSExtValue();
683 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
684 RHSC < (0x1000 <<
Log2_32(Size)))
686 if (RHSC >= -256 && RHSC < 256) {
689 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
691 Base = CurDAG->getTargetFrameIndex(
694 OffImm = CurDAG->getTargetConstant(RHSC,
SDLoc(N),
MVT::i64);
713 bool AArch64DAGToDAGISel::SelectExtendedSHL(
SDValue N,
unsigned Size,
714 bool WantExtend,
SDValue &Offset,
718 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
733 SignExtend = CurDAG->getTargetConstant(0, dl,
MVT::i32);
736 unsigned LegalShiftVal =
Log2_32(Size);
737 unsigned ShiftVal = CSD->getZExtValue();
739 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
742 if (isWorthFolding(N))
748 bool AArch64DAGToDAGISel::SelectAddrModeWRO(
SDValue N,
unsigned Size,
760 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
768 if (!isa<MemSDNode>(*UI))
773 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
777 SelectExtendedSHL(RHS, Size,
true, Offset, SignExtend)) {
779 DoShift = CurDAG->getTargetConstant(
true, dl,
MVT::i32);
785 SelectExtendedSHL(LHS, Size,
true, Offset, SignExtend)) {
787 DoShift = CurDAG->getTargetConstant(
true, dl,
MVT::i32);
792 DoShift = CurDAG->getTargetConstant(
false, dl,
MVT::i32);
796 if (IsExtendedRegisterWorthFolding &&
803 if (isWorthFolding(LHS))
808 if (IsExtendedRegisterWorthFolding &&
815 if (isWorthFolding(RHS))
827 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
830 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
832 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
833 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
837 bool AArch64DAGToDAGISel::SelectAddrModeXRO(
SDValue N,
unsigned Size,
852 if (!isa<MemSDNode>(*UI))
867 if (isa<ConstantSDNode>(RHS)) {
868 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
869 unsigned Scale =
Log2_32(Size);
873 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
879 CurDAG->getMachineNode(AArch64::MOVi64imm, DL,
MVT::i64, Ops);
886 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
890 SelectExtendedSHL(RHS, Size,
false, Offset, SignExtend)) {
892 DoShift = CurDAG->getTargetConstant(
true, DL,
MVT::i32);
898 SelectExtendedSHL(LHS, Size,
false, Offset, SignExtend)) {
900 DoShift = CurDAG->getTargetConstant(
true, DL,
MVT::i32);
907 SignExtend = CurDAG->getTargetConstant(
false, DL,
MVT::i32);
908 DoShift = CurDAG->getTargetConstant(
false, DL,
MVT::i32);
914 static const unsigned RegClassIDs[] = {
915 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
916 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
917 AArch64::dsub2, AArch64::dsub3};
919 return createTuple(Regs, RegClassIDs, SubRegs);
923 static const unsigned RegClassIDs[] = {
924 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
925 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
926 AArch64::qsub2, AArch64::qsub3};
928 return createTuple(Regs, RegClassIDs, SubRegs);
932 const unsigned RegClassIDs[],
933 const unsigned SubRegs[]) {
936 if (Regs.
size() == 1)
939 assert(Regs.
size() >= 2 && Regs.
size() <= 4);
947 CurDAG->getTargetConstant(RegClassIDs[Regs.
size() - 2], DL,
MVT::i32));
950 for (
unsigned i = 0; i < Regs.
size(); ++i) {
960 SDNode *AArch64DAGToDAGISel::SelectTable(
SDNode *N,
unsigned NumVecs,
961 unsigned Opc,
bool isExt) {
965 unsigned ExtOff = isExt;
968 unsigned Vec0Off = ExtOff + 1;
971 SDValue RegSeq = createQTuple(Regs);
978 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
981 SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(
SDNode *N,
bool &Done) {
996 bool InsertTo64 =
false;
998 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1001 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1003 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1005 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1014 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1016 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1018 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1027 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1029 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1031 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1038 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1040 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1042 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1051 SDValue Ops[] = { Base, Offset, Chain };
1052 SDNode *Res = CurDAG->getMachineNode(Opcode, dl,
MVT::i64, DstVT,
1058 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl,
MVT::i32);
1060 SDValue(CurDAG->getMachineNode(
1062 CurDAG->getTargetConstant(0, dl,
MVT::i64), LoadedVal,
1067 ReplaceUses(
SDValue(N, 0), LoadedVal);
1074 SDNode *AArch64DAGToDAGISel::SelectLoad(
SDNode *N,
unsigned NumVecs,
1075 unsigned Opc,
unsigned SubRegIdx) {
1085 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1087 for (
unsigned i = 0; i < NumVecs; ++i)
1089 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1095 SDNode *AArch64DAGToDAGISel::SelectPostLoad(
SDNode *N,
unsigned NumVecs,
1096 unsigned Opc,
unsigned SubRegIdx) {
1108 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1116 ReplaceUses(
SDValue(N, 0), SuperReg);
1118 for (
unsigned i = 0; i < NumVecs; ++i)
1120 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1127 SDNode *AArch64DAGToDAGISel::SelectStore(
SDNode *N,
unsigned NumVecs,
1135 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1143 SDNode *AArch64DAGToDAGISel::SelectPostStore(
SDNode *N,
unsigned NumVecs,
1153 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1159 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1182 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1199 SDNode *AArch64DAGToDAGISel::SelectLoadLane(
SDNode *N,
unsigned NumVecs,
1212 SDValue RegSeq = createQTuple(Regs);
1217 cast<ConstantSDNode>(N->
getOperand(NumVecs + 2))->getZExtValue();
1219 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl,
MVT::i64),
1221 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1224 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1225 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1227 for (
unsigned i = 0; i < NumVecs; ++i) {
1228 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1231 ReplaceUses(
SDValue(N, i), NV);
1239 SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(
SDNode *N,
unsigned NumVecs,
1252 SDValue RegSeq = createQTuple(Regs);
1258 cast<ConstantSDNode>(N->
getOperand(NumVecs + 1))->getZExtValue();
1261 CurDAG->getTargetConstant(LaneNo, dl,
1266 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1277 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1278 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1280 for (
unsigned i = 0; i < NumVecs; ++i) {
1281 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1285 ReplaceUses(
SDValue(N, i), NV);
1295 SDNode *AArch64DAGToDAGISel::SelectStoreLane(
SDNode *N,
unsigned NumVecs,
1308 SDValue RegSeq = createQTuple(Regs);
1311 cast<ConstantSDNode>(N->
getOperand(NumVecs + 2))->getZExtValue();
1313 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl,
MVT::i64),
1319 MemOp[0] = cast<MemIntrinsicSDNode>(
N)->getMemOperand();
1320 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1325 SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(
SDNode *N,
unsigned NumVecs,
1338 SDValue RegSeq = createQTuple(Regs);
1344 cast<ConstantSDNode>(N->
getOperand(NumVecs + 1))->getZExtValue();
1346 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl,
MVT::i64),
1350 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1354 MemOp[0] = cast<MemIntrinsicSDNode>(
N)->getMemOperand();
1355 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1362 unsigned &LSB,
unsigned &MSB,
1363 unsigned NumberOfIgnoredLowBits,
1364 bool BiggerPattern) {
1366 "N must be a AND operation to call this function");
1374 "Type checking must have been done before calling this function");
1384 uint64_t And_imm = 0;
1392 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1395 if (And_imm & (And_imm + 1))
1398 bool ClampMSB =
false;
1399 uint64_t Srl_imm = 0;
1418 }
else if (BiggerPattern) {
1430 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1432 <<
": Found large shift immediate, this should not happen\n"));
1437 MSB = Srl_imm + (VT ==
MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1438 : countTrailingOnes<uint64_t>(And_imm)) -
1445 MSB = MSB > 31 ? 31 : MSB;
1447 Opc = VT ==
MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1471 uint64_t And_mask = 0;
1477 uint64_t Srl_imm = 0;
1483 if (BitWide &&
isMask_64(And_mask >> Srl_imm)) {
1485 Opc = AArch64::UBFMWri;
1487 Opc = AArch64::UBFMXri;
1490 MSB = BitWide + Srl_imm - 1;
1498 unsigned &Immr,
unsigned &Imms,
1499 bool BiggerPattern) {
1501 "N must be a SHR/SRA operation to call this function");
1509 "Type checking must have been done before calling this function");
1516 uint64_t Shl_imm = 0;
1517 uint64_t Trunc_bits = 0;
1529 assert(VT ==
MVT::i64 &&
"the promoted type should be i64");
1530 }
else if (BiggerPattern) {
1542 <<
": Found large shift immediate, this should not happen\n"));
1546 uint64_t Srl_imm = 0;
1551 "bad amount in shift node!");
1552 int immr = Srl_imm - Shl_imm;
1564 SDValue &Opd0,
unsigned &Immr,
unsigned &Imms,
1565 unsigned NumberOfIgnoredLowBits = 0,
1566 bool BiggerPattern =
false) {
1577 NumberOfIgnoredLowBits, BiggerPattern);
1587 case AArch64::SBFMWri:
1588 case AArch64::UBFMWri:
1589 case AArch64::SBFMXri:
1590 case AArch64::UBFMXri:
1601 SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(
SDNode *N) {
1602 unsigned Opc, Immr, Imms;
1612 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT ==
MVT::i32) {
1613 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl,
MVT::i64),
1614 CurDAG->getTargetConstant(Imms, dl,
MVT::i64)};
1617 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl,
MVT::i32);
1624 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1625 CurDAG->getTargetConstant(Imms, dl, VT)};
1626 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1634 unsigned NumberOfIgnoredHighBits,
EVT VT) {
1636 "i32 or i64 mask type expected!");
1637 unsigned BitWidth = VT.
getSizeInBits() - NumberOfIgnoredHighBits;
1639 APInt SignificantDstMask =
APInt(BitWidth, DstMask);
1640 APInt SignificantBitsToBeInserted = BitsToBeInserted.
zextOrTrunc(BitWidth);
1642 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1643 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1668 UsefulBits &=
APInt(UsefulBits.getBitWidth(), Imm);
1673 uint64_t Imm, uint64_t MSB,
1676 APInt OpUsefulBits(UsefulBits);
1680 OpUsefulBits = OpUsefulBits.
shl(MSB - Imm + 1);
1685 OpUsefulBits = OpUsefulBits.
shl(Imm);
1687 OpUsefulBits = OpUsefulBits.
shl(MSB + 1);
1690 OpUsefulBits = OpUsefulBits.
shl(OpUsefulBits.
getBitWidth() - Imm);
1696 UsefulBits &= OpUsefulBits;
1711 uint64_t ShiftTypeAndValue =
1713 APInt Mask(UsefulBits);
1720 Mask = Mask.
shl(ShiftAmt);
1722 Mask = Mask.
lshr(ShiftAmt);
1728 Mask = Mask.
lshr(ShiftAmt);
1730 Mask = Mask.
shl(ShiftAmt);
1747 APInt OpUsefulBits(UsefulBits);
1751 OpUsefulBits = OpUsefulBits.
shl(MSB - Imm + 1);
1753 UsefulBits &= ~OpUsefulBits;
1756 OpUsefulBits = OpUsefulBits.
shl(MSB + 1);
1758 UsefulBits = ~(OpUsefulBits.
shl(OpUsefulBits.
getBitWidth() - Imm));
1764 SDValue Orig,
unsigned Depth) {
1774 case AArch64::ANDSWri:
1775 case AArch64::ANDSXri:
1776 case AArch64::ANDWri:
1777 case AArch64::ANDXri:
1781 case AArch64::UBFMWri:
1782 case AArch64::UBFMXri:
1785 case AArch64::ORRWrs:
1786 case AArch64::ORRXrs:
1791 case AArch64::BFMWri:
1792 case AArch64::BFMXri:
1804 UsefulBits =
APInt(Bitwidth, 0);
1813 UsersUsefulBits |= UsefulBitsForUse;
1818 UsefulBits &= UsersUsefulBits;
1831 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1834 if (ShlAmount > 0) {
1837 UBFMOpc, dl, VT, Op,
1842 assert(ShlAmount < 0 &&
"expected right shift");
1843 int ShrAmount = -ShlAmount;
1855 SDValue &Src,
int &ShiftAmount,
1860 assert(BitWidth == 32 || BitWidth == 64);
1862 APInt KnownZero, KnownOne;
1867 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1873 assert((~
APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1913 Opc = AArch64::BFMWri;
1915 Opc = AArch64::BFMXri;
1932 for (
int i = 0; i < 2;
1937 NumberOfIgnoredLowBits,
true)) {
1940 if ((BFXOpc != AArch64::UBFMXri && VT ==
MVT::i64) ||
1941 (BFXOpc != AArch64::UBFMWri && VT ==
MVT::i32))
1946 Width = ImmS - ImmR + 1;
1957 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
1970 APInt KnownZero, KnownOne;
1975 APInt BitsToBeInserted =
1978 if ((BitsToBeInserted & ~KnownZero) != 0)
1999 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(
SDNode *N) {
2014 CurDAG->getTargetConstant(LSB, dl, VT),
2015 CurDAG->getTargetConstant(MSB, dl, VT) };
2016 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2023 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2033 unsigned FRINTXOpc = FRINTXOpcs[Variant];
2039 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2040 Opc = FRINTPOpcs[Variant];
2044 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2045 Opc = FRINTMOpcs[Variant];
2049 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2050 Opc = FRINTZOpcs[Variant];
2054 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2055 Opc = FRINTAOpcs[Variant];
2065 if (!
TM.Options.UnsafeFPMath) {
2066 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT,
MVT::Glue, In);
2070 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2074 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(
SDValue N,
SDValue &FixedPos,
2075 unsigned RegWidth) {
2078 FVal = CN->getValueAPF();
2079 else if (
LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2082 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2087 FVal = cast<ConstantFP>(CN->
getConstVal())->getValueAPF();
2106 if (!IsExact || !
IntVal.isPowerOf2())
return false;
2107 unsigned FBits =
IntVal.logBase2();
2111 if (FBits == 0 || FBits > RegWidth)
return false;
2113 FixedPos = CurDAG->getTargetConstant(FBits,
SDLoc(N),
MVT::i32);
2122 RegString.
split(Fields,
":");
2124 if (Fields.
size() == 1)
2127 assert(Fields.
size() == 5
2128 &&
"Invalid number of fields in read register string");
2131 bool AllIntFields =
true;
2135 AllIntFields &= !
Field.getAsInteger(10, IntField);
2136 Ops.push_back(IntField);
2139 assert(AllIntFields &&
2140 "Unexpected non-integer value in special register string.");
2144 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2145 (Ops[3] << 3) | (Ops[4]);
2152 SDNode *AArch64DAGToDAGISel::SelectReadRegister(
SDNode *N) {
2161 CurDAG->getTargetConstant(Reg, DL,
MVT::i32),
2167 bool IsValidSpecialReg;
2168 Reg = mapper.
fromString(RegString->getString(),
2169 Subtarget->getFeatureBits(),
2171 if (IsValidSpecialReg)
2174 CurDAG->getTargetConstant(Reg, DL,
MVT::i32),
2184 SDNode *AArch64DAGToDAGISel::SelectWriteRegister(
SDNode *N) {
2191 return CurDAG->getMachineNode(AArch64::MSR, DL,
MVT::Other,
2192 CurDAG->getTargetConstant(Reg, DL,
MVT::i32),
2201 bool IsValidSpecialReg;
2202 Reg = PMapper.
fromString(RegString->getString(),
2203 Subtarget->getFeatureBits(),
2205 if (IsValidSpecialReg) {
2206 assert (isa<ConstantSDNode>(N->
getOperand(2))
2207 &&
"Expected a constant integer expression.");
2208 uint64_t Immed = cast<ConstantSDNode>(N->
getOperand(2))->getZExtValue();
2209 return CurDAG->getMachineNode(AArch64::MSRpstate, DL,
MVT::Other,
2210 CurDAG->getTargetConstant(Reg, DL,
MVT::i32),
2211 CurDAG->getTargetConstant(Immed, DL,
MVT::i16),
2219 Reg = Mapper.
fromString(RegString->getString(),
2220 Subtarget->getFeatureBits(),
2223 if (IsValidSpecialReg)
2224 return CurDAG->getMachineNode(AArch64::MSR, DL,
MVT::Other,
2225 CurDAG->getTargetConstant(Reg, DL,
MVT::i32),
2245 SDNode *ResNode =
nullptr;
2253 if (
SDNode *Res = SelectReadRegister(Node))
2258 if (
SDNode *Res = SelectWriteRegister(Node))
2263 if (
SDNode *
I = SelectMLAV64LaneV128(Node))
2271 SDNode *
I = SelectIndexedLoad(Node, Done);
2280 if (
SDNode *I = SelectBitfieldExtractOp(Node))
2285 if (
SDNode *I = SelectBitfieldInsertOp(Node))
2313 SubReg = AArch64::dsub;
2316 SubReg = AArch64::ssub;
2319 SubReg = AArch64::hsub;
2324 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg,
SDLoc(Node), VT,
2326 DEBUG(
dbgs() <<
"ISEL: Custom selection!\n=> ");
2337 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
SDLoc(Node),
2340 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
SDLoc(Node),
2348 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2351 SDValue TFI = CurDAG->getTargetFrameIndex(
2355 CurDAG->getTargetConstant(Shifter, DL,
MVT::i32) };
2356 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri,
MVT::i64, Ops);
2359 unsigned IntNo = cast<ConstantSDNode>(Node->
getOperand(1))->getZExtValue();
2363 case Intrinsic::aarch64_ldaxp:
2364 case Intrinsic::aarch64_ldxp: {
2366 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2376 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2377 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2380 case Intrinsic::aarch64_stlxp:
2381 case Intrinsic::aarch64_stxp: {
2383 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2391 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2396 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2397 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2401 case Intrinsic::aarch64_neon_ld1x2:
2403 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2405 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2407 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2409 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2411 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2413 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2415 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2417 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2419 case Intrinsic::aarch64_neon_ld1x3:
2421 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2423 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2425 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2427 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2429 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2431 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2433 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2435 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2437 case Intrinsic::aarch64_neon_ld1x4:
2439 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2441 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2443 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2445 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2447 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2449 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2451 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2453 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2455 case Intrinsic::aarch64_neon_ld2:
2457 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2459 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2461 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2463 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2465 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2467 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2469 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2471 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2473 case Intrinsic::aarch64_neon_ld3:
2475 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2477 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2479 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2481 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2483 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2485 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2487 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2489 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2491 case Intrinsic::aarch64_neon_ld4:
2493 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2495 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2497 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2499 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2501 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2503 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2505 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2507 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2509 case Intrinsic::aarch64_neon_ld2r:
2511 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2513 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2515 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2517 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2519 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2521 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2523 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2525 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2527 case Intrinsic::aarch64_neon_ld3r:
2529 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2531 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2533 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2535 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2537 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2539 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2541 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2543 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2545 case Intrinsic::aarch64_neon_ld4r:
2547 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2549 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2551 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2553 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2555 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2557 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2559 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2561 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2563 case Intrinsic::aarch64_neon_ld2lane:
2565 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2568 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2571 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2574 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2576 case Intrinsic::aarch64_neon_ld3lane:
2578 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2581 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2584 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2587 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2589 case Intrinsic::aarch64_neon_ld4lane:
2591 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2594 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2597 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2600 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2605 unsigned IntNo = cast<ConstantSDNode>(Node->
getOperand(0))->getZExtValue();
2609 case Intrinsic::aarch64_neon_tbl2:
2610 return SelectTable(Node, 2, VT ==
MVT::v8i8 ? AArch64::TBLv8i8Two
2611 : AArch64::TBLv16i8Two,
2613 case Intrinsic::aarch64_neon_tbl3:
2614 return SelectTable(Node, 3, VT ==
MVT::v8i8 ? AArch64::TBLv8i8Three
2615 : AArch64::TBLv16i8Three,
2617 case Intrinsic::aarch64_neon_tbl4:
2618 return SelectTable(Node, 4, VT ==
MVT::v8i8 ? AArch64::TBLv8i8Four
2619 : AArch64::TBLv16i8Four,
2621 case Intrinsic::aarch64_neon_tbx2:
2622 return SelectTable(Node, 2, VT ==
MVT::v8i8 ? AArch64::TBXv8i8Two
2623 : AArch64::TBXv16i8Two,
2625 case Intrinsic::aarch64_neon_tbx3:
2626 return SelectTable(Node, 3, VT ==
MVT::v8i8 ? AArch64::TBXv8i8Three
2627 : AArch64::TBXv16i8Three,
2629 case Intrinsic::aarch64_neon_tbx4:
2630 return SelectTable(Node, 4, VT ==
MVT::v8i8 ? AArch64::TBXv8i8Four
2631 : AArch64::TBXv16i8Four,
2633 case Intrinsic::aarch64_neon_smull:
2634 case Intrinsic::aarch64_neon_umull:
2635 if (
SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2642 unsigned IntNo = cast<ConstantSDNode>(Node->
getOperand(1))->getZExtValue();
2648 case Intrinsic::aarch64_neon_st1x2: {
2650 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2652 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2654 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2656 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2658 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2660 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2662 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2664 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2667 case Intrinsic::aarch64_neon_st1x3: {
2669 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2671 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2673 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2675 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2677 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2679 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2681 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2683 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2686 case Intrinsic::aarch64_neon_st1x4: {
2688 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2690 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2692 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2694 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2696 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2698 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2700 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2702 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2705 case Intrinsic::aarch64_neon_st2: {
2707 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2709 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2711 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2713 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2715 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2717 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2719 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2721 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2724 case Intrinsic::aarch64_neon_st3: {
2726 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2728 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2730 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2732 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2734 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2736 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2738 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2740 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2743 case Intrinsic::aarch64_neon_st4: {
2745 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2747 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2749 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2751 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2753 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2755 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2757 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2759 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2762 case Intrinsic::aarch64_neon_st2lane: {
2764 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2767 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2770 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2773 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2776 case Intrinsic::aarch64_neon_st3lane: {
2778 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2781 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2784 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2787 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2790 case Intrinsic::aarch64_neon_st4lane: {
2792 return SelectStoreLane(Node, 4, AArch64::ST4i8);
2795 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2798 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2801 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2808 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2810 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2812 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2814 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2816 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2818 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2820 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2822 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2827 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2829 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2831 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2833 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2835 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2837 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2839 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2841 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2846 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2848 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2850 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2852 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2854 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2856 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2858 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2860 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2865 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
2867 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
2869 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
2871 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
2873 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
2875 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
2877 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2879 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
2884 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
2886 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
2888 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
2890 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
2892 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
2894 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
2896 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2898 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
2903 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
2905 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
2907 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
2909 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
2911 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
2913 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
2915 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2917 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
2922 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
2924 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
2926 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
2928 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
2930 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
2932 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
2934 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
2936 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
2941 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
2943 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
2945 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
2947 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
2949 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
2951 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
2953 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
2955 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
2960 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
2962 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
2964 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
2966 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
2968 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
2970 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
2972 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
2974 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
2979 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
2981 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
2983 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
2985 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
2987 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
2989 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
2991 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
2993 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
2998 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
3001 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
3004 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
3007 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
3012 return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
3015 return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
3018 return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
3021 return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
3026 return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
3029 return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
3032 return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
3035 return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
3040 return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
3043 return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
3046 return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
3049 return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
3055 return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
3057 return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
3059 return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
3061 return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
3063 return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
3065 return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
3067 return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
3069 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3075 return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
3077 return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
3079 return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
3081 return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
3083 return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
3085 return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
3087 return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
3089 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
3095 return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
3097 return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
3099 return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
3101 return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
3103 return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
3105 return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
3107 return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
3109 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
3115 return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
3117 return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
3119 return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
3121 return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
3123 return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
3125 return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
3127 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3129 return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
3135 return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
3137 return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
3139 return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
3141 return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
3143 return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
3145 return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
3147 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
3149 return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
3155 return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
3157 return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
3159 return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
3161 return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
3163 return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
3165 return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
3167 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
3169 return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
3175 return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
3178 return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
3181 return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
3184 return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
3190 return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
3193 return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
3196 return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
3199 return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
3205 return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
3208 return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
3211 return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
3214 return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
3222 if (
SDNode *I = SelectLIBM(Node))
3228 ResNode = SelectCode(Node);
3231 if (ResNode ==
nullptr || ResNode == Node)
3244 return new AArch64DAGToDAGISel(TM, OptLevel);
void clearAllBits()
Set every bit to 0.
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc, SDValue &Opd0, unsigned &Immr, unsigned &Imms, unsigned NumberOfIgnoredLowBits=0, bool BiggerPattern=false)
void push_back(const T &Elt)
A parsed version of the target data layout string, and methods for querying it. ...
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void flipAllBits()
Toggle every bit to its opposite value.
void dump() const
Dump this node, for debugging.
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a vector value) starting with the ...
static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted, unsigned NumberOfIgnoredHighBits, EVT VT)
Does DstMask form a complementary pair with the mask provided by BitsToBeInserted, suitable for use in a BFI instruction.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG)
NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 regi...
static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits, SDValue Orig, unsigned Depth)
static bool isWorthFoldingADDlow(SDValue N)
If there's a use of this ADDlow that's not itself a load/store then we'll need to create a real ADD i...
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
const GlobalValue * getGlobal() const
static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0, unsigned &LSB, unsigned &MSB)
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc, uint64_t &Imm)
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
void setNodeId(int Id)
Set unique node id.
const SDValue & getBasePtr() const
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
void dumpr() const
Dump (recursively) this node and its use-def subgraph.
MachineMemOperand - A description of a memory reference used in the backend.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp, SDValue &LaneOp, int &LaneIdx)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
static bool isPreferredADD(int64_t ImmOff)
Shift and rotation operations.
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
A Use represents the edge between a Value definition and its users.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(unsigned shiftAmt) const
Logical right-shift function.
Reg
All possible values of the reg field in the ModR/M byte.
bool isSized(SmallPtrSetImpl< const Type * > *Visited=nullptr) const
isSized - Return true if it makes sense to take the size of this type.
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth=0)
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc, SDValue &Opd0, unsigned &LSB, unsigned &MSB, unsigned NumberOfIgnoredLowBits, bool BiggerPattern)
Simple integer binary arithmetic operators.
bool isMask_64(uint64_t Value)
isMask_64 - This function returns true if the argument is a non-empty sequence of ones starting at th...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(unsigned shiftAmt) const
Left-shift function.
EVT getMemoryVT() const
Return the type of the in-memory value.
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Type * getElementType() const
size_t size() const
size - Get the array size.
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG)
WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 regis...
static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits, unsigned Depth)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N)
getShiftTypeForNode - Translate a shift node to the corresponding ShiftType value.
SDNode * getNode() const
get the SDNode which holds the desired result
IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1...
MVT - Machine Value Type.
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits, unsigned Depth)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
const Constant * getConstVal() const
static uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize)
decodeLogicalImmediate - Decode a logical immediate value in the form "N:immr:imms" (where the immr a...
INSERT_SUBREG - This instruction takes three operands: a register that has subregisters, a register providing an insert value, and a subregister index.
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT, bool isOpaque=false)
unsigned getBitWidth() const
Return the number of bits in the APInt.
unsigned getOpcode() const
FunctionPass class - This class is used to implement most global optimizations.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N)
Instructions that accept extend modifiers like UXTW expect the register being extended to be a GPR32...
EVT - Extended Value Type.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc, or post-dec.
static SDValue Widen(SelectionDAG *CurDAG, SDValue N)
bool isShiftedMask_64(uint64_t Value)
isShiftedMask_64 - This function returns true if the argument contains a non-empty sequence of ones w...
REG_SEQUENCE - This variadic instruction is used to form a register that represents a consecutive seq...
const SDValue & getOffset() const
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
const MDOperand & getOperand(unsigned I) const
EXTRACT_SUBREG - This instruction takes two operands: a register that has subregisters, and a subregister index.
static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits, uint64_t Imm, uint64_t MSB, unsigned Depth)
static bool isIntImmediate(const SDNode *N, uint64_t &Imm)
isIntImmediate - This method tests to see if the node is a constant operand.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount)
Create a machine node performing a notional SHL of Op by ShlAmount.
static AArch64_AM::ShiftExtendType getExtendTypeForNode(SDValue N, bool IsLoadStore=false)
getExtendTypeForNode - Translate an extend node to the corresponding ExtendType value.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
SelectionDAGISel - This is the common base class used for SelectionDAG-based pattern-matching instruc...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Class for arbitrary precision integers.
iterator_range< use_iterator > uses()
int64_t getSExtValue() const
op_iterator op_begin() const
ZERO_EXTEND - Used for integer types, zeroing the new bits.
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
ANY_EXTEND - Used for integer types. The high bits are undefined.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
FunctionPass * createAArch64ISelDag(AArch64TargetMachine &TM, CodeGenOpt::Level OptLevel)
createAArch64ISelDag - This pass converts a legalized DAG into a AArch64-specific DAG...
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
uint32_t fromString(StringRef Name, const FeatureBitset &FeatureBits, bool &Valid) const
PointerType * getType() const
Global values are always pointers.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx)
Bitwise operators - logical and, logical or, logical xor.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static AArch64_AM::ShiftExtendType getShiftType(unsigned Imm)
getShiftType - Extract the shift type.
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0, unsigned &Immr, unsigned &Imms, bool BiggerPattern)
static int getIntOperandFromRegisterString(StringRef RegString)
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
MachineSDNode * getMachineNode(unsigned Opcode, SDLoc dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
uint32_t fromString(StringRef Name, const FeatureBitset &FeatureBits, bool &Valid) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits, unsigned Depth)
const MDNode * getMD() const
unsigned getAlignment() const
static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits, unsigned Depth)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
SDValue getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
StringRef - Represent a constant reference to a string, i.e.
SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that the first operand is an imme...
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
TRUNCATE - Completely drop the high bits.
static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst, SDValue &Src, unsigned &ImmR, unsigned &ImmS, SelectionDAG *CurDAG)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op, SDValue &Src, int &ShiftAmount, int &MaskWidth)
Does this tree qualify as an attempt to move a bitfield into position, essentially "(and (shl VAL...
std::size_t countLeadingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the most significant bit to the first zero bit.
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
This class is used to represent ISD::LOAD nodes.
Function must be optimized for size first.