#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));
namespace RISCVVPseudosTable {

using namespace RISCV;

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
// In getNop(): prefer the compressed c.nop when the C extension is enabled.
if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
// In isLoadFromStackSlot(): recognize loads of the form `load rd, 0(fi)`.
switch (MI.getOpcode()) {
// ...
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
    MI.getOperand(2).getImm() == 0) {
  // ...
  return MI.getOperand(0).getReg();
// In isStoreToStackSlot(): the mirror image for `store rs, 0(fi)`.
switch (MI.getOpcode()) {
// ...
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
    MI.getOperand(2).getImm() == 0) {
  // ...
  return MI.getOperand(0).getReg();
// Body of forwardCopyWillClobberTuple():
return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
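As a standalone illustration (plain C++, outside LLVM): a forward, low-to-high copy of an NumRegs-long register tuple clobbers its own source exactly when the destination starts inside the source tuple at a higher index.

#include <cassert>

static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}

int main() {
  // Copying v8..v11 to v10..v13 forward would overwrite v10/v11 before
  // they are read, so such a copy must be emitted high-to-low.
  assert(forwardCopyWillClobberTuple(/*DstReg=*/10, /*SrcReg=*/8, /*NumRegs=*/4));
  assert(!forwardCopyWillClobberTuple(/*DstReg=*/4, /*SrcReg=*/8, /*NumRegs=*/4));
}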
// Fragments of isConvertibleToVMV_V_V(), which walks backwards from a COPY
// looking for the vsetvli that defines the VL/VTYPE in effect:
assert(MBBI->getOpcode() == TargetOpcode::COPY &&
       "Unexpected COPY instruction.");
// ...
bool FoundDef = false;
bool FirstVSetVLI = false;
unsigned FirstSEW = 0;
// ...
if (MBBI->isMetaInstruction())
// ...
if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
    MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
    MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
  // ...
  unsigned FirstVType = MBBI->getOperand(2).getImm();
  // ...
  if (FirstLMul != LMul)
  // ...
  if (MBBI->getOperand(0).getReg() != RISCV::X0)
  // ...
  if (MBBI->getOperand(1).isImm())
  // ...
  if (MBBI->getOperand(1).getReg() != RISCV::X0)
  // ...
  unsigned VType = MBBI->getOperand(2).getImm();
  // ...
} else if (MBBI->isInlineAsm() || MBBI->isCall()) {
  // ...
} else if (MBBI->getNumDefs()) {
  // ...
  if (MBBI->modifiesRegister(RISCV::VL))
  // ...
  if (!MO.isReg() || !MO.isDef())
  // ...
  if (MO.getReg() != SrcReg)
// In copyPhysReg(): choose the copy opcode from the register classes
// involved (per-branch NF/LMul updates elided).
if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
  // ...
}
if (RISCV::VCSRRegClass.contains(SrcReg) &&
    RISCV::GPRRegClass.contains(DstReg)) {
  // ...
}
bool IsScalableVector = true;
// ...
unsigned SubRegIdx = RISCV::sub_vrm1_0;
if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::FSGNJ_H;
  IsScalableVector = false;
} else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::FSGNJ_S;
  IsScalableVector = false;
} else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::FSGNJ_D;
  IsScalableVector = false;
} else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
} else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV2R_V;
} else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV4R_V;
} else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV8R_V;
} else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
} else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV2R_V;
  SubRegIdx = RISCV::sub_vrm2_0;
} else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV4R_V;
  SubRegIdx = RISCV::sub_vrm4_0;
} else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
} else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV2R_V;
  SubRegIdx = RISCV::sub_vrm2_0;
} else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
} else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV2R_V;
  SubRegIdx = RISCV::sub_vrm2_0;
} else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
} else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
} else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
} else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
  Opc = RISCV::PseudoVMV1R_V;
  SubRegIdx = RISCV::sub_vrm1_0;
if (IsScalableVector) {
  bool UseVMV_V_V = false;
  // ...
  unsigned DefExplicitOpNum;
  // ...
  DefExplicitOpNum = DefMBBI->getNumExplicitOperands();
  // Pick the vmv.v.v / vmv.v.i pseudo matching the source LMUL:
  Opc = RISCV::PseudoVMV_V_V_M1;
  VIOpc = RISCV::PseudoVMV_V_I_M1;
  // ...
  Opc = RISCV::PseudoVMV_V_V_M2;
  VIOpc = RISCV::PseudoVMV_V_I_M2;
  // ...
  Opc = RISCV::PseudoVMV_V_V_M4;
  VIOpc = RISCV::PseudoVMV_V_I_M4;
  // ...
  Opc = RISCV::PseudoVMV_V_V_M8;
  VIOpc = RISCV::PseudoVMV_V_I_M8;
  // ...
  bool UseVMV_V_I = false;
  if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
  // ...
  MIB = MIB.add(DefMBBI->getOperand(1));
  // ...
  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
  MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
  // ...
  // Segment copy: iterate over the NF sub-registers, reversing direction
  // when a forward copy would clobber its own source tuple.
  int I = 0, End = NF, Incr = 1;
  // ...
  assert(!Fractional && "It is impossible be fractional lmul here.");
  // ...
  for (; I != End; I += Incr) {
    // ...
    MIB = MIB.add(DefMBBI->getOperand(1));
    // ...
    MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
    MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
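The direction-flipping loop above is easiest to see in isolation. Below is a hedged, self-contained C++ sketch (plain functions, not the LLVM MachineInstr builder API) of how a segment-register copy picks its direction:

#include <cstdio>

// Mirrors forwardCopyWillClobberTuple() from this file.
static bool forwardCopyWillClobberTuple(unsigned Dst, unsigned Src,
                                        unsigned NumRegs) {
  return Dst > Src && (Dst - Src) < NumRegs;
}

// Copy an NF-register tuple one vector register at a time, reversing the
// order when a forward copy would overwrite not-yet-read source registers.
static void copyTuple(unsigned DstReg, unsigned SrcReg, unsigned NF) {
  int I = 0, End = NF, Incr = 1;
  if (forwardCopyWillClobberTuple(DstReg, SrcReg, NF)) {
    I = NF - 1;
    End = -1;
    Incr = -1;
  }
  for (; I != End; I += Incr)
    std::printf("vmv1r.v v%u, v%u\n", DstReg + I, SrcReg + I);
}

int main() {
  copyTuple(10, 8, 4); // overlapping: emits v13<-v11 ... v10<-v8, high to low
}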
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();
bool IsScalableVector = true;
bool IsZvlsseg = true;
if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
           RISCV::SW : RISCV::SD;
  IsScalableVector = false;
} else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::FSH;
  IsScalableVector = false;
} else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::FSW;
  IsScalableVector = false;
} else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::FSD;
  IsScalableVector = false;
} else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVSPILL_M1;
  IsZvlsseg = false;
} else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVSPILL_M2;
  IsZvlsseg = false;
} else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVSPILL_M4;
  IsZvlsseg = false;
} else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVSPILL_M8;
  IsZvlsseg = false;
} else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL2_M1;
else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL2_M2;
else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL2_M4;
else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL3_M1;
else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL3_M2;
else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL4_M1;
else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL4_M2;
else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL5_M1;
else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL6_M1;
else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL7_M1;
else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVSPILL8_M1;
// ...

if (IsScalableVector) {
// In loadRegFromStackSlot(), the mirror image of the spill path:
DL = I->getDebugLoc();
// ...
bool IsScalableVector = true;
bool IsZvlsseg = true;
if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
  Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
           RISCV::LW : RISCV::LD;
  IsScalableVector = false;
} else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::FLH;
  IsScalableVector = false;
} else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::FLW;
  IsScalableVector = false;
} else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::FLD;
  IsScalableVector = false;
} else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVRELOAD_M1;
  IsZvlsseg = false;
} else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVRELOAD_M2;
  IsZvlsseg = false;
} else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVRELOAD_M4;
  IsZvlsseg = false;
} else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
  Opcode = RISCV::PseudoVRELOAD_M8;
  IsZvlsseg = false;
} else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD2_M1;
else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD2_M2;
else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD2_M4;
else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD3_M1;
else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD3_M2;
else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD4_M1;
else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD4_M2;
else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD5_M1;
else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD6_M1;
else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD7_M1;
else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
  Opcode = RISCV::PseudoVRELOAD8_M1;
// ...

if (IsScalableVector) {
// In movImm(), emitting each step of the generated materialization sequence:
switch (Inst.getOpndKind()) {
711 "Unknown conditional branch");
724 return get(RISCV::BEQ);
726 return get(RISCV::BNE);
728 return get(RISCV::BLT);
730 return get(RISCV::BGE);
732 return get(RISCV::BLTU);
734 return get(RISCV::BGEU);
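For reference, the inverse mapping used by branch analysis can be sketched standalone; this mirrors RISCVCC::getOppositeBranchCondition, with illustrative local enum names rather than the LLVM headers:

enum CondCode { COND_EQ, COND_NE, COND_LT, COND_GE, COND_LTU, COND_GEU };

// Each RISC-V conditional branch has a direct inverse, so reversing a
// branch never needs extra instructions.
static CondCode getOppositeBranchCondition(CondCode CC) {
  switch (CC) {
  case COND_EQ:  return COND_NE;
  case COND_NE:  return COND_EQ;
  case COND_LT:  return COND_GE;
  case COND_GE:  return COND_LT;
  case COND_LTU: return COND_GEU;
  case COND_GEU: return COND_LTU;
  }
  return CC; // unreachable for well-formed input
}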
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  // ...
  // If the block has no terminators, it just falls through.
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
  // ...
  // Count the terminators and find the first unconditional or indirect branch.
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    // ...
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
  // ...
  // If AllowModify is set, erase any terminators following the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      // ...
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
  // ...
  // We can't handle blocks with more than two terminators.
  if (NumTerminators > 2)
  // ...
  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
  // ...
  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
  // ...
  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
  // ...
  // Remove the (unconditional) branch.
  I->eraseFromParent();
  // ...
  if (!I->getDesc().isConditionalBranch())
  // ...
  // Remove the conditional branch as well.
  I->eraseFromParent();
// In insertBranch():
assert(TBB && "insertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 3 || Cond.size() == 0) &&
       "RISCV branch conditions have two components!");
// In insertIndirectBranch():
assert(RS && "RegScavenger required for long branching");
assert(MBB.empty() &&
       "new block should be inserted for expanding unconditional branch");
// ...
report_fatal_error("Branch offsets outside of the signed 32-bit range not supported");
// ...
Register Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
                                              MI.getIterator(), false, 0);
assert(Scav != RISCV::NoRegister && "No register is scavenged!");
// In reverseBranchCondition():
assert((Cond.size() == 3) && "Invalid branch condition!");
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
                                           int64_t BrOffset) const {
  switch (BranchOpc) {
  // ...
    // Conditional branches (B-type) carry a 13-bit signed offset.
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    // J-type jumps carry a 21-bit signed offset.
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
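A standalone illustration of these range checks, assuming only that B-type branches carry 13 and J-type jumps 21 signed offset bits (isIntN here re-implements llvm::isIntN):

#include <cstdint>

constexpr bool isIntN(unsigned N, int64_t x) {
  return -(int64_t(1) << (N - 1)) <= x && x < (int64_t(1) << (N - 1));
}

static_assert(isIntN(13, 4094), "reachable by a conditional branch");
static_assert(!isIntN(13, 4096), "needs JAL or branch relaxation");
static_assert(isIntN(21, 1048574), "reachable by JAL/PseudoBR");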
// In getInstSizeInBytes():
if (MI.isMetaInstruction())
  return 0;

unsigned Opcode = MI.getOpcode();

if (Opcode == TargetOpcode::INLINEASM || Opcode == TargetOpcode::INLINEASM_BR) {
  // ...
  return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                            *TM.getMCAsmInfo());
}
// ...
if (MI.getParent() && MI.getParent()->getParent()) {
  const auto MF = MI.getMF();
  // ...
}
return get(Opcode).getSize();
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
// In isCopyInstrImpl():
switch (MI.getOpcode()) {
default:
  break;
case RISCV::ADDI:
  if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0)
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  break;
case RISCV::FSGNJ_D:
case RISCV::FSGNJ_S:
case RISCV::FSGNJ_H:
  // The canonical floating-point move is fsgnj rd, rs, rs.
  if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
      MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  break;
}
// In verifyInstruction(): check immediate operands against their declared
// operand type.
unsigned OpType = OI.value().OperandType;
// ...
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
// ...
Ok = isInt<12>(Imm);
// ...
if (STI.getTargetTriple().isArch64Bit())
  Ok = isUInt<6>(Imm);
else
  Ok = isUInt<5>(Imm);
// ...
Ok = Imm >= 0 && Imm <= 10;
// ...
ErrInfo = "Invalid immediate";
// In areMemAccessesTriviallyDisjoint():
int64_t OffsetA = 0, OffsetB = 0;
unsigned int WidthA = 0, WidthB = 0;
// ...
int LowOffset = std::min(OffsetA, OffsetB);
int HighOffset = std::max(OffsetA, OffsetB);
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowOffset + LowWidth <= HighOffset)
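The overlap test reduces to one comparison on sorted intervals. A minimal standalone sketch (names invented for illustration):

#include <algorithm>
#include <cstdint>

// Two accesses [Offset, Offset + Width) are trivially disjoint when the
// lower one ends at or before the higher one begins.
static bool accessesDisjoint(int64_t OffsetA, unsigned WidthA,
                             int64_t OffsetB, unsigned WidthB) {
  int64_t LowOffset = std::min(OffsetA, OffsetB);
  int64_t HighOffset = std::max(OffsetA, OffsetB);
  unsigned LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}
// e.g. accessesDisjoint(0, 8, 8, 4) is true: an 8-byte store at offset 0
// and a 4-byte load at offset 8 can never alias.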
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
// In getSerializableDirectMachineOperandTargetFlags():
using namespace RISCVII;
static const std::pair<unsigned, const char *> TargetFlags[] = {
    // ...
    {MO_LO, "riscv-lo"},
    {MO_HI, "riscv-hi"},
    // ...
// In isFunctionSafeToOutlineFrom():
if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  // Filter out candidates where x5 (t0) cannot be set up for the call:
  // ...
  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  // ...
  if (RepeatedSequenceLocs.size() < 2)
  // ...
  unsigned SequenceSize = 0;

  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  // ...
  unsigned CallOverhead = 8; // call t0, <fn> expands to auipc+jalr (8 bytes)
  for (auto &C : RepeatedSequenceLocs)
  // ...
  unsigned FrameOverhead = 4; // jr t0; only 2 bytes with the C extension
  if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
          .getFeatureBits()[RISCV::FeatureStdExtC])
    FrameOverhead = 2;
outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      // ...
      return MI.getMF()->getFunction().needsUnwindTableEntry()
                 ? outliner::InstrType::Illegal
                 : outliner::InstrType::Invisible;
  // ...
  if (MI.isInlineAsm())
  // ...
  // The outlined call clobbers x5 (t0), so anything touching it is illegal.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
  // ...
  for (const auto &MO : MI.operands())
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
  // ...
  if (MI.isMetaInstruction())
// In buildOutlinedFrame(): strip CFI instructions from the outlined body.
bool Changed = true;
// ...
for (; I != E; ++I) {
  if (I->isCFIInstruction()) {
    I->removeFromParent();
    // ...

// In insertOutlinedCall(): call the outlined function through t0 (x5).
It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg),
                            RISCV::X5)
                        .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                          RISCVII::MO_CALL));
// In createMIROperandComment(): decode vsetvli/SEW immediates into comments.
std::string GenericComment =
    TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
if (!GenericComment.empty())
  return GenericComment;
// ...
return std::string();
// ...
std::string Comment;
// ...
if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
     MI.getOpcode() == RISCV::PseudoVSETVLI ||
     MI.getOpcode() == RISCV::PseudoVSETIVLI ||
     MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
    // ...
  unsigned Imm = MI.getOperand(OpIdx).getImm();
  // ...
  unsigned NumOperands = MI.getNumExplicitOperands();
  // ...
  // The SEW operand sits just before any trailing policy operand.
  if (OpIdx != NumOperands - HasPolicy - 1)
    return std::string();

  unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
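The last line decodes the SEW operand: the pseudo carries log2(SEW), with 0 treated as the 8-bit default. A one-line sketch:

constexpr unsigned decodeSEW(unsigned Log2SEW) {
  return Log2SEW ? 1u << Log2SEW : 8u; // 3->e8, 4->e16, 5->e32, 6->e64
}
static_assert(decodeSEW(5) == 32 && decodeSEW(0) == 8, "SEW decoding");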
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16):                                        \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32):                                   \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  // ...
  switch (MI.getOpcode()) {
  // ...
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
    // ...
    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
    // ...
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
    // ...
    // For these instructions we have more freedom; operands 1, 2, and 3 may
    // commute.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
    // ...
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
    // ...
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
    // ...
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // ...
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Neither operand is fixed: start from the tied source operand.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one operand is fixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }
      // ...
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        // ...
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }
      // ...
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    // Swapping operands 1 and 3 requires flipping the opcode (e.g.
    // vfmacc <-> vfmadd):
    switch (MI.getOpcode()) {
    // ...
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // ...
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      // ...
      switch (MI.getOpcode()) {
      // ...
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
#undef CASE_VFMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_SPLATS
#undef CASE_VFMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
// In convertToThreeAddress(): rewrite tied widening ops (e.g. vwadd.wv) to
// their untied three-address form when the policy allows it.
switch (MI.getOpcode()) {
// ...
  assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
         MI.getNumExplicitOperands() == 6);
  // If the tail policy is undisturbed we can't convert.
  if ((MI.getOperand(5).getImm() & 1) == 0)
  // ...
  switch (MI.getOpcode()) {
  // ...
  // Rebuild the instruction with the untied opcode:
  // ...
      .add(MI.getOperand(0))
      .add(MI.getOperand(1))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3))
      .add(MI.getOperand(4));
// ...
unsigned NumOps = MI.getNumOperands();
for (unsigned I = 1; I < NumOps; ++I) {
  MachineOperand &Op = MI.getOperand(I);
  if (Op.isReg() && Op.isKill())
  // ...
}
// ...
if (MI.getOperand(0).isEarlyClobber()) {
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_OPCODE_COMMON
// In getVLENFactoredAmount(): scale a byte amount by VLEN/8 (VLENB).
assert(Amount > 0 && "There is no need to get VLEN scaled value.");
assert(Amount % 8 == 0 &&
       "Reserve the stack by the multiple of one vector size.");
// ...
int64_t NumOfVReg = Amount / 8;
// ...
assert(isInt<32>(NumOfVReg) &&
       "Expect the number of vector registers within 32-bits.");
if (isPowerOf2_32(NumOfVReg)) {
  uint32_t ShiftAmount = Log2_32(NumOfVReg);
  if (ShiftAmount == 0)
  // ...
} else if ((NumOfVReg == 3 || NumOfVReg == 5 || NumOfVReg == 9) &&
           STI.hasStdExtZba()) {
  // Zba shNadd covers multiplication by 3, 5, and 9 without a MUL.
  unsigned Opc;
  switch (NumOfVReg) {
  case 3: Opc = RISCV::SH1ADD; break;
  case 5: Opc = RISCV::SH2ADD; break;
  case 9: Opc = RISCV::SH3ADD; break;
  // ...
  }
// ...
// Without the M extension a runtime multiply is unavailable:
"M-extension must be enabled to calculate the vscaled size/offset."});
// Whole-register loads recognized by isRVVWholeLoadStore():
case RISCV::VL1RE8_V:
case RISCV::VL2RE8_V:
case RISCV::VL4RE8_V:
case RISCV::VL8RE8_V:
case RISCV::VL1RE16_V:
case RISCV::VL2RE16_V:
case RISCV::VL4RE16_V:
case RISCV::VL8RE16_V:
case RISCV::VL1RE32_V:
case RISCV::VL2RE32_V:
case RISCV::VL4RE32_V:
case RISCV::VL8RE32_V:
case RISCV::VL1RE64_V:
case RISCV::VL2RE64_V:
case RISCV::VL4RE64_V:
case RISCV::VL8RE64_V:
// In isRVVSpill():
unsigned Opcode = MI.getOpcode();
if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
    !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
// In isRVVSpillForZvlsseg(): map each segment spill/reload pseudo to its
// (NF, LMUL) pair.
case RISCV::PseudoVSPILL2_M1:
case RISCV::PseudoVRELOAD2_M1:
  return std::make_pair(2u, 1u);
case RISCV::PseudoVSPILL2_M2:
case RISCV::PseudoVRELOAD2_M2:
  return std::make_pair(2u, 2u);
case RISCV::PseudoVSPILL2_M4:
case RISCV::PseudoVRELOAD2_M4:
  return std::make_pair(2u, 4u);
case RISCV::PseudoVSPILL3_M1:
case RISCV::PseudoVRELOAD3_M1:
  return std::make_pair(3u, 1u);
case RISCV::PseudoVSPILL3_M2:
case RISCV::PseudoVRELOAD3_M2:
  return std::make_pair(3u, 2u);
case RISCV::PseudoVSPILL4_M1:
case RISCV::PseudoVRELOAD4_M1:
  return std::make_pair(4u, 1u);
case RISCV::PseudoVSPILL4_M2:
case RISCV::PseudoVRELOAD4_M2:
  return std::make_pair(4u, 2u);
case RISCV::PseudoVSPILL5_M1:
case RISCV::PseudoVRELOAD5_M1:
  return std::make_pair(5u, 1u);
case RISCV::PseudoVSPILL6_M1:
case RISCV::PseudoVRELOAD6_M1:
  return std::make_pair(6u, 1u);
case RISCV::PseudoVSPILL7_M1:
case RISCV::PseudoVRELOAD7_M1:
  return std::make_pair(7u, 1u);
case RISCV::PseudoVSPILL8_M1:
case RISCV::PseudoVRELOAD8_M1:
  return std::make_pair(8u, 1u);
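Each (NF, LMUL) pair above tells frame lowering how many whole vector registers a segment spill covers; a hedged helper sketch (the function name is invented, not LLVM API):

constexpr unsigned zvlssegRegCount(unsigned NF, unsigned LMul) {
  return NF * LMul; // the V extension requires NF * LMUL <= 8
}
static_assert(zvlssegRegCount(3, 2) == 6, "PseudoVSPILL3_M2 covers 6 v-regs");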
// isFaultFirstLoad(): fault-only-first loads define both a value and a new VL.
return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
       !MI.isInlineAsm();