Go to the documentation of this file.
46 #define DEBUG_TYPE "riscv-asm-parser"
49 #define GEN_COMPRESS_INSTR
50 #include "RISCVGenCompressInstEmitter.inc"
53 "Number of RISC-V Compressed instructions emitted");
62 struct ParserOptionsSet {
70 ParserOptionsSet ParserOptions;
72 SMLoc getLoc()
const {
return getParser().getTok().getLoc(); }
73 bool isRV64()
const {
return getSTI().hasFeature(RISCV::Feature64Bit); }
74 bool isRV32E()
const {
return getSTI().hasFeature(RISCV::FeatureRV32E); }
82 unsigned Kind)
override;
85 int64_t Lower, int64_t Upper,
Twine Msg);
87 bool MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
90 bool MatchingInlineAsm)
override;
92 bool ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
SMLoc &EndLoc)
override;
94 SMLoc &EndLoc)
override;
99 bool ParseDirective(
AsmToken DirectiveID)
override;
130 void emitLoadStoreSymbol(
MCInst &Inst,
unsigned Opcode,
SMLoc IDLoc,
134 void emitPseudoExtend(
MCInst &Inst,
bool SignExtend, int64_t
Width,
157 #define GET_ASSEMBLER_HEADER
158 #include "RISCVGenAsmMatcher.inc"
163 bool AllowParens =
false);
178 bool parseDirectiveOption();
179 bool parseDirectiveAttribute();
180 bool parseDirectiveInsn(
SMLoc L);
183 if (!(getSTI().getFeatureBits()[Feature])) {
185 setAvailableFeatures(
190 bool getFeatureBits(
uint64_t Feature) {
191 return getSTI().getFeatureBits()[Feature];
195 if (getSTI().getFeatureBits()[Feature]) {
197 setAvailableFeatures(
202 void pushFeatureBits() {
203 assert(FeatureBitStack.size() == ParserOptionsStack.size() &&
204 "These two stacks must be kept synchronized");
205 FeatureBitStack.push_back(getSTI().getFeatureBits());
206 ParserOptionsStack.push_back(ParserOptions);
209 bool popFeatureBits() {
210 assert(FeatureBitStack.size() == ParserOptionsStack.size() &&
211 "These two stacks must be kept synchronized");
212 if (FeatureBitStack.empty())
216 copySTI().setFeatureBits(FeatureBits);
217 setAvailableFeatures(ComputeAvailableFeatures(FeatureBits));
224 std::unique_ptr<RISCVOperand> defaultMaskRegOp()
const;
227 enum RISCVMatchResultTy {
228 Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY,
229 #define GET_OPERAND_DIAGNOSTIC_TYPES
230 #include "RISCVGenAsmMatcher.inc"
231 #undef GET_OPERAND_DIAGNOSTIC_TYPES
234 static bool classifySymbolRef(
const MCExpr *Expr,
244 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
247 if (ABIName.endswith(
"f") &&
248 !getSTI().getFeatureBits()[RISCV::FeatureStdExtF]) {
249 errs() <<
"Hard-float 'f' ABI can't be used for a target that "
250 "doesn't support the F instruction set extension (ignoring "
252 }
else if (ABIName.endswith(
"d") &&
253 !getSTI().getFeatureBits()[RISCV::FeatureStdExtD]) {
254 errs() <<
"Hard-float 'd' ABI can't be used for a target that "
255 "doesn't support the D instruction set extension (ignoring "
305 SMLoc StartLoc, EndLoc;
310 struct SysRegOp SysReg;
311 struct VTypeOp VType;
314 RISCVOperand(KindTy K) :
Kind(K) {}
320 StartLoc = o.StartLoc;
326 case KindTy::Immediate:
332 case KindTy::SystemRegister:
341 bool isToken()
const override {
return Kind == KindTy::Token; }
343 bool isV0Reg()
const {
346 bool isImm()
const override {
return Kind == KindTy::Immediate; }
347 bool isMem()
const override {
return false; }
348 bool isSystemRegister()
const {
return Kind == KindTy::SystemRegister; }
352 RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(
Reg.RegNum);
355 bool isGPRAsFPR()
const {
return isGPR() && IsGPRAsFPR; }
357 bool isGPRF64AsFPR()
const {
return isGPR() && IsGPRAsFPR && IsRV64; }
359 bool isGPRPF64AsFPR()
const {
360 return isGPR() && IsGPRAsFPR && !IsRV64 && !((
Reg.RegNum - RISCV::X0) & 1);
363 static bool evaluateConstantImm(
const MCExpr *Expr, int64_t &
Imm,
365 if (
auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
367 return RE->evaluateAsConstant(
Imm);
370 if (
auto CE = dyn_cast<MCConstantExpr>(Expr)) {
372 Imm =
CE->getValue();
381 template <
int N>
bool isBareSimmNLsb0()
const {
386 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
389 IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
397 bool isBareSymbol()
const {
401 if (!
isImm() || evaluateConstantImm(getImm(),
Imm, VK))
403 return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
407 bool isCallSymbol()
const {
411 if (!
isImm() || evaluateConstantImm(getImm(),
Imm, VK))
413 return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
418 bool isPseudoJumpSymbol()
const {
422 if (!
isImm() || evaluateConstantImm(getImm(),
Imm, VK))
424 return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
428 bool isTPRelAddSymbol()
const {
432 if (!
isImm() || evaluateConstantImm(getImm(),
Imm, VK))
434 return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
438 bool isCSRSystemRegister()
const {
return isSystemRegister(); }
440 bool isVTypeImm(
unsigned N)
const {
445 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
451 bool isVTypeI10()
const {
452 if (
Kind == KindTy::Immediate)
453 return isVTypeImm(10);
454 return Kind == KindTy::VType;
456 bool isVTypeI11()
const {
457 if (
Kind == KindTy::Immediate)
458 return isVTypeImm(11);
459 return Kind == KindTy::VType;
464 bool isFenceArg()
const {
470 if (evaluateConstantImm(getImm(),
Imm, VK)) {
475 auto *SVal = dyn_cast<MCSymbolRefExpr>(getImm());
480 StringRef Str = SVal->getSymbol().getName();
486 if (
c !=
'i' &&
c !=
'o' &&
c !=
'r' &&
c !=
'w')
496 bool isFRMArg()
const {
499 const MCExpr *Val = getImm();
500 auto *SVal = dyn_cast<MCSymbolRefExpr>(Val);
504 StringRef Str = SVal->getSymbol().getName();
509 bool isImmXLenLI()
const {
514 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
523 bool isUImmLog2XLen()
const {
528 if (!evaluateConstantImm(getImm(),
Imm, VK) ||
531 return (isRV64() && isUInt<6>(
Imm)) || isUInt<5>(
Imm);
534 bool isUImmLog2XLenNonZero()
const {
539 if (!evaluateConstantImm(getImm(),
Imm, VK) ||
544 return (isRV64() && isUInt<6>(
Imm)) || isUInt<5>(
Imm);
547 bool isUImmLog2XLenHalf()
const {
552 if (!evaluateConstantImm(getImm(),
Imm, VK) ||
555 return (isRV64() && isUInt<5>(
Imm)) || isUInt<4>(
Imm);
558 template <
unsigned N>
bool IsUImm()
const {
563 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
567 bool isUImm2() {
return IsUImm<2>(); }
568 bool isUImm3() {
return IsUImm<3>(); }
569 bool isUImm5() {
return IsUImm<5>(); }
570 bool isUImm7() {
return IsUImm<7>(); }
572 bool isRnumArg()
const {
577 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
578 return IsConstantImm &&
Imm >= INT64_C(0) &&
Imm <= INT64_C(10) &&
582 bool isSImm5()
const {
587 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
591 bool isSImm6()
const {
596 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
600 bool isSImm6NonZero()
const {
605 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
606 return IsConstantImm && isInt<6>(
Imm) && (
Imm != 0) &&
610 bool isCLUIImm()
const {
615 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
616 return IsConstantImm && (
Imm != 0) &&
617 (isUInt<5>(
Imm) || (
Imm >= 0xfffe0 &&
Imm <= 0xfffff)) &&
621 bool isUImm7Lsb00()
const {
626 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
627 return IsConstantImm && isShiftedUInt<5, 2>(
Imm) &&
631 bool isUImm8Lsb00()
const {
636 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
637 return IsConstantImm && isShiftedUInt<6, 2>(
Imm) &&
641 bool isUImm8Lsb000()
const {
646 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
647 return IsConstantImm && isShiftedUInt<5, 3>(
Imm) &&
651 bool isSImm9Lsb0()
const {
return isBareSimmNLsb0<9>(); }
653 bool isUImm9Lsb000()
const {
658 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
659 return IsConstantImm && isShiftedUInt<6, 3>(
Imm) &&
663 bool isUImm10Lsb00NonZero()
const {
668 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
669 return IsConstantImm && isShiftedUInt<8, 2>(
Imm) && (
Imm != 0) &&
673 bool isSImm12()
const {
679 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
681 IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
683 IsValid = isInt<12>(
Imm);
690 bool isSImm12Lsb0()
const {
return isBareSimmNLsb0<12>(); }
692 bool isSImm12Lsb00000()
const {
697 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
698 return IsConstantImm && isShiftedInt<7, 5>(
Imm) &&
702 bool isSImm13Lsb0()
const {
return isBareSimmNLsb0<13>(); }
704 bool isSImm10Lsb0000NonZero()
const {
709 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
710 return IsConstantImm && (
Imm != 0) && isShiftedInt<6, 4>(
Imm) &&
714 bool isUImm20LUI()
const {
720 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
721 if (!IsConstantImm) {
722 IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
732 bool isUImm20AUIPC()
const {
738 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
739 if (!IsConstantImm) {
740 IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
754 bool isSImm21Lsb0JAL()
const {
return isBareSimmNLsb0<21>(); }
756 bool isImmZero()
const {
761 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
765 bool isSImm5Plus1()
const {
770 bool IsConstantImm = evaluateConstantImm(getImm(),
Imm, VK);
771 return IsConstantImm && isInt<5>(
Imm - 1) &&
776 SMLoc getStartLoc()
const override {
return StartLoc; }
778 SMLoc getEndLoc()
const override {
return EndLoc; }
780 bool isRV64()
const {
return IsRV64; }
782 unsigned getReg()
const override {
784 return Reg.RegNum.id();
788 assert(
Kind == KindTy::SystemRegister &&
"Invalid type access!");
789 return StringRef(SysReg.Data, SysReg.Length);
792 const MCExpr *getImm()
const {
793 assert(
Kind == KindTy::Immediate &&
"Invalid type access!");
798 assert(
Kind == KindTy::Token &&
"Invalid type access!");
802 unsigned getVType()
const {
803 assert(
Kind == KindTy::VType &&
"Invalid type access!");
816 case KindTy::Immediate:
823 OS <<
"'" << getToken() <<
"'";
825 case KindTy::SystemRegister:
826 OS <<
"<sysreg: " << getSysReg() <<
'>';
836 static std::unique_ptr<RISCVOperand> createToken(StringRef Str, SMLoc S,
838 auto Op = std::make_unique<RISCVOperand>(KindTy::Token);
846 static std::unique_ptr<RISCVOperand> createReg(unsigned RegNo, SMLoc S,
847 SMLoc E, bool IsRV64,
848 bool IsGPRAsFPR = false) {
849 auto Op = std::make_unique<RISCVOperand>(KindTy::Register);
850 Op->Reg.RegNum = RegNo;
854 Op->IsGPRAsFPR = IsGPRAsFPR;
858 static std::unique_ptr<RISCVOperand> createImm(const MCExpr *Val, SMLoc S,
859 SMLoc E, bool IsRV64) {
860 auto Op = std::make_unique<RISCVOperand>(KindTy::Immediate);
868 static std::unique_ptr<RISCVOperand>
869 createSysReg(StringRef Str, SMLoc S, unsigned Encoding, bool IsRV64) {
870 auto Op = std::make_unique<RISCVOperand>(KindTy::SystemRegister);
871 Op->SysReg.Data = Str.data();
872 Op->SysReg.Length = Str.size();
873 Op->SysReg.Encoding = Encoding;
880 static std::unique_ptr<RISCVOperand> createVType(unsigned VTypeI, SMLoc S,
882 auto Op = std::make_unique<RISCVOperand>(KindTy::VType);
883 Op->VType.Val = VTypeI;
890 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
891 assert(Expr && "Expr shouldn't be null!
");
893 RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
894 bool IsConstant = evaluateConstantImm(Expr, Imm, VK);
897 Inst.addOperand(MCOperand::createImm(Imm));
899 Inst.addOperand(MCOperand::createExpr(Expr));
902 // Used by the TableGen Code
903 void addRegOperands(MCInst &Inst, unsigned N) const {
904 assert(N == 1 && "Invalid number
of operands!
");
905 Inst.addOperand(MCOperand::createReg(getReg()));
908 void addImmOperands(MCInst &Inst, unsigned N) const {
909 assert(N == 1 && "Invalid number
of operands!
");
910 addExpr(Inst, getImm());
913 void addFenceArgOperands(MCInst &Inst, unsigned N) const {
914 assert(N == 1 && "Invalid number
of operands!
");
916 int64_t Constant = 0;
917 RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
918 if (evaluateConstantImm(getImm(), Constant, VK)) {
920 Inst.addOperand(MCOperand::createImm(Constant));
923 llvm_unreachable("FenceArg must contain
only [iorw]
or be 0
");
926 // isFenceArg has validated the operand, meaning this cast is safe
927 auto SE = cast<MCSymbolRefExpr>(getImm());
930 for (char c : SE->getSymbol().getName()) {
933 llvm_unreachable("FenceArg must contain
only [iorw]
or be 0
");
935 Imm |= RISCVFenceField::I;
938 Imm |= RISCVFenceField::O;
941 Imm |= RISCVFenceField::R;
944 Imm |= RISCVFenceField::W;
948 Inst.addOperand(MCOperand::createImm(Imm));
951 void addCSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
952 assert(N == 1 && "Invalid number
of operands!
");
953 Inst.addOperand(MCOperand::createImm(SysReg.Encoding));
956 // Support non-canonical syntax:
957 // "vsetivli rd, uimm, 0xabc
" or "vsetvli rd, rs1, 0xabc
"
958 // "vsetivli rd, uimm, (0xc <<
N)
" or "vsetvli rd, rs1, (0xc <<
N)
"
959 void addVTypeIOperands(MCInst &Inst, unsigned N) const {
960 assert(N == 1 && "Invalid number
of operands!
");
962 if (Kind == KindTy::Immediate) {
963 RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
964 bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
966 assert(IsConstantImm && "Invalid VTypeI Operand!
");
970 Inst.addOperand(MCOperand::createImm(Imm));
973 // Returns the rounding mode represented by this RISCVOperand. Should only
974 // be called after checking isFRMArg.
975 RISCVFPRndMode::RoundingMode getRoundingMode() const {
976 // isFRMArg has validated the operand, meaning this cast is safe.
977 auto SE = cast<MCSymbolRefExpr>(getImm());
978 RISCVFPRndMode::RoundingMode FRM =
979 RISCVFPRndMode::stringToRoundingMode(SE->getSymbol().getName());
980 assert(FRM != RISCVFPRndMode::Invalid && "Invalid rounding
mode");
984 void addFRMArgOperands(MCInst &Inst, unsigned N) const {
985 assert(N == 1 && "Invalid number
of operands!
");
986 Inst.addOperand(MCOperand::createImm(getRoundingMode()));
989 } // end anonymous namespace.
991 #define GET_REGISTER_MATCHER
992 #define GET_SUBTARGET_FEATURE_NAME
993 #define GET_MATCHER_IMPLEMENTATION
994 #define GET_MNEMONIC_SPELL_CHECKER
995 #include "RISCVGenAsmMatcher.inc
"
// Convert a 64-bit FPR (F0_D..F31_D) to the same-numbered 16-bit FPR
// (F0_H..F31_H) by shifting the register id between the two enum ranges.
// The assert guarantees Reg really is in the 64-bit FPR range.
// NOTE(review): the closing brace of this function is elided by the
// documentation extractor.
997 static MCRegister convertFPR64ToFPR16(MCRegister Reg) {
998 assert(Reg >= RISCV::F0_D && Reg <= RISCV::F31_D && "Invalid register");
999 return Reg - RISCV::F0_D + RISCV::F0_H;
// Convert a 64-bit FPR (F0_D..F31_D) to the same-numbered 32-bit FPR
// (F0_F..F31_F); mirrors convertFPR64ToFPR16 for the single-precision bank.
// NOTE(review): the closing brace of this function is elided by the
// documentation extractor.
1002 static MCRegister convertFPR64ToFPR32(MCRegister Reg) {
1003 assert(Reg >= RISCV::F0_D && Reg <= RISCV::F31_D && "Invalid register");
1004 return Reg - RISCV::F0_D + RISCV::F0_F;
// Map a single vector register (VR) to the grouped register (VRM2/VRM4/VRM8)
// that has it as its first sub-register, based on the requested operand Kind.
// NOTE(review): the second parameter line (original line 1008, presumably the
// 'Kind' parameter) and the fallthrough branch for non-VRM kinds (original
// lines 1016-1017) are elided by the documentation extractor.
1007 static MCRegister convertVRToVRMx(const MCRegisterInfo &RI, MCRegister Reg,
// Pick the target register class from the match kind.
1009 unsigned RegClassID;
1010 if (Kind == MCK_VRM2)
1011 RegClassID = RISCV::VRM2RegClassID;
1012 else if (Kind == MCK_VRM4)
1013 RegClassID = RISCV::VRM4RegClassID;
1014 else if (Kind == MCK_VRM8)
1015 RegClassID = RISCV::VRM8RegClassID;
// Ask MCRegisterInfo for the group register whose sub_vrm1_0 sub-register
// is Reg; returns 0 (no register) when Reg cannot head a group of that size.
1018 return RI.getMatchingSuperReg(Reg, RISCV::sub_vrm1_0,
1019 &RISCVMCRegisterClasses[RegClassID]);
// Target hook: after generic matching, coerce a parsed register operand into
// the register class the instruction actually expects (FPR64->FPR32/FPR16,
// VR->VRM2/VRM4/VRM8), since the parser cannot distinguish them by name.
// Returns Match_Success on coercion, Match_InvalidOperand otherwise.
// NOTE(review): several lines are elided by the documentation extractor,
// including the early is-register check (original 1025-1026 context) and the
// definitions of IsRegFPR64/IsRegFPR64C (originals 1029/1031).
1022 unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
1024 RISCVOperand &Op = static_cast<RISCVOperand &>(AsmOp);
1026 return Match_InvalidOperand;
1028 MCRegister Reg = Op.getReg();
1030 RISCVMCRegisterClasses[RISCV::FPR64RegClassID].contains(Reg);
1032 RISCVMCRegisterClasses[RISCV::FPR64CRegClassID].contains(Reg);
1033 bool IsRegVR = RISCVMCRegisterClasses[RISCV::VRRegClassID].contains(Reg);
1035 // As the parser couldn't differentiate an FPR32 from an FPR64, coerce the
1036 // register from FPR64 to FPR32 or FPR64C to FPR32C if necessary.
1037 if ((IsRegFPR64 && Kind == MCK_FPR32) ||
1038 (IsRegFPR64C && Kind == MCK_FPR32C)) {
1039 Op.Reg.RegNum = convertFPR64ToFPR32(Reg);
1040 return Match_Success;
1042 // As the parser couldn't differentiate an FPR16 from an FPR64, coerce the
1043 // register from FPR64 to FPR16 if necessary.
1044 if (IsRegFPR64 && Kind == MCK_FPR16) {
1045 Op.Reg.RegNum = convertFPR64ToFPR16(Reg);
1046 return Match_Success;
1048 // As the parser couldn't differentiate an VRM2/VRM4/VRM8 from an VR, coerce
1049 // the register from VR to VRM2/VRM4/VRM8 if necessary.
1050 if (IsRegVR && (Kind == MCK_VRM2 || Kind == MCK_VRM4 || Kind == MCK_VRM8)) {
1051 Op.Reg.RegNum = convertVRToVRMx(*getContext().getRegisterInfo(), Reg, Kind);
// convertVRToVRMx yields 0 when Reg cannot start a group of that width.
1052 if (Op.Reg.RegNum == 0)
1053 return Match_InvalidOperand;
1054 return Match_Success;
1056 return Match_InvalidOperand;
// Emit a ranged-immediate diagnostic at the offending operand's location:
// "<Msg> [Lower, Upper]". Always returns true (the Error() convention).
// NOTE(review): the default Msg literal and the " [", ", ", "]" pieces below
// are split across lines by the documentation extractor; the underlying
// strings form "immediate must be an integer in the range [Lower, Upper]".
1059 bool RISCVAsmParser::generateImmOutOfRangeError(
1060 OperandVector &Operands, uint64_t ErrorInfo, int64_t Lower, int64_t Upper,
1061 Twine Msg = "immediate must
be an integer
in the range
") {
// ErrorInfo indexes the operand the matcher flagged; report at its start.
1062 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1063 return Error(ErrorLoc, Msg + " [
" + Twine(Lower) + ",
" + Twine(Upper) + "]
");
1066 bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1067 OperandVector &Operands,
1069 uint64_t &ErrorInfo,
1070 bool MatchingInlineAsm) {
1072 FeatureBitset MissingFeatures;
1074 auto Result = MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
1080 if (validateInstruction(Inst, Operands))
1082 return processInstruction(Inst, IDLoc, Operands, Out);
1083 case Match_MissingFeature: {
1084 assert(MissingFeatures.any() && "Unknown missing features!
");
1085 bool FirstFeature = true;
1087 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
1088 if (MissingFeatures[i]) {
1089 Msg += FirstFeature ? " " : ",
";
1090 Msg += getSubtargetFeatureName(i);
1091 FirstFeature = false;
1094 return Error(IDLoc, Msg);
1096 case Match_MnemonicFail: {
1097 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
1098 std::string Suggestion = RISCVMnemonicSpellCheck(
1099 ((RISCVOperand &)*Operands[0]).getToken(), FBS, 0);
1100 return Error(IDLoc, "unrecognized
instruction mnemonic
" + Suggestion);
1102 case Match_InvalidOperand: {
1103 SMLoc ErrorLoc = IDLoc;
1104 if (ErrorInfo != ~0ULL) {
1105 if (ErrorInfo >= Operands.size())
1108 ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1109 if (ErrorLoc == SMLoc())
1112 return Error(ErrorLoc, "invalid operand
for instruction");
1116 // Handle the case when the error message is of specific type
1117 // other than the generic Match_InvalidOperand, and the
1118 // corresponding operand is missing.
1119 if (Result > FIRST_TARGET_MATCH_RESULT_TY) {
1120 SMLoc ErrorLoc = IDLoc;
1121 if (ErrorInfo != ~0ULL && ErrorInfo >= Operands.size())
1128 case Match_InvalidImmXLenLI:
1130 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1131 return Error(ErrorLoc, "operand must
be a constant 64-
bit integer
");
1133 return generateImmOutOfRangeError(Operands, ErrorInfo,
1134 std::numeric_limits<int32_t>::min(),
1135 std::numeric_limits<uint32_t>::max());
1136 case Match_InvalidImmZero: {
1137 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1138 return Error(ErrorLoc, "immediate must
be zero");
1140 case Match_InvalidUImmLog2XLen:
1142 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 6) - 1);
1143 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
1144 case Match_InvalidUImmLog2XLenNonZero:
1146 return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 6) - 1);
1147 return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 5) - 1);
1148 case Match_InvalidUImmLog2XLenHalf:
1150 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
1151 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 4) - 1);
1152 case Match_InvalidUImm2:
1153 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 2) - 1);
1154 case Match_InvalidUImm3:
1155 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 3) - 1);
1156 case Match_InvalidUImm5:
1157 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
1158 case Match_InvalidUImm7:
1159 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 7) - 1);
1160 case Match_InvalidSImm5:
1161 return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 4),
1163 case Match_InvalidSImm6:
1164 return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 5),
1166 case Match_InvalidSImm6NonZero:
1167 return generateImmOutOfRangeError(
1168 Operands, ErrorInfo, -(1 << 5), (1 << 5) - 1,
1169 "immediate must
be non-
zero in the range
");
1170 case Match_InvalidCLUIImm:
1171 return generateImmOutOfRangeError(
1172 Operands, ErrorInfo, 1, (1 << 5) - 1,
1173 "immediate must
be in [0xfffe0, 0xfffff]
or");
1174 case Match_InvalidUImm7Lsb00:
1175 return generateImmOutOfRangeError(
1176 Operands, ErrorInfo, 0, (1 << 7) - 4,
1177 "immediate must
be a multiple
of 4 bytes
in the range
");
1178 case Match_InvalidUImm8Lsb00:
1179 return generateImmOutOfRangeError(
1180 Operands, ErrorInfo, 0, (1 << 8) - 4,
1181 "immediate must
be a multiple
of 4 bytes
in the range
");
1182 case Match_InvalidUImm8Lsb000:
1183 return generateImmOutOfRangeError(
1184 Operands, ErrorInfo, 0, (1 << 8) - 8,
1185 "immediate must
be a multiple
of 8 bytes
in the range
");
1186 case Match_InvalidSImm9Lsb0:
1187 return generateImmOutOfRangeError(
1188 Operands, ErrorInfo, -(1 << 8), (1 << 8) - 2,
1189 "immediate must
be a multiple
of 2 bytes
in the range
");
1190 case Match_InvalidUImm9Lsb000:
1191 return generateImmOutOfRangeError(
1192 Operands, ErrorInfo, 0, (1 << 9) - 8,
1193 "immediate must
be a multiple
of 8 bytes
in the range
");
1194 case Match_InvalidUImm10Lsb00NonZero:
1195 return generateImmOutOfRangeError(
1196 Operands, ErrorInfo, 4, (1 << 10) - 4,
1197 "immediate must
be a multiple
of 4 bytes
in the range
");
1198 case Match_InvalidSImm10Lsb0000NonZero:
1199 return generateImmOutOfRangeError(
1200 Operands, ErrorInfo, -(1 << 9), (1 << 9) - 16,
1201 "immediate must
be a multiple
of 16 bytes
and non-
zero in the range
");
1202 case Match_InvalidSImm12:
1203 return generateImmOutOfRangeError(
1204 Operands, ErrorInfo, -(1 << 11), (1 << 11) - 1,
1205 "operand must
be a symbol with %lo/%pcrel_lo/%tprel_lo modifier
or an
"
1206 "integer
in the range
");
1207 case Match_InvalidSImm12Lsb0:
1208 return generateImmOutOfRangeError(
1209 Operands, ErrorInfo, -(1 << 11), (1 << 11) - 2,
1210 "immediate must
be a multiple
of 2 bytes
in the range
");
1211 case Match_InvalidSImm12Lsb00000:
1212 return generateImmOutOfRangeError(
1213 Operands, ErrorInfo, -(1 << 11), (1 << 11) - 32,
1214 "immediate must
be a multiple
of 32 bytes
in the range
");
1215 case Match_InvalidSImm13Lsb0:
1216 return generateImmOutOfRangeError(
1217 Operands, ErrorInfo, -(1 << 12), (1 << 12) - 2,
1218 "immediate must
be a multiple
of 2 bytes
in the range
");
1219 case Match_InvalidUImm20LUI:
1220 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 20) - 1,
1221 "operand must
be a symbol with
"
1222 "%hi/%tprel_hi modifier
or an integer
in "
1224 case Match_InvalidUImm20AUIPC:
1225 return generateImmOutOfRangeError(
1226 Operands, ErrorInfo, 0, (1 << 20) - 1,
1227 "operand must
be a symbol with
a "
1228 "%pcrel_hi/%got_pcrel_hi/%tls_ie_pcrel_hi/%tls_gd_pcrel_hi modifier
or "
1229 "an integer
in the range
");
1230 case Match_InvalidSImm21Lsb0JAL:
1231 return generateImmOutOfRangeError(
1232 Operands, ErrorInfo, -(1 << 20), (1 << 20) - 2,
1233 "immediate must
be a multiple
of 2 bytes
in the range
");
1234 case Match_InvalidCSRSystemRegister: {
1235 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 12) - 1,
1236 "operand must
be a valid system
register "
1237 "name or an integer
in the range
");
1239 case Match_InvalidFenceArg: {
1240 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1241 return Error(ErrorLoc, "operand must
be formed
of letters selected
"
1242 "in-order from
'iorw' or be 0
");
1244 case Match_InvalidFRMArg: {
1245 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1248 "operand must
be a valid floating point rounding
mode mnemonic
");
1250 case Match_InvalidBareSymbol: {
1251 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1252 return Error(ErrorLoc, "operand must
be a bare symbol
name");
1254 case Match_InvalidPseudoJumpSymbol: {
1255 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1256 return Error(ErrorLoc, "operand must
be a valid
jump target
");
1258 case Match_InvalidCallSymbol: {
1259 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1260 return Error(ErrorLoc, "operand must
be a bare symbol
name");
1262 case Match_InvalidTPRelAddSymbol: {
1263 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1264 return Error(ErrorLoc, "operand must
be a symbol with %tprel_add modifier
");
1266 case Match_InvalidVTypeI: {
1267 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1271 "e[8|16|32|64|128|256|512|1024],m[1|2|4|8|
f2|f4|f8],[ta|tu],[ma|mu]
");
1273 case Match_InvalidVMaskRegister: {
1274 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
1275 return Error(ErrorLoc, "operand must
be v0.t
");
1277 case Match_InvalidSImm5Plus1: {
1278 return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 4) + 1,
1280 "immediate must
be in the range
");
1282 case Match_InvalidRnumArg: {
1283 return generateImmOutOfRangeError(Operands, ErrorInfo, 0, 10);
1290 // Attempts to match Name as a register (either using the default name or
1291 // alternative ABI names), setting RegNo to the matching register. Upon
1292 // failure, returns true and sets RegNo to 0. If IsRV32E then registers
1293 // x16-x31 will be rejected.
// NOTE(review): the 'StringRef Name' parameter line (original 1295) is
// elided by the documentation extractor.
1294 static bool matchRegisterNameHelper(bool IsRV32E, MCRegister &RegNo,
1296 RegNo = MatchRegisterName(Name);
1297 // The 16-/32- and 64-bit FPRs have the same asm name. Check that the initial
1298 // match always matches the 64-bit variant, and not the 16/32-bit one.
1299 assert(!(RegNo >= RISCV::F0_H && RegNo <= RISCV::F31_H));
1300 assert(!(RegNo >= RISCV::F0_F && RegNo <= RISCV::F31_F));
1301 // The default FPR register class is based on the tablegen enum ordering.
1302 static_assert(RISCV::F0_D < RISCV::F0_H, "FPR matching must
be updated
");
1303 static_assert(RISCV::F0_D < RISCV::F0_F, "FPR matching must
be updated
");
// Canonical name failed; fall back to the ABI/alternative register names.
1304 if (RegNo == RISCV::NoRegister)
1305 RegNo = MatchRegisterAltName(Name);
// RV32E only provides x0-x15; reject the upper half of the GPR file.
1306 if (IsRV32E && RegNo >= RISCV::X16 && RegNo <= RISCV::X31)
1307 RegNo = RISCV::NoRegister;
1308 return RegNo == RISCV::NoRegister;
// MCTargetAsmParser entry point: parse a register, reporting a hard error
// ("invalid register name") when tryParseRegister does not succeed.
// NOTE(review): the EndLoc parameter line and the success-path return are
// elided by the documentation extractor; the error-message literal below is
// split across lines by it.
1311 bool RISCVAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1313 if (tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success)
1314 return Error(StartLoc, "invalid
register name");
// Non-erroring register parse: records the token's source range, tries to
// resolve the current identifier as a register name, and only consumes the
// token on success. Returns MatchOperand_NoMatch without consuming input
// when the identifier is not a register.
// NOTE(review): the remaining parameter lines (originals 1319-1320) and an
// identifier-token guard (original 1324) are elided by the extractor.
1318 OperandMatchResultTy RISCVAsmParser::tryParseRegister(unsigned &RegNo,
1321 const AsmToken &Tok = getParser().getTok();
1322 StartLoc = Tok.getLoc();
1323 EndLoc = Tok.getEndLoc();
1325 StringRef Name = getLexer().getTok().getIdentifier();
// matchRegisterNameHelper returns true on failure (RegNo == NoRegister).
1327 if (matchRegisterNameHelper(isRV32E(), (MCRegister &)RegNo, Name))
1328 return MatchOperand_NoMatch;
1330 getParser().Lex(); // Eat identifier token.
1331 return MatchOperand_Success;
// Parse a register operand, optionally accepting a parenthesised form
// "(reg)" (AllowParens). Uses two-token lookahead to detect "(reg)", and
// un-lexes the '(' to back out without consuming input when the contents do
// not name a register.
// NOTE(review): many lines are elided by the documentation extractor
// (parameter list, Buf/LParen declarations, several case labels and closing
// braces); the "(" and ")" token literals below are split across lines by it.
1334 OperandMatchResultTy RISCVAsmParser::parseRegister(OperandVector &Operands,
1336 SMLoc FirstS = getLoc();
1337 bool HadParens = false;
1340 // If this is an LParen and a parenthesised register name is allowed, parse it
1342 if (AllowParens && getLexer().is(AsmToken::LParen)) {
1344 size_t ReadCount = getLexer().peekTokens(Buf);
1345 if (ReadCount == 2 && Buf[1].getKind() == AsmToken::RParen) {
// Remember the '(' so it can be un-lexed if parsing backs out.
1347 LParen = getParser().getTok();
1348 getParser().Lex(); // Eat '('
1352 switch (getLexer().getKind()) {
// Not an identifier: push the '(' back and report no match.
1355 getLexer().UnLex(LParen);
1356 return MatchOperand_NoMatch;
1357 case AsmToken::Identifier:
1358 StringRef Name = getLexer().getTok().getIdentifier();
1360 matchRegisterNameHelper(isRV32E(), RegNo, Name);
1362 if (RegNo == RISCV::NoRegister) {
1364 getLexer().UnLex(LParen);
1365 return MatchOperand_NoMatch;
1368 Operands.push_back(RISCVOperand::createToken("(
", FirstS, isRV64()));
1370 SMLoc E = SMLoc::getFromPointer(S.getPointer() + Name.size());
1372 Operands.push_back(RISCVOperand::createReg(RegNo, S, E, isRV64()));
1376 getParser().Lex(); // Eat ')'
1377 Operands.push_back(RISCVOperand::createToken(")
", getLoc(), isRV64()));
1380 return MatchOperand_Success;
1383 OperandMatchResultTy
1384 RISCVAsmParser::parseInsnDirectiveOpcode(OperandVector &Operands) {
1389 switch (getLexer().getKind()) {
1391 return MatchOperand_NoMatch;
1392 case AsmToken::LParen:
1393 case AsmToken::Minus:
1394 case AsmToken::Plus:
1395 case AsmToken::Exclaim:
1396 case AsmToken::Tilde:
1397 case AsmToken::Integer:
1398 case AsmToken::String: {
1399 if (getParser().parseExpression(Res, E))
1400 return MatchOperand_ParseFail;
1402 auto *CE = dyn_cast<MCConstantExpr>(Res);
1404 int64_t Imm = CE->getValue();
1405 if (isUInt<7>(Imm)) {
1406 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1407 return MatchOperand_Success;
1411 Twine Msg = "immediate must
be an integer
in the range
";
1412 Error(S, Msg + " [
" + Twine(0) + ",
" + Twine((1 << 7) - 1) + "]
");
1413 return MatchOperand_ParseFail;
1415 case AsmToken::Identifier: {
1416 StringRef Identifier;
1417 if (getParser().parseIdentifier(Identifier))
1418 return MatchOperand_ParseFail;
1420 auto Opcode = RISCVInsnOpcode::lookupRISCVOpcodeByName(Identifier);
1422 Res = MCConstantExpr::create(Opcode->Value, getContext());
1423 E = SMLoc::getFromPointer(S.getPointer() + Identifier.size());
1424 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1425 return MatchOperand_Success;
1428 Twine Msg = "operand must
be a valid opcode
name or an
"
1429 "integer
in the range
";
1430 Error(S, Msg + " [
" + Twine(0) + ",
" + Twine((1 << 7) - 1) + "]
");
1431 return MatchOperand_ParseFail;
1433 case AsmToken::Percent: {
1434 // Discard operand with modifier.
1435 Twine Msg = "immediate must
be an integer
in the range
";
1436 Error(S, Msg + " [
" + Twine(0) + ",
" + Twine((1 << 7) - 1) + "]
");
1437 return MatchOperand_ParseFail;
1441 return MatchOperand_NoMatch;
1444 OperandMatchResultTy
1445 RISCVAsmParser::parseCSRSystemRegister(OperandVector &Operands) {
1449 switch (getLexer().getKind()) {
1451 return MatchOperand_NoMatch;
1452 case AsmToken::LParen:
1453 case AsmToken::Minus:
1454 case AsmToken::Plus:
1455 case AsmToken::Exclaim:
1456 case AsmToken::Tilde:
1457 case AsmToken::Integer:
1458 case AsmToken::String: {
1459 if (getParser().parseExpression(Res))
1460 return MatchOperand_ParseFail;
1462 auto *CE = dyn_cast<MCConstantExpr>(Res);
1464 int64_t Imm = CE->getValue();
1465 if (isUInt<12>(Imm)) {
1466 auto SysReg = RISCVSysReg::lookupSysRegByEncoding(Imm);
1467 // Accept an immediate representing a named or un-named Sys Reg
1468 // if the range is valid, regardless of the required features.
1469 Operands.push_back(RISCVOperand::createSysReg(
1470 SysReg ? SysReg->Name : "", S, Imm, isRV64()));
1471 return MatchOperand_Success;
1475 Twine Msg = "immediate must
be an integer
in the range
";
1476 Error(S, Msg + " [
" + Twine(0) + ",
" + Twine((1 << 12) - 1) + "]
");
1477 return MatchOperand_ParseFail;
1479 case AsmToken::Identifier: {
1480 StringRef Identifier;
1481 if (getParser().parseIdentifier(Identifier))
1482 return MatchOperand_ParseFail;
1484 auto SysReg = RISCVSysReg::lookupSysRegByName(Identifier);
1486 SysReg = RISCVSysReg::lookupSysRegByAltName(Identifier);
1488 if ((SysReg = RISCVSysReg::lookupSysRegByDeprecatedName(Identifier)))
1489 Warning(S, "'" + Identifier + "' is a deprecated alias
for '" +
1490 SysReg->Name + "'");
1492 // Accept a named Sys Reg if the required features are present.
1494 if (!SysReg->haveRequiredFeatures(getSTI().getFeatureBits())) {
1495 Error(S, "system
register use requires an option
to be enabled
");
1496 return MatchOperand_ParseFail;
1498 Operands.push_back(RISCVOperand::createSysReg(
1499 Identifier, S, SysReg->Encoding, isRV64()));
1500 return MatchOperand_Success;
1503 Twine Msg = "operand must
be a valid system
register name "
1504 "or an integer
in the range
";
1505 Error(S, Msg + " [
" + Twine(0) + ",
" + Twine((1 << 12) - 1) + "]
");
1506 return MatchOperand_ParseFail;
1508 case AsmToken::Percent: {
1509 // Discard operand with modifier.
1510 Twine Msg = "immediate must
be an integer
in the range
";
1511 Error(S, Msg + " [
" + Twine(0) + ",
" + Twine((1 << 12) - 1) + "]
");
1512 return MatchOperand_ParseFail;
1516 return MatchOperand_NoMatch;
1519 OperandMatchResultTy RISCVAsmParser::parseImmediate(OperandVector &Operands) {
1524 switch (getLexer().getKind()) {
1526 return MatchOperand_NoMatch;
1527 case AsmToken::LParen:
1529 case AsmToken::Minus:
1530 case AsmToken::Plus:
1531 case AsmToken::Exclaim:
1532 case AsmToken::Tilde:
1533 case AsmToken::Integer:
1534 case AsmToken::String:
1535 case AsmToken::Identifier:
1536 if (getParser().parseExpression(Res, E))
1537 return MatchOperand_ParseFail;
1539 case AsmToken::Percent:
1540 return parseOperandWithModifier(Operands);
1543 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1544 return MatchOperand_Success;
1547 OperandMatchResultTy
1548 RISCVAsmParser::parseOperandWithModifier(OperandVector &Operands) {
1552 if (getLexer().getKind() != AsmToken::Percent) {
1553 Error(getLoc(), "expected
'%' for operand modifier
");
1554 return MatchOperand_ParseFail;
1557 getParser().Lex(); // Eat '%'
1559 if (getLexer().getKind() != AsmToken::Identifier) {
1560 Error(getLoc(), "expected valid identifier
for operand modifier
");
1561 return MatchOperand_ParseFail;
1563 StringRef Identifier = getParser().getTok().getIdentifier();
1564 RISCVMCExpr::VariantKind VK = RISCVMCExpr::getVariantKindForName(Identifier);
1565 if (VK == RISCVMCExpr::VK_RISCV_Invalid) {
1566 Error(getLoc(), "unrecognized operand modifier
");
1567 return MatchOperand_ParseFail;
1570 getParser().Lex(); // Eat the identifier
1571 if (getLexer().getKind() != AsmToken::LParen) {
1572 Error(getLoc(), "expected
'('");
1573 return MatchOperand_ParseFail;
1575 getParser().Lex(); // Eat '('
1577 const MCExpr *SubExpr;
1578 if (getParser().parseParenExpression(SubExpr, E)) {
1579 return MatchOperand_ParseFail;
1582 const MCExpr *ModExpr = RISCVMCExpr::create(SubExpr, VK, getContext());
1583 Operands.push_back(RISCVOperand::createImm(ModExpr, S, E, isRV64()));
1584 return MatchOperand_Success;
1587 OperandMatchResultTy RISCVAsmParser::parseBareSymbol(OperandVector &Operands) {
1591 if (getLexer().getKind() != AsmToken::Identifier)
1592 return MatchOperand_NoMatch;
1594 StringRef Identifier;
1595 AsmToken Tok = getLexer().getTok();
1597 if (getParser().parseIdentifier(Identifier))
1598 return MatchOperand_ParseFail;
1600 SMLoc E = SMLoc::getFromPointer(S.getPointer() + Identifier.size());
1602 if (Identifier.consume_back("@plt
")) {
1604 return MatchOperand_ParseFail;
1607 MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
1609 if (Sym->isVariable()) {
1610 const MCExpr *V = Sym->getVariableValue(/*SetUsed=*/false);
1611 if (!isa<MCSymbolRefExpr>(V)) {
1612 getLexer().UnLex(Tok); // Put back if it's not a bare symbol.
1613 return MatchOperand_NoMatch;
1617 Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
1619 MCBinaryExpr::Opcode Opcode;
1620 switch (getLexer().getKind()) {
1622 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1623 return MatchOperand_Success;
1624 case AsmToken::Plus:
1625 Opcode = MCBinaryExpr::Add;
1628 case AsmToken::Minus:
1629 Opcode = MCBinaryExpr::Sub;
1635 if (getParser().parseExpression(Expr, E))
1636 return MatchOperand_ParseFail;
1637 Res = MCBinaryExpr::create(Opcode, Res, Expr, getContext());
1638 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1639 return MatchOperand_Success;
1642 OperandMatchResultTy RISCVAsmParser::parseCallSymbol(OperandVector &Operands) {
1646 if (getLexer().getKind() != AsmToken::Identifier)
1647 return MatchOperand_NoMatch;
1649 // Avoid parsing the register in `call rd, foo` as a call symbol.
1650 if (getLexer().peekTok().getKind() != AsmToken::EndOfStatement)
1651 return MatchOperand_NoMatch;
1653 StringRef Identifier;
1654 if (getParser().parseIdentifier(Identifier))
1655 return MatchOperand_ParseFail;
1657 SMLoc E = SMLoc::getFromPointer(S.getPointer() + Identifier.size());
1659 RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL;
1660 if (Identifier.consume_back("@plt
"))
1661 Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
1663 MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
1664 Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
1665 Res = RISCVMCExpr::create(Res, Kind, getContext());
1666 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1667 return MatchOperand_Success;
1670 OperandMatchResultTy
1671 RISCVAsmParser::parsePseudoJumpSymbol(OperandVector &Operands) {
1676 if (getParser().parseExpression(Res, E))
1677 return MatchOperand_ParseFail;
1679 if (Res->getKind() != MCExpr::ExprKind::SymbolRef ||
1680 cast<MCSymbolRefExpr>(Res)->getKind() ==
1681 MCSymbolRefExpr::VariantKind::VK_PLT) {
1682 Error(S, "operand must
be a valid
jump target
");
1683 return MatchOperand_ParseFail;
1686 Res = RISCVMCExpr::create(Res, RISCVMCExpr::VK_RISCV_CALL, getContext());
1687 Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
1688 return MatchOperand_Success;
1691 OperandMatchResultTy RISCVAsmParser::parseJALOffset(OperandVector &Operands) {
1692 // Parsing jal operands is fiddly due to the `jal foo` and `jal ra, foo`
1693 // both being acceptable forms. When parsing `jal ra, foo` this function
1694 // will be called for the `ra` register operand in an attempt to match the
1695 // single-operand alias. parseJALOffset must fail for this case. It would
1696 // seem logical to try parse the operand using parseImmediate and return
1697 // NoMatch if the next token is a comma (meaning we must be parsing a jal in
1698 // the second form rather than the first). We can't do this as there's no
1699 // way of rewinding the lexer state. Instead, return NoMatch if this operand
1700 // is an identifier and is followed by a comma.
1701 if (getLexer().is(AsmToken::Identifier) &&
1702 getLexer().peekTok().is(AsmToken::Comma))
1703 return MatchOperand_NoMatch;
1705 return parseImmediate(Operands);
1708 OperandMatchResultTy RISCVAsmParser::parseVTypeI(OperandVector &Operands) {
1710 if (getLexer().isNot(AsmToken::Identifier))
1711 return MatchOperand_NoMatch;
1713 SmallVector<AsmToken, 7> VTypeIElements;
1714 // Put all the tokens for vtypei operand into VTypeIElements vector.
1715 while (getLexer().isNot(AsmToken::EndOfStatement)) {
1716 VTypeIElements.push_back(getLexer().getTok());
1718 if (getLexer().is(AsmToken::EndOfStatement))
1720 if (getLexer().isNot(AsmToken::Comma))
1722 AsmToken Comma = getLexer().getTok();
1723 VTypeIElements.push_back(Comma);
1727 if (VTypeIElements.size() == 7) {
1728 // The VTypeIElements layout is:
1729 // SEW comma LMUL comma TA comma MA
1731 StringRef Name = VTypeIElements[0].getIdentifier();
1732 if (!Name.consume_front("e"))
1735 if (Name.getAsInteger(10, Sew))
1737 if (!RISCVVType::isValidSEW(Sew))
1740 Name = VTypeIElements[2].getIdentifier();
1741 if (!Name.consume_front("m
"))
1744 bool Fractional = Name.consume_front("f");
1746 if (Name.getAsInteger(10, Lmul))
1748 if (!RISCVVType::isValidLMUL(Lmul, Fractional))
1752 Name = VTypeIElements[4].getIdentifier();
1755 TailAgnostic = true;
1756 else if (Name == "tu
")
1757 TailAgnostic = false;
1762 Name = VTypeIElements[6].getIdentifier();
1765 MaskAgnostic = true;
1766 else if (Name == "mu
")
1767 MaskAgnostic = false;
1771 RISCVII::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional);
1774 RISCVVType::encodeVTYPE(VLMUL, Sew, TailAgnostic, MaskAgnostic);
1775 Operands.push_back(RISCVOperand::createVType(VTypeI, S, isRV64()));
1776 return MatchOperand_Success;
1779 // If NoMatch, unlex all the tokens that comprise a vtypei operand
1781 while (!VTypeIElements.empty())
1782 getLexer().UnLex(VTypeIElements.pop_back_val());
1783 return MatchOperand_NoMatch;
1786 OperandMatchResultTy RISCVAsmParser::parseMaskReg(OperandVector &Operands) {
1787 switch (getLexer().getKind()) {
1789 return MatchOperand_NoMatch;
1790 case AsmToken::Identifier:
1791 StringRef Name = getLexer().getTok().getIdentifier();
1792 if (!Name.consume_back(".
t")) {
1793 Error(getLoc(), "expected
'.t' suffix
");
1794 return MatchOperand_ParseFail;
1797 matchRegisterNameHelper(isRV32E(), RegNo, Name);
1799 if (RegNo == RISCV::NoRegister)
1800 return MatchOperand_NoMatch;
1801 if (RegNo != RISCV::V0)
1802 return MatchOperand_NoMatch;
1804 SMLoc E = SMLoc::getFromPointer(S.getPointer() + Name.size());
1806 Operands.push_back(RISCVOperand::createReg(RegNo, S, E, isRV64()));
1809 return MatchOperand_Success;
1812 OperandMatchResultTy RISCVAsmParser::parseGPRAsFPR(OperandVector &Operands) {
1813 switch (getLexer().getKind()) {
1815 return MatchOperand_NoMatch;
1816 case AsmToken::Identifier:
1817 StringRef Name = getLexer().getTok().getIdentifier();
1819 matchRegisterNameHelper(isRV32E(), RegNo, Name);
1821 if (RegNo == RISCV::NoRegister)
1822 return MatchOperand_NoMatch;
1824 SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
1826 Operands.push_back(RISCVOperand::createReg(
1827 RegNo, S, E, isRV64(), !getSTI().hasFeature(RISCV::FeatureStdExtF)));
1829 return MatchOperand_Success;
1832 OperandMatchResultTy
1833 RISCVAsmParser::parseMemOpBaseReg(OperandVector &Operands) {
1834 if (getLexer().isNot(AsmToken::LParen)) {
1835 Error(getLoc(), "expected
'('");
1836 return MatchOperand_ParseFail;
1839 getParser().Lex(); // Eat '('
1840 Operands.push_back(RISCVOperand::createToken("(
", getLoc(), isRV64()));
1842 if (parseRegister(Operands) != MatchOperand_Success) {
1843 Error(getLoc(), "expected
register");
1844 return MatchOperand_ParseFail;
1847 if (getLexer().isNot(AsmToken::RParen)) {
1848 Error(getLoc(), "expected
')'");
1849 return MatchOperand_ParseFail;
1852 getParser().Lex(); // Eat ')'
1853 Operands.push_back(RISCVOperand::createToken(")
", getLoc(), isRV64()));
1855 return MatchOperand_Success;
1858 OperandMatchResultTy
1859 RISCVAsmParser::parseZeroOffsetMemOp(OperandVector &Operands) {
1860 // Atomic operations such as lr.w, sc.w, and amo*.w accept a "memory operand
"
1861 // as one of their register operands, such as `(a0)`. This just denotes that
1862 // the register (in this case `a0`) contains a memory address.
1864 // Normally, we would be able to parse these by putting the parens into the
1865 // instruction string. However, GNU as also accepts a zero-offset memory
1866 // operand (such as `0(a0)`), and ignores the 0. Normally this would be parsed
1867 // with parseImmediate followed by parseMemOpBaseReg, but these instructions
1868 // do not accept an immediate operand, and we do not want to add a "dummy
"
1869 // operand that is silently dropped.
1871 // Instead, we use this custom parser. This will: allow (and discard) an
1872 // offset if it is zero; require (and discard) parentheses; and add only the
1873 // parsed register operand to `Operands`.
1875 // These operands are printed with RISCVInstPrinter::printZeroOffsetMemOp,
1876 // which will only print the register surrounded by parentheses (which GNU as
1877 // also uses as its canonical representation for these operands).
1878 std::unique_ptr<RISCVOperand> OptionalImmOp;
1880 if (getLexer().isNot(AsmToken::LParen)) {
1881 // Parse an Integer token. We do not accept arbritrary constant expressions
1882 // in the offset field (because they may include parens, which complicates
1885 SMLoc ImmStart = getLoc();
1886 if (getParser().parseIntToken(ImmVal,
1887 "expected
'(' or optional integer offset
"))
1888 return MatchOperand_ParseFail;
1890 // Create a RISCVOperand for checking later (so the error messages are
1891 // nicer), but we don't add it to Operands.
1892 SMLoc ImmEnd = getLoc();
1894 RISCVOperand::createImm(MCConstantExpr::create(ImmVal, getContext()),
1895 ImmStart, ImmEnd, isRV64());
1898 if (getLexer().isNot(AsmToken::LParen)) {
1899 Error(getLoc(), OptionalImmOp ? "expected
'(' after optional integer offset
"
1900 : "expected
'(' or optional integer offset
");
1901 return MatchOperand_ParseFail;
1903 getParser().Lex(); // Eat '('
1905 if (parseRegister(Operands) != MatchOperand_Success) {
1906 Error(getLoc(), "expected
register");
1907 return MatchOperand_ParseFail;
1910 if (getLexer().isNot(AsmToken::RParen)) {
1911 Error(getLoc(), "expected
')'");
1912 return MatchOperand_ParseFail;
1914 getParser().Lex(); // Eat ')'
1916 // Deferred Handling of non-zero offsets. This makes the error messages nicer.
1917 if (OptionalImmOp && !OptionalImmOp->isImmZero()) {
1918 Error(OptionalImmOp->getStartLoc(), "optional integer offset must
be 0
",
1919 SMRange(OptionalImmOp->getStartLoc(), OptionalImmOp->getEndLoc()));
1920 return MatchOperand_ParseFail;
1923 return MatchOperand_Success;
1929 bool RISCVAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
1930 // Check if the current operand has a custom associated parser, if so, try to
1931 // custom parse the operand, or fallback to the general approach.
1932 OperandMatchResultTy Result =
1933 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
1934 if (Result == MatchOperand_Success)
1936 if (Result == MatchOperand_ParseFail)
1939 // Attempt to parse token as a register.
1940 if (parseRegister(Operands, true) == MatchOperand_Success)
1943 // Attempt to parse token as an immediate
1944 if (parseImmediate(Operands) == MatchOperand_Success) {
1945 // Parse memory base register if present
1946 if (getLexer().is(AsmToken::LParen))
1947 return parseMemOpBaseReg(Operands) != MatchOperand_Success;
1951 // Finally we have exhausted all options and must declare defeat.
1952 Error(getLoc(), "
unknown operand
");
1956 bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1957 StringRef Name, SMLoc NameLoc,
1958 OperandVector &Operands) {
1959 // Ensure that if the instruction occurs when relaxation is enabled,
1960 // relocations are forced for the file. Ideally this would be done when there
1961 // is enough information to reliably determine if the instruction itself may
1962 // cause relaxations. Unfortunately instruction processing stage occurs in the
1963 // same pass as relocation emission, so it's too late to set a 'sticky bit'
1964 // for the entire file.
1965 if (getSTI().getFeatureBits()[RISCV::FeatureRelax]) {
1966 auto *Assembler = getTargetStreamer().getStreamer().getAssemblerPtr();
1967 if (Assembler != nullptr) {
1968 RISCVAsmBackend &MAB =
1969 static_cast<RISCVAsmBackend &>(Assembler->getBackend());
1970 MAB.setForceRelocs();
1974 // First operand is token for instruction
1975 Operands.push_back(RISCVOperand::createToken(Name, NameLoc, isRV64()));
1977 // If there are no more operands, then finish
1978 if (getLexer().is(AsmToken::EndOfStatement)) {
1979 getParser().Lex(); // Consume the EndOfStatement.
1983 // Parse first operand
1984 if (parseOperand(Operands, Name))
1987 // Parse until end of statement, consuming commas between operands
1988 while (getLexer().is(AsmToken::Comma)) {
1989 // Consume comma token
1992 // Parse next operand
1993 if (parseOperand(Operands, Name))
1997 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1998 SMLoc Loc = getLexer().getLoc();
1999 getParser().eatToEndOfStatement();
2000 return Error(Loc, "unexpected token
");
2003 getParser().Lex(); // Consume the EndOfStatement.
2007 bool RISCVAsmParser::classifySymbolRef(const MCExpr *Expr,
2008 RISCVMCExpr::VariantKind &Kind) {
2009 Kind = RISCVMCExpr::VK_RISCV_None;
2011 if (const RISCVMCExpr *RE = dyn_cast<RISCVMCExpr>(Expr)) {
2012 Kind = RE->getKind();
2013 Expr = RE->getSubExpr();
2018 if (Expr->evaluateAsRelocatable(Res, nullptr, &Fixup))
2019 return Res.getRefKind() == RISCVMCExpr::VK_RISCV_None;
2023 bool RISCVAsmParser::ParseDirective(AsmToken DirectiveID) {
2024 // This returns false if this function recognizes the directive
2025 // regardless of whether it is successfully handles or reports an
2026 // error. Otherwise it returns true to give the generic parser a
2027 // chance at recognizing it.
2028 StringRef IDVal = DirectiveID.getString();
2030 if (IDVal == ".option
")
2031 return parseDirectiveOption();
2032 if (IDVal == ".attribute
")
2033 return parseDirectiveAttribute();
2034 if (IDVal == ".insn
")
2035 return parseDirectiveInsn(DirectiveID.getLoc());
2040 bool RISCVAsmParser::parseDirectiveOption() {
2041 MCAsmParser &Parser = getParser();
2042 // Get the option token.
2043 AsmToken Tok = Parser.getTok();
2044 // At the moment only identifiers are supported.
2045 if (Tok.isNot(AsmToken::Identifier))
2046 return Error(Parser.getTok().getLoc(),
2047 "unexpected token, expected identifier
");
2049 StringRef Option = Tok.getIdentifier();
2051 if (Option == "push") {
2052 getTargetStreamer().emitDirectiveOptionPush();
2055 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2056 return Error(Parser.getTok().getLoc(),
2057 "unexpected token, expected
end of statement
");
2063 if (Option == "pop
") {
2064 SMLoc StartLoc = Parser.getTok().getLoc();
2065 getTargetStreamer().emitDirectiveOptionPop();
2068 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2069 return Error(Parser.getTok().getLoc(),
2070 "unexpected token, expected
end of statement
");
2072 if (popFeatureBits())
2073 return Error(StartLoc, ".option pop with no .option
push");
2078 if (Option == "rvc
") {
2079 getTargetStreamer().emitDirectiveOptionRVC();
2082 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2083 return Error(Parser.getTok().getLoc(),
2084 "unexpected token, expected
end of statement
");
2086 setFeatureBits(RISCV::FeatureStdExtC, "c");
2090 if (Option == "norvc
") {
2091 getTargetStreamer().emitDirectiveOptionNoRVC();
2094 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2095 return Error(Parser.getTok().getLoc(),
2096 "unexpected token, expected
end of statement
");
2098 clearFeatureBits(RISCV::FeatureStdExtC, "c");
2102 if (Option == "pic
") {
2103 getTargetStreamer().emitDirectiveOptionPIC();
2106 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2107 return Error(Parser.getTok().getLoc(),
2108 "unexpected token, expected
end of statement
");
2110 ParserOptions.IsPicEnabled = true;
2114 if (Option == "nopic
") {
2115 getTargetStreamer().emitDirectiveOptionNoPIC();
2118 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2119 return Error(Parser.getTok().getLoc(),
2120 "unexpected token, expected
end of statement
");
2122 ParserOptions.IsPicEnabled = false;
2126 if (Option == "relax
") {
2127 getTargetStreamer().emitDirectiveOptionRelax();
2130 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2131 return Error(Parser.getTok().getLoc(),
2132 "unexpected token, expected
end of statement
");
2134 setFeatureBits(RISCV::FeatureRelax, "relax
");
2138 if (Option == "norelax
") {
2139 getTargetStreamer().emitDirectiveOptionNoRelax();
2142 if (Parser.getTok().isNot(AsmToken::EndOfStatement))
2143 return Error(Parser.getTok().getLoc(),
2144 "unexpected token, expected
end of statement
");
2146 clearFeatureBits(RISCV::FeatureRelax, "relax
");
2151 Warning(Parser.getTok().getLoc(),
2152 "unknown option, expected
'push',
'pop',
'rvc',
'norvc',
'relax' or "
2154 Parser.eatToEndOfStatement();
2161 bool RISCVAsmParser::parseDirectiveAttribute() {
2162 MCAsmParser &Parser = getParser();
2165 TagLoc = Parser.getTok().getLoc();
2166 if (Parser.getTok().is(AsmToken::Identifier)) {
2167 StringRef Name = Parser.getTok().getIdentifier();
2168 Optional<unsigned> Ret =
2169 ELFAttrs::attrTypeFromString(Name, RISCVAttrs::getRISCVAttributeTags());
2171 Error(TagLoc, "attribute
name not recognised:
" + Name);
2177 const MCExpr *AttrExpr;
2179 TagLoc = Parser.getTok().getLoc();
2180 if (Parser.parseExpression(AttrExpr))
2183 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
2184 if (check(!CE, TagLoc, "expected numeric
constant"))
2187 Tag = CE->getValue();
2190 if (Parser.parseToken(AsmToken::Comma, "comma expected
"))
2193 StringRef StringValue;
2194 int64_t IntegerValue = 0;
2195 bool IsIntegerValue = true;
2197 // RISC-V attributes have a string value if the tag number is odd
2198 // and an integer value if the tag number is even.
2200 IsIntegerValue = false;
2202 SMLoc ValueExprLoc = Parser.getTok().getLoc();
2203 if (IsIntegerValue) {
2204 const MCExpr *ValueExpr;
2205 if (Parser.parseExpression(ValueExpr))
2208 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
2210 return Error(ValueExprLoc, "expected numeric
constant");
2211 IntegerValue = CE->getValue();
2213 if (Parser.getTok().isNot(AsmToken::String))
2214 return Error(Parser.getTok().getLoc(), "expected
string constant");
2216 StringValue = Parser.getTok().getStringContents();
2220 if (Parser.parseEOL())
2224 getTargetStreamer().emitAttribute(Tag, IntegerValue);
2225 else if (Tag != RISCVAttrs::ARCH)
2226 getTargetStreamer().emitTextAttribute(Tag, StringValue);
2228 StringRef Arch = StringValue;
2229 for (auto Feature : RISCVFeatureKV)
2230 if (llvm::RISCVISAInfo::isSupportedExtensionFeature(Feature.Key))
2231 clearFeatureBits(Feature.Value, Feature.Key);
2233 auto ParseResult = llvm::RISCVISAInfo::parseArchString(
2234 StringValue, /*EnableExperimentalExtension=*/true,
2235 /*ExperimentalExtensionVersionCheck=*/true);
2238 raw_string_ostream OutputErrMsg(Buffer);
2239 handleAllErrors(ParseResult.takeError(), [&](llvm::StringError &ErrMsg) {
2240 OutputErrMsg << "invalid arch
name '" << Arch << "',
"
2241 << ErrMsg.getMessage();
2244 return Error(ValueExprLoc, OutputErrMsg.str());
2246 auto &ISAInfo = *ParseResult;
2248 for (auto Feature : RISCVFeatureKV)
2249 if (ISAInfo->hasExtension(Feature.Key))
2250 setFeatureBits(Feature.Value, Feature.Key);
2252 if (ISAInfo->getXLen() == 32)
2253 clearFeatureBits(RISCV::Feature64Bit, "64
bit");
2254 else if (ISAInfo->getXLen() == 64)
2255 setFeatureBits(RISCV::Feature64Bit, "64
bit");
2257 return Error(ValueExprLoc, "bad arch
string " + Arch);
2259 // Then emit the arch string.
2260 getTargetStreamer().emitTextAttribute(Tag, ISAInfo->toString());
2268 bool RISCVAsmParser::parseDirectiveInsn(SMLoc L) {
2269 MCAsmParser &Parser = getParser();
2271 // Expect instruction format as identifier.
2273 SMLoc ErrorLoc = Parser.getTok().getLoc();
2274 if (Parser.parseIdentifier(Format))
2277 if (Format != "r
" && Format != "r4" && Format != "i" && Format != "b" &&
2278 Format != "sb
" && Format != "u
" && Format != "j" && Format != "uj
" &&
2282 std::string FormatName = (".insn_
" + Format).str();
2284 ParseInstructionInfo Info;
2285 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> Operands;
2287 if (ParseInstruction(Info, FormatName, L, Operands))
2292 return MatchAndEmitInstruction(L, Opcode, Operands, Parser.getStreamer(),
2294 /*MatchingInlineAsm=*/false);
2297 void RISCVAsmParser::emitToStreamer(MCStreamer &S, const MCInst &Inst) {
2299 bool Res = compressInst(CInst, Inst, getSTI(), S.getContext());
2301 ++RISCVNumInstrsCompressed;
2302 S.emitInstruction((Res ? CInst : Inst), getSTI());
2305 void RISCVAsmParser::emitLoadImm(MCRegister DestReg, int64_t Value,
2307 RISCVMatInt::InstSeq Seq =
2308 RISCVMatInt::generateInstSeq(Value, getSTI().getFeatureBits());
2310 MCRegister SrcReg = RISCV::X0;
2311 for (RISCVMatInt::Inst &Inst : Seq) {
2312 switch (Inst.getOpndKind()) {
2313 case RISCVMatInt::Imm:
2315 MCInstBuilder(Inst.Opc).addReg(DestReg).addImm(Inst.Imm));
2317 case RISCVMatInt::RegX0:
2319 Out, MCInstBuilder(Inst.Opc).addReg(DestReg).addReg(SrcReg).addReg(
2322 case RISCVMatInt::RegReg:
2324 Out, MCInstBuilder(Inst.Opc).addReg(DestReg).addReg(SrcReg).addReg(
2327 case RISCVMatInt::RegImm:
2329 Out, MCInstBuilder(Inst.Opc).addReg(DestReg).addReg(SrcReg).addImm(
2334 // Only the first instruction has X0 as its source.
2339 void RISCVAsmParser::emitAuipcInstPair(MCOperand DestReg, MCOperand TmpReg,
2340 const MCExpr *Symbol,
2341 RISCVMCExpr::VariantKind VKHi,
2342 unsigned SecondOpcode, SMLoc IDLoc,
2344 // A pair of instructions for PC-relative addressing; expands to
2345 // TmpLabel: AUIPC TmpReg, VKHi(symbol)
2346 // OP DestReg, TmpReg, %pcrel_lo(TmpLabel)
2347 MCContext &Ctx = getContext();
2349 MCSymbol *TmpLabel = Ctx.createNamedTempSymbol("pcrel_hi
");
2350 Out.emitLabel(TmpLabel);
2352 const RISCVMCExpr *SymbolHi = RISCVMCExpr::create(Symbol, VKHi, Ctx);
2354 Out, MCInstBuilder(RISCV::AUIPC).addOperand(TmpReg).addExpr(SymbolHi));
2356 const MCExpr *RefToLinkTmpLabel =
2357 RISCVMCExpr::create(MCSymbolRefExpr::create(TmpLabel, Ctx),
2358 RISCVMCExpr::VK_RISCV_PCREL_LO, Ctx);
2360 emitToStreamer(Out, MCInstBuilder(SecondOpcode)
2361 .addOperand(DestReg)
2363 .addExpr(RefToLinkTmpLabel));
2366 void RISCVAsmParser::emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc,
2368 // The load local address pseudo-instruction "lla
" is used in PC-relative
2369 // addressing of local symbols:
2370 // lla rdest, symbol
2372 // TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
2373 // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
2374 MCOperand DestReg = Inst.getOperand(0);
2375 const MCExpr *Symbol = Inst.getOperand(1).getExpr();
2376 emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_PCREL_HI,
2377 RISCV::ADDI, IDLoc, Out);
2380 void RISCVAsmParser::emitLoadAddress(MCInst &Inst, SMLoc IDLoc,
2382 // The load address pseudo-instruction "la
" is used in PC-relative and
2383 // GOT-indirect addressing of global symbols:
2385 // expands to either (for non-PIC)
2386 // TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
2387 // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
2389 // TmpLabel: AUIPC rdest, %got_pcrel_hi(symbol)
2390 // Lx rdest, %pcrel_lo(TmpLabel)(rdest)
2391 MCOperand DestReg = Inst.getOperand(0);
2392 const MCExpr *Symbol = Inst.getOperand(1).getExpr();
2393 unsigned SecondOpcode;
2394 RISCVMCExpr::VariantKind VKHi;
2395 if (ParserOptions.IsPicEnabled) {
2396 SecondOpcode = isRV64() ? RISCV::LD : RISCV::LW;
2397 VKHi = RISCVMCExpr::VK_RISCV_GOT_HI;
2399 SecondOpcode = RISCV::ADDI;
2400 VKHi = RISCVMCExpr::VK_RISCV_PCREL_HI;
2402 emitAuipcInstPair(DestReg, DestReg, Symbol, VKHi, SecondOpcode, IDLoc, Out);
2405 void RISCVAsmParser::emitLoadTLSIEAddress(MCInst &Inst, SMLoc IDLoc,
2407 // The load TLS IE address pseudo-instruction "la.tls.ie
" is used in
2408 // initial-exec TLS model addressing of global symbols:
2409 // la.tls.ie rdest, symbol
2411 // TmpLabel: AUIPC rdest, %tls_ie_pcrel_hi(symbol)
2412 // Lx rdest, %pcrel_lo(TmpLabel)(rdest)
2413 MCOperand DestReg = Inst.getOperand(0);
2414 const MCExpr *Symbol = Inst.getOperand(1).getExpr();
2415 unsigned SecondOpcode = isRV64() ? RISCV::LD : RISCV::LW;
2416 emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_TLS_GOT_HI,
2417 SecondOpcode, IDLoc, Out);
2420 void RISCVAsmParser::emitLoadTLSGDAddress(MCInst &Inst, SMLoc IDLoc,
2422 // The load TLS GD address pseudo-instruction "la.tls.gd
" is used in
2423 // global-dynamic TLS model addressing of global symbols:
2424 // la.tls.gd rdest, symbol
2426 // TmpLabel: AUIPC rdest, %tls_gd_pcrel_hi(symbol)
2427 // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
2428 MCOperand DestReg = Inst.getOperand(0);
2429 const MCExpr *Symbol = Inst.getOperand(1).getExpr();
2430 emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_TLS_GD_HI,
2431 RISCV::ADDI, IDLoc, Out);
2434 void RISCVAsmParser::emitLoadStoreSymbol(MCInst &Inst, unsigned Opcode,
2435 SMLoc IDLoc, MCStreamer &Out,
2437 // The load/store pseudo-instruction does a pc-relative load with
2440 // The expansion looks like this
2442 // TmpLabel: AUIPC tmp, %pcrel_hi(symbol)
2443 // [S|L]X rd, %pcrel_lo(TmpLabel)(tmp)
2444 unsigned DestRegOpIdx = HasTmpReg ? 1 : 0;
2445 MCOperand DestReg = Inst.getOperand(DestRegOpIdx);
2446 unsigned SymbolOpIdx = HasTmpReg ? 2 : 1;
2447 MCOperand TmpReg = Inst.getOperand(0);
2448 const MCExpr *Symbol = Inst.getOperand(SymbolOpIdx).getExpr();
2449 emitAuipcInstPair(DestReg, TmpReg, Symbol, RISCVMCExpr::VK_RISCV_PCREL_HI,
2450 Opcode, IDLoc, Out);
2453 void RISCVAsmParser::emitPseudoExtend(MCInst &Inst, bool SignExtend,
2454 int64_t Width, SMLoc IDLoc,
2456 // The sign/zero extend pseudo-instruction does two shifts, with the shift
2457 // amounts dependent on the XLEN.
2459 // The expansion looks like this
2461 // SLLI rd, rs, XLEN - Width
2462 // SR[A|R]I rd, rd, XLEN - Width
2463 MCOperand DestReg = Inst.getOperand(0);
2464 MCOperand SourceReg = Inst.getOperand(1);
2466 unsigned SecondOpcode = SignExtend ? RISCV::SRAI : RISCV::SRLI;
2467 int64_t ShAmt = (isRV64() ? 64 : 32) - Width;
2469 assert(ShAmt > 0 && "Shift amount must
be non-
zero.
");
2471 emitToStreamer(Out, MCInstBuilder(RISCV::SLLI)
2472 .addOperand(DestReg)
2473 .addOperand(SourceReg)
2476 emitToStreamer(Out, MCInstBuilder(SecondOpcode)
2477 .addOperand(DestReg)
2478 .addOperand(DestReg)
2482 void RISCVAsmParser::emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
2484 if (Inst.getNumOperands() == 3) {
2487 // pseudoinstruction: vmsge{u}.vx vd, va, x
2488 // expansion: vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
2489 emitToStreamer(Out, MCInstBuilder(Opcode)
2490 .addOperand(Inst.getOperand(0))
2491 .addOperand(Inst.getOperand(1))
2492 .addOperand(Inst.getOperand(2))
2493 .addReg(RISCV::NoRegister));
2494 emitToStreamer(Out, MCInstBuilder(RISCV::VMNAND_MM)
2495 .addOperand(Inst.getOperand(0))
2496 .addOperand(Inst.getOperand(0))
2497 .addOperand(Inst.getOperand(0)));
2498 } else if (Inst.getNumOperands() == 4) {
2499 // masked va >= x, vd != v0
2501 // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t
2502 // expansion: vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
2503 assert(Inst.getOperand(0).getReg() != RISCV::V0 &&
2504 "The destination
register should
not be V0.
");
2505 emitToStreamer(Out, MCInstBuilder(Opcode)
2506 .addOperand(Inst.getOperand(0))
2507 .addOperand(Inst.getOperand(1))
2508 .addOperand(Inst.getOperand(2))
2509 .addOperand(Inst.getOperand(3)));
2510 emitToStreamer(Out, MCInstBuilder(RISCV::VMXOR_MM)
2511 .addOperand(Inst.getOperand(0))
2512 .addOperand(Inst.getOperand(0))
2513 .addReg(RISCV::V0));
2514 } else if (Inst.getNumOperands() == 5 &&
2515 Inst.getOperand(0).getReg() == RISCV::V0) {
2516 // masked va >= x, vd == v0
2518 // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
2519 // expansion: vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
2520 assert(Inst.getOperand(0).getReg() == RISCV::V0 &&
2521 "The destination
register should
be V0.
");
2522 assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
2523 "The temporary vector
register should
not be V0.
");
2524 emitToStreamer(Out, MCInstBuilder(Opcode)
2525 .addOperand(Inst.getOperand(1))
2526 .addOperand(Inst.getOperand(2))
2527 .addOperand(Inst.getOperand(3))
2528 .addOperand(Inst.getOperand(4)));
2529 emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
2530 .addOperand(Inst.getOperand(0))
2531 .addOperand(Inst.getOperand(0))
2532 .addOperand(Inst.getOperand(1)));
2533 } else if (Inst.getNumOperands() == 5) {
2534 // masked va >= x, any vd
2536 // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
2537 // expansion: vmslt{u}.vx vt, va, x; vmandn.mm vt, v0, vt; vmandn.mm vd,
2538 // vd, v0; vmor.mm vd, vt, vd
2539 assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
2540 "The temporary vector
register should
not be V0.
");
2541 emitToStreamer(Out, MCInstBuilder(Opcode)
2542 .addOperand(Inst.getOperand(1))
2543 .addOperand(Inst.getOperand(2))
2544 .addOperand(Inst.getOperand(3))
2545 .addReg(RISCV::NoRegister));
2546 emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
2547 .addOperand(Inst.getOperand(1))
2549 .addOperand(Inst.getOperand(1)));
2550 emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
2551 .addOperand(Inst.getOperand(0))
2552 .addOperand(Inst.getOperand(0))
2553 .addReg(RISCV::V0));
2554 emitToStreamer(Out, MCInstBuilder(RISCV::VMOR_MM)
2555 .addOperand(Inst.getOperand(0))
2556 .addOperand(Inst.getOperand(1))
2557 .addOperand(Inst.getOperand(0)));
2561 bool RISCVAsmParser::checkPseudoAddTPRel(MCInst &Inst,
2562 OperandVector &Operands) {
2563 assert(Inst.getOpcode() == RISCV::PseudoAddTPRel && "Invalid
instruction");
2564 assert(Inst.getOperand(2).isReg() && "Unexpected second operand kind
");
2565 if (Inst.getOperand(2).getReg() != RISCV::X4) {
2566 SMLoc ErrorLoc = ((RISCVOperand &)*Operands[3]).getStartLoc();
2567 return Error(ErrorLoc, "the second
input operand must
be tp/x4 when
using "
2568 "%tprel_add modifier
");
2574 std::unique_ptr<RISCVOperand> RISCVAsmParser::defaultMaskRegOp() const {
2575 return RISCVOperand::createReg(RISCV::NoRegister, llvm::SMLoc(),
2576 llvm::SMLoc(), isRV64());
2579 bool RISCVAsmParser::validateInstruction(MCInst &Inst,
2580 OperandVector &Operands) {
2581 if (Inst.getOpcode() == RISCV::PseudoVMSGEU_VX_M_T ||
2582 Inst.getOpcode() == RISCV::PseudoVMSGE_VX_M_T) {
2583 unsigned DestReg = Inst.getOperand(0).getReg();
2584 unsigned TempReg = Inst.getOperand(1).getReg();
2585 if (DestReg == TempReg) {
2586 SMLoc Loc = Operands.back()->getStartLoc();
2587 return Error(Loc, "The temporary vector
register cannot
be the same
as "
2588 "the destination
register.
");
2592 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
2593 RISCVII::VConstraintType Constraints = RISCVII::getConstraint(MCID.TSFlags);
2594 if (Constraints == RISCVII::NoConstraint)
2597 unsigned DestReg = Inst.getOperand(0).getReg();
2598 // Operands[1] will be the first operand, DestReg.
2599 SMLoc Loc = Operands[1]->getStartLoc();
2600 if (Constraints & RISCVII::VS2Constraint) {
2601 unsigned CheckReg = Inst.getOperand(1).getReg();
2602 if (DestReg == CheckReg)
2603 return Error(Loc, "The destination vector
register group cannot overlap
"
2604 " the source vector
register group.
");
2606 if ((Constraints & RISCVII::VS1Constraint) && (Inst.getOperand(2).isReg())) {
2607 unsigned CheckReg = Inst.getOperand(2).getReg();
2608 if (DestReg == CheckReg)
2609 return Error(Loc, "The destination vector
register group cannot overlap
"
2610 " the source vector
register group.
");
2612 if ((Constraints & RISCVII::VMConstraint) && (DestReg == RISCV::V0)) {
2613 // vadc, vsbc are special cases. These instructions have no mask register.
2614 // The destination register could not be V0.
2615 unsigned Opcode = Inst.getOpcode();
2616 if (Opcode == RISCV::VADC_VVM || Opcode == RISCV::VADC_VXM ||
2617 Opcode == RISCV::VADC_VIM || Opcode == RISCV::VSBC_VVM ||
2618 Opcode == RISCV::VSBC_VXM || Opcode == RISCV::VFMERGE_VFM ||
2619 Opcode == RISCV::VMERGE_VIM || Opcode == RISCV::VMERGE_VVM ||
2620 Opcode == RISCV::VMERGE_VXM)
2621 return Error(Loc, "The destination vector
register group cannot
be V0.
");
2623 // Regardless masked or unmasked version, the number of operands is the
2624 // same. For example, "viota.m v0,
v2" is "viota.m v0,
v2, NoRegister
"
2625 // actually. We need to check the last operand to ensure whether it is
2627 unsigned CheckReg = Inst.getOperand(Inst.getNumOperands() - 1).getReg();
2628 assert((CheckReg == RISCV::V0 || CheckReg == RISCV::NoRegister) &&
2629 "Unexpected
register for mask operand
");
2631 if (DestReg == CheckReg)
2632 return Error(Loc, "The destination vector
register group cannot overlap
"
2633 " the mask
register.
");
2638 bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
2639 OperandVector &Operands,
2643 switch (Inst.getOpcode()) {
2646 case RISCV::PseudoLI: {
2647 MCRegister Reg = Inst.getOperand(0).getReg();
2648 const MCOperand &Op1 = Inst.getOperand(1);
2650 // We must have li reg, %lo(sym) or li reg, %pcrel_lo(sym) or similar.
2651 // Just convert to an addi. This allows compatibility with gas.
2652 emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
2655 .addExpr(Op1.getExpr()));
2658 int64_t Imm = Inst.getOperand(1).getImm();
2659 // On RV32 the immediate here can either be a signed or an unsigned
2660 // 32-bit number. Sign extension has to be performed to ensure that Imm
2661 // represents the expected signed 64-bit number.
2663 Imm = SignExtend64<32>(Imm);
2664 emitLoadImm(Reg, Imm, Out);
2667 case RISCV::PseudoLLA:
2668 emitLoadLocalAddress(Inst, IDLoc, Out);
2670 case RISCV::PseudoLA:
2671 emitLoadAddress(Inst, IDLoc, Out);
2673 case RISCV::PseudoLA_TLS_IE:
2674 emitLoadTLSIEAddress(Inst, IDLoc, Out);
2676 case RISCV::PseudoLA_TLS_GD:
2677 emitLoadTLSGDAddress(Inst, IDLoc, Out);
2679 case RISCV::PseudoLB:
2680 emitLoadStoreSymbol(Inst, RISCV::LB, IDLoc, Out, /*HasTmpReg=*/false);
2682 case RISCV::PseudoLBU:
2683 emitLoadStoreSymbol(Inst, RISCV::LBU, IDLoc, Out, /*HasTmpReg=*/false);
2685 case RISCV::PseudoLH:
2686 emitLoadStoreSymbol(Inst, RISCV::LH, IDLoc, Out, /*HasTmpReg=*/false);
2688 case RISCV::PseudoLHU:
2689 emitLoadStoreSymbol(Inst, RISCV::LHU, IDLoc, Out, /*HasTmpReg=*/false);
2691 case RISCV::PseudoLW:
2692 emitLoadStoreSymbol(Inst, RISCV::LW, IDLoc, Out, /*HasTmpReg=*/false);
2694 case RISCV::PseudoLWU:
2695 emitLoadStoreSymbol(Inst, RISCV::LWU, IDLoc, Out, /*HasTmpReg=*/false);
2697 case RISCV::PseudoLD:
2698 emitLoadStoreSymbol(Inst, RISCV::LD, IDLoc, Out, /*HasTmpReg=*/false);
2700 case RISCV::PseudoFLH:
2701 emitLoadStoreSymbol(Inst, RISCV::FLH, IDLoc, Out, /*HasTmpReg=*/true);
2703 case RISCV::PseudoFLW:
2704 emitLoadStoreSymbol(Inst, RISCV::FLW, IDLoc, Out, /*HasTmpReg=*/true);
2706 case RISCV::PseudoFLD:
2707 emitLoadStoreSymbol(Inst, RISCV::FLD, IDLoc, Out, /*HasTmpReg=*/true);
2709 case RISCV::PseudoSB:
2710 emitLoadStoreSymbol(Inst, RISCV::SB, IDLoc, Out, /*HasTmpReg=*/true);
2712 case RISCV::PseudoSH:
2713 emitLoadStoreSymbol(Inst, RISCV::SH, IDLoc, Out, /*HasTmpReg=*/true);
2715 case RISCV::PseudoSW:
2716 emitLoadStoreSymbol(Inst, RISCV::SW, IDLoc, Out, /*HasTmpReg=*/true);
2718 case RISCV::PseudoSD:
2719 emitLoadStoreSymbol(Inst, RISCV::SD, IDLoc, Out, /*HasTmpReg=*/true);
2721 case RISCV::PseudoFSH:
2722 emitLoadStoreSymbol(Inst, RISCV::FSH, IDLoc, Out, /*HasTmpReg=*/true);
2724 case RISCV::PseudoFSW:
2725 emitLoadStoreSymbol(Inst, RISCV::FSW, IDLoc, Out, /*HasTmpReg=*/true);
2727 case RISCV::PseudoFSD:
2728 emitLoadStoreSymbol(Inst, RISCV::FSD, IDLoc, Out, /*HasTmpReg=*/true);
2730 case RISCV::PseudoAddTPRel:
2731 if (checkPseudoAddTPRel(Inst, Operands))
2734 case RISCV::PseudoSEXT_B:
2735 emitPseudoExtend(Inst, /*SignExtend=*/true, /*Width=*/8, IDLoc, Out);
2737 case RISCV::PseudoSEXT_H:
2738 emitPseudoExtend(Inst, /*SignExtend=*/true, /*Width=*/16, IDLoc, Out);
2740 case RISCV::PseudoZEXT_H:
2741 emitPseudoExtend(Inst, /*SignExtend=*/false, /*Width=*/16, IDLoc, Out);
2743 case RISCV::PseudoZEXT_W:
2744 emitPseudoExtend(Inst, /*SignExtend=*/false, /*Width=*/32, IDLoc, Out);
2746 case RISCV::PseudoVMSGEU_VX:
2747 case RISCV::PseudoVMSGEU_VX_M:
2748 case RISCV::PseudoVMSGEU_VX_M_T:
2749 emitVMSGE(Inst, RISCV::VMSLTU_VX, IDLoc, Out);
2751 case RISCV::PseudoVMSGE_VX:
2752 case RISCV::PseudoVMSGE_VX_M:
2753 case RISCV::PseudoVMSGE_VX_M_T:
2754 emitVMSGE(Inst, RISCV::VMSLT_VX, IDLoc, Out);
2756 case RISCV::PseudoVMSGE_VI:
2757 case RISCV::PseudoVMSLT_VI: {
2758 // These instructions are signed and so is immediate so we can subtract one
2759 // and change the opcode.
2760 int64_t Imm = Inst.getOperand(2).getImm();
2761 unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGE_VI ? RISCV::VMSGT_VI
2763 emitToStreamer(Out, MCInstBuilder(Opc)
2764 .addOperand(Inst.getOperand(0))
2765 .addOperand(Inst.getOperand(1))
2767 .addOperand(Inst.getOperand(3)));
2770 case RISCV::PseudoVMSGEU_VI:
2771 case RISCV::PseudoVMSLTU_VI: {
2772 int64_t Imm = Inst.getOperand(2).getImm();
2773 // Unsigned comparisons are tricky because the immediate is signed. If the
2774 // immediate is 0 we can't just subtract one. vmsltu.vi v0, v1, 0 is always
2775 // false, but vmsle.vi v0, v1, -1 is always true. Instead we use
2776 // vmsne v0, v1, v1 which is always false.
2778 unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGEU_VI
2781 emitToStreamer(Out, MCInstBuilder(Opc)
2782 .addOperand(Inst.getOperand(0))
2783 .addOperand(Inst.getOperand(1))
2784 .addOperand(Inst.getOperand(1))
2785 .addOperand(Inst.getOperand(3)));
2787 // Other immediate values can subtract one like signed.
2788 unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGEU_VI
2791 emitToStreamer(Out, MCInstBuilder(Opc)
2792 .addOperand(Inst.getOperand(0))
2793 .addOperand(Inst.getOperand(1))
2795 .addOperand(Inst.getOperand(3)));
2802 emitToStreamer(Out, Inst);
2806 extern "C
" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVAsmParser() {
2807 RegisterMCAsmParser<RISCVAsmParser> X(getTheRISCV32Target());
2808 RegisterMCAsmParser<RISCVAsmParser> Y(getTheRISCV64Target());
MCStreamer & getStreamer()
Move duplicate certain instructions close to their use
Generic assembler parser interface, for use by target specific assembly parsers.
const MCObjectFileInfo * getObjectFileInfo() const
should just be implemented with a CLZ instruction Since there are other e that share this it would be best to implement this in a target independent as zero is the default value for the binary encoder e add r0 add r5 Register operands should be distinct That is
compiles conv shl5 shl ret i32 or10 it would be better as
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same constant
This is an optimization pass for GlobalISel generic memory operations.
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
We currently generate a but we really shouldn eax ecx xorl edx divl ecx eax divl ecx movl eax ret A similar code sequence works for division We currently compile i32 v2 eax eax jo LBB1_2 atomic and others It is also currently not done for read modify write instructions It is also current not done if the OF or CF flags are needed The shift operators have the complication that when the shift count is zero
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Reg
All possible values of the reg field in the ModR/M byte.
const_iterator end(StringRef path)
Get end iterator over path.
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
Container class for subtarget features.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_NODISCARD T pop_back_val()
Instances of this class represent a single low-level machine instruction.
static RoundingMode stringToRoundingMode(StringRef Str)
We currently generate a but we really shouldn eax ecx xorl edx divl ecx eax divl ecx movl eax ret A similar code sequence works for division We currently compile i32 v2 eax eax jo LBB1_2 and
Target independent representation for an assembler token.
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
=0.0 ? 0.0 :(a > 0.0 ? 1.0 :-1.0) a
Streaming machine code generation interface.
Common register allocation spilling r4
Represents a location in source code.
bool match(Val *V, const Pattern &P)
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Itanium Name Demangler i e convert the string _Z1fv into f()". You can also use the CRTP base ManglingParser to perform some simple analysis on the mangled name
const Triple & getTargetTriple() const
bool isPositionIndependent() const
bitcast float %x to i32 %s=and i32 %t, 2147483647 %d=bitcast i32 %s to float ret float %d } declare float @fabsf(float %n) define float @bar(float %x) nounwind { %d=call float @fabsf(float %x) ret float %d } This IR(from PR6194):target datalayout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple="x86_64-apple-darwin10.0.0" %0=type { double, double } %struct.float3=type { float, float, float } define void @test(%0, %struct.float3 *nocapture %res) nounwind noinline ssp { entry:%tmp18=extractvalue %0 %0, 0 t
the resulting code requires compare and branches when and if the revised code is with conditional branches instead of More there is a byte word extend before each where there should be only and the condition codes are not remembered when the same two values are compared twice More LSR enhancements i8 and i32 load store addressing modes are identical int b
bb420 i The CBE manages to mtctr r0 r11 stbx r9 addi bdz later b loop This could be much the loop would be a single dispatch group
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
const FeatureBitset & getFeatureBits() const
virtual MCContext & getContext()=0
The object format emitted by the WebAssembly backed is documented in
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can be
const char LLVMTargetMachineRef LLVMPassBuilderOptionsRef Options
STATISTIC(NumFunctions, "Total number of functions")
This class implements an extremely fast bulk output stream that can only output to a stream.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
Target specific streamer interface.
Analysis containing CSE Info
The initial backend is deliberately restricted to z10 We should add support for later architectures at some point If an asm ties an i32 r result to an i64 input
Used to provide key value pairs for feature and CPU bit flags.
Promote Memory to Register
the resulting code requires compare and branches when and if the revised code is with conditional branches instead of More there is a byte word extend before each where there should be only and the condition codes are not remembered when the same two values are compared twice More LSR enhancements i8 and i32 load store addressing modes are identical int int c
The initial backend is deliberately restricted to z10 We should add support for later architectures at some point If an asm ties an i32 r result to an i64 the input will be treated as an leaving the upper bits uninitialised For i64 store i32 i32 *dst ret void from CodeGen SystemZ asm ll will use LHI rather than LGHI to load This seems to be a general target independent problem The tuning of the choice between LOAD XC and CLC for constant length block operations We could extend them to variable length operations too
The object format emitted by the WebAssembly backed is documented see the home and packaging for producing WebAssembly applications that can run in browsers and other environments wasi sdk provides a more minimal C C SDK based on llvm and a libc based on for producing WebAssemmbly applictions that use the WASI ABI Rust provides WebAssembly support integrated into Cargo There are two main which provides a relatively minimal environment that has an emphasis on being native wasm32 unknown which uses Emscripten internally and provides standard C C filesystem GL and SDL bindings For more and br_table instructions can support having a value on the value stack across the jump(sometimes). We should(a) model this
mir Rename Register Operands
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
We currently generate an sqrtsd and divsd instructions This is bad
constexpr bool isInt< 32 >(int64_t x)
This is equivalent to the following
multiplies can be turned into SHL s
constexpr bool isUInt< 32 >(uint64_t x)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
@ CE
Windows NT (Windows on ARM)
Base class for user error types.
static bool isReg(const MCInst &MI, unsigned OpNo)
StringRef - Represent a constant reference to a string, i.e.
Since we know that Vector is byte aligned and we know the element offset of we should change the load into a lve *x instruction
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
llvm lib Support Unix the directory structure underneath this directory could look like only those directories actually needing to be created should be created further subdirectories could be created to reflect versions of the various standards For under SUS there could be v2
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Should compile r2 movcc movcs str strb mov lr r1 movcs movcc mov lr not
Wrapper class representing virtual and physical registers.
const CustomOperand< const MCSubtargetInfo & > Msg[]
*Add support for compiling functions in both ARM and Thumb mode
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
void printVType(unsigned VType, raw_ostream &OS)
Interface to description of machine instruction set.
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional ldr LCPI1_0 ldr ldr tst movne lsr ldr LCPI1_1 and r0 bx lr it saves an instruction and a register It might be profitable to cse MOVi16 if there are lots of bit immediates with the same bottom half Robert Muth started working on an alternate jump table implementation that does not put the tables in line in the text This is more like the llvm default jump table implementation This might be useful sometime Several revisions of patches are on the mailing beginning while CMP sets them like a subtract Therefore to be able to use CMN for comparisons other than the Z bit
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
MCTargetAsmParser - Generic interface to target specific assembly parsers.
bool parseImmediate(MCInst &MI, uint64_t &Size, ArrayRef< uint8_t > Bytes)
ABI computeTargetABI(const Triple &TT, FeatureBitset FeatureBits, StringRef ABIName)
MCTargetStreamer * getTargetStreamer()
static bool isMem(const MachineInstr &MI, unsigned Op)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Instances of this class represent operands of the MCInst class.
cond_true lis lo16() lo16() lo16() f1 fsel f2
static const char * getRegisterName(unsigned RegNo)
const SubtargetFeatureKV RISCVFeatureKV[RISCV::NumSubtargetFeatures]
Generic base class for all target subtargets.
LLVM Value Representation.
Base class for the full range of assembler expressions which are needed for parsing.
Should combine to x<=9" (the sub has nsw). Currently notoptimized with "clang -emit-llvm-bc|opt -O3".int g(int x) { return (x + 10) < 0; }Should combine to "x< -10" (the add has nsw). Currently notoptimized with "clang -emit-llvm-bc|opt -O3".int f(int i, int j) { return i < j + 1; }int g(int i, int j) { return j > i - 1; }Should combine to "i<=j" (the add/sub has nsw). Currently notoptimized with "clang -emit-llvm-bc|opt -O3".unsigned f(unsigned x) { return ((x & 7) + 1) & 15; }The & 15 part should be optimized away, it doesn't change the result. Currentlynot optimized with "clang -emit-llvm-bc|opt -O3".This was noticed in the entryblock for grokdeclarator in 403.gcc: %tmp = icmp eq i32 %decl_context, 4 %decl_context_addr.0 = select i1 %tmp, i32 3, i32 %decl_context %tmp1 = icmp eq i32 %decl_context_addr.0, 1 %decl_context_addr.1 = select i1 %tmp1, i32 0, i32 %decl_context_addr.0tmp1 should be simplified to something like: (!tmp || decl_context == 1)This allows recursive simplifications, tmp1 is used all over the place inthe function, e.g. by: %tmp23 = icmp eq i32 %decl_context_addr.1, 0 ; <i1> [#uses=1] %tmp24 = xor i1 %tmp1, true ; <i1> [#uses=1] %or.cond8 = and i1 %tmp23, %tmp24 ; <i1> [#uses=1]later.[STORE SINKING]Store sinking: This code:void f (int n, int *cond, int *res) { int i; *res = 0; for (i = 0; i < n; i++) if (*cond) *res ^= 234; }On this function GVN hoists the fully redundant value of *res, but nothingmoves the store out. 
This gives us this code:bb: ; preds = %bb2, %entry %.rle = phi i32 [ 0, %entry ], [ %.rle6, %bb2 ] %i.05 = phi i32 [ 0, %entry ], [ %indvar.next, %bb2 ] %1 = load i32* %cond, align 4 %2 = icmp eq i32 %1, 0 br i1 %2, label %bb2, label %bb1bb1: ; preds = %bb %3 = xor i32 %.rle, 234 store i32 %3, i32* %res, align 4 br label %bb2bb2: ; preds = %bb, %bb1 %.rle6 = phi i32 [ %3, %bb1 ], [ %.rle, %bb ] %indvar.next = add i32 %i.05, 1 %exitcond = icmp eq i32 %indvar.next, %n br i1 %exitcond, label %return, label %bbDSE should sink partially dead stores to get the store out of the loop.Here's another partial dead case:http:Scalar PRE hoists the mul in the common block up to the else:int test (int a, int b, int c, int g) { int d, e; if (a) d = b * c; else d = b - c; e = b * c + g; return d + e;}It would be better to do the mul once to reduce codesize above the if.This is GCC PR38204.This simple function from 179.art:int winner, numf2s;struct { double y; int reset; } *Y;void find_match() { int i; winner = 0; for (i=0;i<numf2s;i++) if (Y[i].y > Y[winner].y) winner =i;}Compiles into (with clang TBAA):for.body: ; preds = %for.inc, %bb.nph %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.inc ] %i.01718 = phi i32 [ 0, %bb.nph ], [ %i.01719, %for.inc ] %tmp4 = getelementptr inbounds %struct.anon* %tmp3, i64 %indvar, i32 0 %tmp5 = load double* %tmp4, align 8, !tbaa !4 %idxprom7 = sext i32 %i.01718 to i64 %tmp10 = getelementptr inbounds %struct.anon* %tmp3, i64 %idxprom7, i32 0 %tmp11 = load double* %tmp10, align 8, !tbaa !4 %cmp12 = fcmp ogt double %tmp5, %tmp11 br i1 %cmp12, label %if.then, label %for.incif.then: ; preds = %for.body %i.017 = trunc i64 %indvar to i32 br label %for.incfor.inc: ; preds = %for.body, %if.then %i.01719 = phi i32 [ %i.01718, %for.body ], [ %i.017, %if.then ] %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, %tmp22 br i1 %exitcond, label %for.cond.for.end_crit_edge, label %for.bodyIt is good that we hoisted the reloads of 
numf2's, and Y out of the loop andsunk the store to winner out.However, this is awful on several levels: the conditional truncate in the loop(-indvars at fault? why can't we completely promote the IV to i64?).Beyond that, we have a partially redundant load in the loop: if "winner" (aka %i.01718) isn't updated, we reload Y[winner].y the next time through the loop.Similarly, the addressing that feeds it (including the sext) is redundant. Inthe end we get this generated assembly:LBB0_2: ## %for.body ## =>This Inner Loop Header: Depth=1 movsd (%rdi), %xmm0 movslq %edx, %r8 shlq $4, %r8 ucomisd (%rcx,%r8), %xmm0 jbe LBB0_4 movl %esi, %edxLBB0_4: ## %for.inc addq $16, %rdi incq %rsi cmpq %rsi, %rax jne LBB0_2All things considered this isn't too bad, but we shouldn't need the movslq orthe shlq instruction, or the load folded into ucomisd every time through theloop.On an x86-specific topic, if the loop can't be restructure, the movl should be acmov.[STORE SINKING]GCC PR37810 is an interesting case where we should sink load/store reloadinto the if block and outside the loop, so we don't reload/store it on thenon-call path.for () { *P += 1; if () call(); else ...->tmp = *Pfor () { tmp += 1; if () { *P = tmp; call(); tmp = *P; } else ...}*P = tmp;We now hoist the reload after the call (Transforms/GVN/lpre-call-wrap.ll), butwe don't sink the store. We need partially dead store sinking.[LOAD PRE CRIT EDGE SPLITTING]GCC PR37166: Sinking of loads prevents SROA'ing the "g" struct on the stackleading to excess stack traffic. This could be handled by GVN with some crazysymbolic phi translation. The code we get looks like (g is on the stack):bb2: ; preds = %bb1.. 
%9 = getelementptr %struct.f* %g, i32 0, i32 0 store i32 %8, i32* %9, align bel %bb3bb3: ; preds = %bb1, %bb2, %bb %c_addr.0 = phi %struct.f* [ %g, %bb2 ], [ %c, %bb ], [ %c, %bb1 ] %b_addr.0 = phi %struct.f* [ %b, %bb2 ], [ %g, %bb ], [ %b, %bb1 ] %10 = getelementptr %struct.f* %c_addr.0, i32 0, i32 0 %11 = load i32* %10, align 4%11 is partially redundant, an in BB2 it should have the value %8.GCC PR33344 and PR35287 are similar cases.[LOAD PRE]There are many load PRE testcases in testsuite/gcc.dg/tree-ssa/loadpre* in theGCC testsuite, ones we don't get yet are (checked through loadpre25):[CRIT EDGE BREAKING]predcom-4.c[PRE OF READONLY CALL]loadpre5.c[TURN SELECT INTO BRANCH]loadpre14.c loadpre15.c actually a conditional increment: loadpre18.c loadpre19.c[LOAD PRE / STORE SINKING / SPEC HACK]This is a chunk of code from 456.hmmer:int f(int M, int *mc, int *mpp, int *tpmm, int *ip, int *tpim, int *dpp, int *tpdm, int xmb, int *bp, int *ms) { int k, sc; for (k = 1; k <= M; k++) { mc[k] = mpp[k-1] + tpmm[k-1]; if ((sc = ip[k-1] + tpim[k-1]) > mc[k]) mc[k] = sc; if ((sc = dpp[k-1] + tpdm[k-1]) > mc[k]) mc[k] = sc; if ((sc = xmb + bp[k]) > mc[k]) mc[k] = sc; mc[k] += ms[k]; }}It is very profitable for this benchmark to turn the conditional stores to mc[k]into a conditional move (select instr in IR) and allow the final store to do thestore. See GCC PR27313 for more details. Note that this is valid to xform evenwith the new C++ memory model, since mc[k] is previously loaded and laterstored.[SCALAR PRE]There are many PRE testcases in testsuite/gcc.dg/tree-ssa/ssa-pre-*.c in theGCC testsuite.There are some interesting cases in testsuite/gcc.dg/tree-ssa/pred-comm* in theGCC testsuite. 
For example, we get the first example in predcom-1.c, but miss the second one:unsigned fib[1000];unsigned avg[1000];__attribute__ ((noinline))void count_averages(int n) { int i; for (i = 1; i < n; i++) avg[i] = (((unsigned long) fib[i - 1] + fib[i] + fib[i + 1]) / 3) & 0xffff;}which compiles into two loads instead of one in the loop.predcom-2.c is the same as predcom-1.cpredcom-3.c is very similar but needs loads feeding each other instead ofstore->load.[ALIAS ANALYSIS]Type based alias analysis:http:We should do better analysis of posix_memalign. At the least it shouldno-capture its pointer argument, at best, we should know that the out-valueresult doesn't point to anything (like malloc). One example of this is inSingleSource/Benchmarks/Misc/dt.cInteresting missed case because of control flow flattening (should be 2 loads):http:With: llvm-gcc t2.c -S -o - -O0 -emit-llvm | llvm-as | opt -mem2reg -gvn -instcombine | llvm-diswe miss it because we need 1) CRIT EDGE 2) MULTIPLE DIFFERENTVALS PRODUCED BY ONE BLOCK OVER DIFFERENT PATHShttp:We could eliminate the branch condition here, loading from null is undefined:struct S { int w, x, y, z; };struct T { int r; struct S s; };void bar (struct S, int);void foo (int a, struct T b){ struct S *c = 0; if (a) c = &b.s; bar (*c, a);}simplifylibcalls should do several optimizations for strspn/strcspn:strcspn(x, "a") -> inlined loop for up to letters(similarly for strspn)
Add support for conditional and other related patterns Instead of
Wrapper class representing physical registers. Should be passed by value.