46 "x86-experimental-lvi-inline-asm-hardening",
47 cl::desc(
"Harden inline assembly code that may be vulnerable to Load Value"
48 " Injection (LVI). This feature is experimental."),
cl::Hidden);
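// Illustrative usage sketch (not part of the original source): the cl::opt
// above registers a backend command-line flag, so the hardening could be
// requested from a tool that links the X86 backend, e.g. (assumed invocation):
//
//   llvm-mc -triple=x86_64 -x86-experimental-lvi-inline-asm-hardening foo.s
//   clang -mllvm -x86-experimental-lvi-inline-asm-hardening ...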
  if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
    ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
#define GET_X86_SSE2AVX_TABLE
#include "X86GenInstrMapping.inc"

static const char OpPrecedence[] = {

  unsigned ForcedDataPrefix = 0;
  OpcodePrefix ForcedOpcodePrefix = OpcodePrefix_Default;
    DispEncoding_Default,
  DispEncoding ForcedDispEncoding = DispEncoding_Default;
  bool UseApxExtendedReg = false;
  bool ForcedNoFlag = false;
  SMLoc consumeToken() {
           "do not have a target streamer");
                            bool matchingInlineAsm, unsigned VariantID = 0) {
      SwitchMode(X86::Is32Bit);
                                 MissingFeatures, matchingInlineAsm,
      SwitchMode(X86::Is16Bit);

  enum InfixCalculatorTok {
  enum IntelOperatorKind {
  enum MasmOperatorKind {

  class InfixCalculator {
    typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
    bool isUnaryOperator(InfixCalculatorTok Op) const {
      return Op == IC_NEG || Op == IC_NOT;

    int64_t popOperand() {
      assert(!PostfixStack.empty() && "Popped an empty stack!");
      if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))

    void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
      assert((Op == IC_IMM || Op == IC_REGISTER) && "Unexpected operand!");
    void popOperator() { InfixOperatorStack.pop_back(); }
    void pushOperator(InfixCalculatorTok Op) {
      if (InfixOperatorStack.empty()) {

      unsigned Idx = InfixOperatorStack.size() - 1;
      InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
      if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {

      unsigned ParenCount = 0;
        if (InfixOperatorStack.empty())
        Idx = InfixOperatorStack.size() - 1;
        StackOp = InfixOperatorStack[Idx];
        if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
        if (!ParenCount && StackOp == IC_LPAREN)
        if (StackOp == IC_RPAREN) {
        } else if (StackOp == IC_LPAREN) {
          PostfixStack.push_back(std::make_pair(StackOp, 0));

      while (!InfixOperatorStack.empty()) {
        InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
        if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
          PostfixStack.push_back(std::make_pair(StackOp, 0));

      if (PostfixStack.empty())
      for (const ICToken &Op : PostfixStack) {
        if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
        } else if (isUnaryOperator(Op.first)) {
          assert(OperandStack.size() > 0 && "Too few operands.");
          assert(Operand.first == IC_IMM &&
                 "Unary operation with a register!");
            OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
            OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
          assert(OperandStack.size() > 1 && "Too few operands.");
            Val = Op1.second + Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            Val = Op1.second - Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Multiply operation with an immediate and a register!");
            Val = Op1.second * Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Divide operation with an immediate and a register!");
            assert(Op2.second != 0 && "Division by zero!");
            Val = Op1.second / Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Modulo operation with an immediate and a register!");
            Val = Op1.second % Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Or operation with an immediate and a register!");
            Val = Op1.second | Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Xor operation with an immediate and a register!");
            Val = Op1.second ^ Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "And operation with an immediate and a register!");
            Val = Op1.second & Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Left shift operation with an immediate and a register!");
            Val = Op1.second << Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Right shift operation with an immediate and a register!");
            Val = Op1.second >> Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Equals operation with an immediate and a register!");
            Val = (Op1.second == Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Not-equals operation with an immediate and a register!");
            Val = (Op1.second != Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than operation with an immediate and a register!");
            Val = (Op1.second < Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than-or-equal operation with an immediate and a "
            Val = (Op1.second <= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than operation with an immediate and a register!");
            Val = (Op1.second > Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than-or-equal operation with an immediate and a "
            Val = (Op1.second >= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));

      assert(OperandStack.size() == 1 && "Expected a single result.");
  enum IntelExprState {

  class IntelExprStateMachine {
    IntelExprState State = IES_INIT, PrevState = IES_ERROR;
    bool MemExpr = false;
    bool BracketUsed = false;
    bool OffsetOperator = false;
    bool AttachToOperandIdx = false;
    SMLoc OffsetOperatorLoc;
        ErrMsg = "cannot use more than one symbol in memory operand";

    IntelExprStateMachine() = default;
    void addImm(int64_t imm) { Imm += imm; }
    short getBracCount() const { return BracCount; }
    bool isMemExpr() const { return MemExpr; }
    bool isBracketUsed() const { return BracketUsed; }
    bool isOffsetOperator() const { return OffsetOperator; }
    SMLoc getOffsetLoc() const { return OffsetOperatorLoc; }
    MCRegister getBaseReg() const { return BaseReg; }
    MCRegister getIndexReg() const { return IndexReg; }
    unsigned getScale() const { return Scale; }
    StringRef getSymName() const { return SymName; }
    unsigned getElementSize() const { return CurType.ElementSize; }
    unsigned getLength() const { return CurType.Length; }
    int64_t getImm() { return Imm + IC.execute(); }
    bool isValidEndState() const {
      return State == IES_RBRAC || State == IES_RPAREN ||
             State == IES_INTEGER || State == IES_REGISTER ||
    void setAppendAfterOperand() { AttachToOperandIdx = true; }
    bool isPIC() const { return IsPIC; }
    void setPIC() { IsPIC = true; }
    bool hadError() const { return State == IES_ERROR; }
      if (IsPIC && AttachToOperandIdx)
        ErrMsg = "Don't use 2 or more regs for mem offset in PIC model!";
        ErrMsg = "BaseReg/IndexReg already set!";
      IntelExprState CurrState = State;
        IC.pushOperator(IC_OR);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_XOR);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_AND);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_EQ);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_NE);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_LT);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_LE);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_GT);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_GE);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_LSHIFT);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_RSHIFT);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_PLUS);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
            CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
            CurrState == IES_OFFSET)
          IC.pushOperator(IC_MINUS);
        else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
          ErrMsg = "Scale can't be negative";
          IC.pushOperator(IC_NEG);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_NOT);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        State = IES_REGISTER;
        IC.pushOperand(IC_REGISTER);
        if (PrevState == IES_INTEGER) {
            return regsUseUpError(ErrMsg);
          State = IES_REGISTER;
          Scale = IC.popOperand();
          IC.pushOperand(IC_IMM);
      PrevState = CurrState;
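// Illustrative sketch (not in the original file): for an Intel operand such as
//   mov eax, [ebx + esi*4 + 16]
// the parser drives this state machine roughly as
//   onRegister(EBX) -> onPlus() -> onRegister(ESI) -> onStar() ->
//   onInteger(4) -> onPlus() -> onInteger(16)
// so that the register paired with an integer across '*' is recorded as the
// index register with Scale = 4, while the remaining terms are folded into the
// displacement by the InfixCalculator. The exact callback order is inferred
// from the handlers above, not quoted from the file.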
        if (ParsingMSInlineAsm)
        if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
          return onInteger(CE->getValue(), ErrMsg);
        if (setSymRef(SymRef, SymRefName, ErrMsg))
        IC.pushOperand(IC_IMM);
        if (ParsingMSInlineAsm)

    bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
        if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
        IC.pushOperand(IC_IMM, TmpInt);
      PrevState = CurrState;
        State = IES_MULTIPLY;
        IC.pushOperator(IC_MULTIPLY);
        IC.pushOperator(IC_DIVIDE);
        IC.pushOperator(IC_MOD);
        IC.pushOperator(IC_PLUS);
      assert(!BracCount && "BracCount should be zero on parsing's start");
      IntelExprState CurrState = State;
      if (BracCount-- != 1) {
        ErrMsg = "unexpected bracket encountered";
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;
      IntelExprState CurrState = State;
        IC.pushOperator(IC_LPAREN);
      PrevState = CurrState;
        IC.pushOperator(IC_RPAREN);
                  bool ParsingMSInlineAsm, StringRef &ErrMsg) {
        ErrMsg = "unexpected offset operator expression";
      if (setSymRef(Val, ID, ErrMsg))
      OffsetOperator = true;
      OffsetOperatorLoc = OffsetLoc;
      IC.pushOperand(IC_IMM);
      if (ParsingMSInlineAsm) {
             bool MatchingInlineAsm = false) {
    if (MatchingInlineAsm) {
      if (!getLexer().isAtStartOfStatement())
                     bool RestoreOnFailure);
  std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
  std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
  MCRegister GetSIDIForRegClass(unsigned RegClassID, bool IsSIReg);
                                std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
                                std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
  bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
  unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
  bool ParseMasmOperator(unsigned OpKind, int64_t &Val);
  bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM,
  bool ParseMasmNamedOperator(StringRef Name, IntelExprStateMachine &SM,
  void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
  bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
  bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
                                     bool IsUnevaluatedOperand, SMLoc &End,
                                     bool IsParsingOffsetOperator = false);
                          IntelExprStateMachine &SM);
  bool ParseIntelMemoryOperandSize(unsigned &Size);
                 unsigned Scale, bool NonAbsMem, SMLoc Start,
  bool parseDirectiveArch();
  bool parseDirectiveNops(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectiveFPOProc(SMLoc L);
  bool parseDirectiveFPOSetFrame(SMLoc L);
  bool parseDirectiveFPOPushReg(SMLoc L);
  bool parseDirectiveFPOStackAlloc(SMLoc L);
  bool parseDirectiveFPOStackAlign(SMLoc L);
  bool parseDirectiveFPOEndPrologue(SMLoc L);
  bool parseDirectiveFPOEndProc(SMLoc L);
  bool parseSEHRegisterNumber(unsigned RegClassID, MCRegister &RegNo);
  bool parseDirectiveSEHPushReg(SMLoc);
  bool parseDirectiveSEHSetFrame(SMLoc);
  bool parseDirectiveSEHSaveReg(SMLoc);
  bool parseDirectiveSEHSaveXMM(SMLoc);
  bool parseDirectiveSEHPushFrame(SMLoc);
  void emitWarningForSpecialLVIInstruction(SMLoc Loc);
                               bool MatchingInlineAsm) override;
                        bool MatchingInlineAsm);
  bool matchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
  bool matchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
                                    bool MatchingInlineAsm);
  bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
  bool is64BitMode() const {
  bool is32BitMode() const {
  bool is16BitMode() const {
  void SwitchMode(unsigned mode) {
    FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});
  unsigned getPointerWidth() {
    if (is16BitMode()) return 16;
    if (is32BitMode()) return 32;
    if (is64BitMode()) return 64;
  bool isParsingIntelSyntax() {

#define GET_ASSEMBLER_HEADER
#include "X86GenAsmMatcher.inc"

  enum X86MatchResultTy {
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "X86GenAsmMatcher.inc"
                     SMLoc &EndLoc) override;

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#include "X86GenAsmMatcher.inc"
      !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
    ErrMsg = "invalid base+index expression";
      !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
    ErrMsg = "invalid base+index expression";
  if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg) ||
      IndexReg == X86::EIP || IndexReg == X86::RIP || IndexReg == X86::ESP ||
      IndexReg == X86::RSP) {
    ErrMsg = "invalid base+index expression";
  if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
      (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
                       BaseReg != X86::SI && BaseReg != X86::DI))) {
    ErrMsg = "invalid 16-bit base register";
      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
    ErrMsg = "16-bit memory operand may not include only index register";
  if (BaseReg && IndexReg) {
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
         IndexReg == X86::EIZ)) {
      ErrMsg = "base register is 64-bit, but index register is not";
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
         IndexReg == X86::RIZ)) {
      ErrMsg = "base register is 32-bit, but index register is not";
    if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
      if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
          X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
        ErrMsg = "base register is 16-bit, but index register is not";
      if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
          (IndexReg != X86::SI && IndexReg != X86::DI)) {
        ErrMsg = "invalid 16-bit base/index register combination";
  if (!Is64BitMode && (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
    ErrMsg = "IP-relative addressing requires 64-bit mode";
  if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
      (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))
  if (!is64BitMode()) {
    if (RegNo == X86::RIZ || RegNo == X86::RIP ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
      return Error(StartLoc,
                   "register %" + RegName + " is only available in 64-bit mode",
    UseApxExtendedReg = true;
  if (!RegNo && RegName.starts_with("db")) {
    if (isParsingIntelSyntax())
    return Error(StartLoc, "invalid register name", SMRange(StartLoc, EndLoc));

                                 SMLoc &EndLoc, bool RestoreOnFailure) {
  auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
    if (RestoreOnFailure) {
      while (!Tokens.empty()) {
  StartLoc = PercentTok.getLoc();
    if (isParsingIntelSyntax()) return true;
    return Error(StartLoc, "invalid register name",
  if (MatchRegisterByName(RegNo, Tok.getString(), StartLoc, EndLoc)) {
  if (RegNo == X86::ST0) {
      return Error(IntTok.getLoc(), "expected stack index");
    case 0: RegNo = X86::ST0; break;
    case 1: RegNo = X86::ST1; break;
    case 2: RegNo = X86::ST2; break;
    case 3: RegNo = X86::ST3; break;
    case 4: RegNo = X86::ST4; break;
    case 5: RegNo = X86::ST5; break;
    case 6: RegNo = X86::ST6; break;
    case 7: RegNo = X86::ST7; break;
      return Error(IntTok.getLoc(), "invalid stack index");
    if (isParsingIntelSyntax()) return true;
    return Error(StartLoc, "invalid register name",
  return ParseRegister(Reg, StartLoc, EndLoc, false);
  bool Result = ParseRegister(Reg, StartLoc, EndLoc, true);
  bool PendingErrors = getParser().hasPendingError();
  getParser().clearPendingErrors();
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
  bool Parse32 = is32BitMode() || Code16GCC;
      is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);

std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
  bool Parse32 = is32BitMode() || Code16GCC;
      is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);

MCRegister X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, bool IsSIReg) {
  switch (RegClassID) {
  case X86::GR64RegClassID:
    return IsSIReg ? X86::RSI : X86::RDI;
  case X86::GR32RegClassID:
    return IsSIReg ? X86::ESI : X86::EDI;
  case X86::GR16RegClassID:
    return IsSIReg ? X86::SI : X86::DI;

void X86AsmParser::AddDefaultSrcDestOperands(
    std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
  if (isParsingIntelSyntax()) {
    Operands.push_back(std::move(Dst));
    Operands.push_back(std::move(Src));
    Operands.push_back(std::move(Src));
    Operands.push_back(std::move(Dst));

bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
  if (OrigOperands.size() > 1) {
           "Operand size mismatch");
  int RegClassID = -1;
  for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
    if (FinalOp.isReg() &&
    if (FinalOp.isMem()) {
      if (!OrigOp.isMem())
        if (RegClassID != -1 &&
            !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
                 "mismatching source and destination index registers");
        if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
          RegClassID = X86::GR64RegClassID;
        else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
          RegClassID = X86::GR32RegClassID;
        else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
          RegClassID = X86::GR16RegClassID;
        bool IsSI = IsSIReg(FinalReg);
        FinalReg = GetSIDIForRegClass(RegClassID, IsSI);
        if (FinalReg != OrigReg) {
          std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
              "memory operand is only for determining the size, " + RegName +
              " will be used for the location"));
  for (auto &WarningMsg : Warnings) {
    Warning(WarningMsg.first, WarningMsg.second);
  for (unsigned int i = 0; i < FinalOperands.size(); ++i)
  for (auto &Op : FinalOperands)
  if (isParsingIntelSyntax())
bool X86AsmParser::CreateMemForMSInlineAsm(
  unsigned FrontendSize = 0;
  void *Decl = nullptr;
  bool IsGlobalLV = false;
    FrontendSize = Info.Var.Type * 8;
    Decl = Info.Var.Decl;
    IsGlobalLV = Info.Var.IsGlobalLV;
    if (BaseReg || IndexReg) {
          End, Size, Identifier, Decl, 0, BaseReg && IndexReg));
      getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
      X86::RIP, Identifier, Decl, FrontendSize));
                                          IntelExprStateMachine &SM,
      !getParser().isParsingMasm())
  if (Name.equals_insensitive("not")) {
  } else if (Name.equals_insensitive("or")) {
  } else if (Name.equals_insensitive("shl")) {
  } else if (Name.equals_insensitive("shr")) {
  } else if (Name.equals_insensitive("xor")) {
  } else if (Name.equals_insensitive("and")) {
  } else if (Name.equals_insensitive("mod")) {
  } else if (Name.equals_insensitive("offset")) {
    SMLoc OffsetLoc = getTok().getLoc();
    const MCExpr *Val = nullptr;
    ParseError = ParseIntelOffsetOperator(Val, ID, Info, End);
    SM.onOffset(Val, OffsetLoc, ID, Info, isParsingMSInlineAsm(), ErrMsg);
  if (!Name.equals_insensitive("offset"))
    End = consumeToken();
                                         IntelExprStateMachine &SM,
  if (Name.equals_insensitive("eq")) {
  } else if (Name.equals_insensitive("ne")) {
  } else if (Name.equals_insensitive("lt")) {
  } else if (Name.equals_insensitive("le")) {
  } else if (Name.equals_insensitive("gt")) {
  } else if (Name.equals_insensitive("ge")) {
  End = consumeToken();
                                       IntelExprStateMachine &SM) {
    SM.setAppendAfterOperand();
bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
  if (getContext().getObjectFileInfo()->isPositionIndependent())
  bool UpdateLocLex = true;
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      return Error(getLexer().getErrLoc(), getLexer().getErr());
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      UpdateLocLex = false;
      if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
        return Error(ValueLoc, "expected absolute value");
      if (SM.onInteger(Res, ErrMsg))
        return Error(ValueLoc, ErrMsg);
      UpdateLocLex = false;
      size_t DotOffset = Identifier.find_first_of('.');
      const AsmToken &NextTok = getLexer().peekTok();
        End = consumeToken();
      if (!ParseRegister(Reg, IdentLoc, End, true)) {
        if (SM.onRegister(Reg, ErrMsg))
          return Error(IdentLoc, ErrMsg);
        const std::pair<StringRef, StringRef> IDField =
        if (!Field.empty() &&
            !MatchRegisterByName(Reg, ID, IdentLoc, IDEndLoc)) {
          if (SM.onRegister(Reg, ErrMsg))
            return Error(IdentLoc, ErrMsg);
            return Error(FieldStartLoc, "unknown offset");
          else if (SM.onPlus(ErrMsg))
            return Error(getTok().getLoc(), ErrMsg);
          else if (SM.onInteger(Info.Offset, ErrMsg))
            return Error(IdentLoc, ErrMsg);
          SM.setTypeInfo(Info.Type);
          End = consumeToken();
      bool ParseError = false;
      if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
          ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
        if (ParseIntelDotOperator(SM, End))
      if (isParsingMSInlineAsm()) {
        if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
          if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
            if (SM.onInteger(Val, ErrMsg))
              return Error(IdentLoc, ErrMsg);
          return Error(IdentLoc, "expected identifier");
        if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
        else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
          return Error(IdentLoc, ErrMsg);
        if (unsigned OpKind = IdentifyMasmOperator(Identifier)) {
          if (ParseMasmOperator(OpKind, Val))
          if (SM.onInteger(Val, ErrMsg))
            return Error(IdentLoc, ErrMsg);
        if (!getParser().lookUpType(Identifier, FieldInfo.Type)) {
          getParser().parseIdentifier(Identifier);
          if (getParser().lookUpField(FieldInfo.Type.Name, Identifier,
            return Error(IdentLoc, "Unable to lookup field reference!",
          if (SM.onInteger(FieldInfo.Offset, ErrMsg))
            return Error(IdentLoc, ErrMsg);
        if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.Type)) {
          return Error(Tok.getLoc(), "Unexpected identifier!");
        } else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
          return Error(IdentLoc, ErrMsg);
      SMLoc Loc = getTok().getLoc();
      int64_t IntVal = getTok().getIntVal();
      End = consumeToken();
      UpdateLocLex = false;
        if (IDVal == "f" || IDVal == "b") {
              getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
          if (IDVal == "b" && Sym->isUndefined())
            return Error(Loc, "invalid reference to undefined symbol");
          if (SM.onIdentifierExpr(Val, Identifier, Info, Type,
                                  isParsingMSInlineAsm(), ErrMsg))
            return Error(Loc, ErrMsg);
          End = consumeToken();
          if (SM.onInteger(IntVal, ErrMsg))
            return Error(Loc, ErrMsg);
        if (SM.onInteger(IntVal, ErrMsg))
          return Error(Loc, ErrMsg);
      if (SM.onPlus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      if (SM.onMinus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      SM.onLShift(); break;
      SM.onRShift(); break;
        return Error(Tok.getLoc(), "unexpected bracket encountered");
      tryParseOperandIdx(PrevTK, SM);
      if (SM.onRBrac(ErrMsg)) {
      return Error(Tok.getLoc(), "unknown token in expression");
    if (!Done && UpdateLocLex)
      End = consumeToken();
void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
  unsigned ExprLen = End.getPointer() - Start.getPointer();
  if (SM.getSym() && !SM.isOffsetOperator()) {
    if (unsigned Len = SymName.data() - Start.getPointer())
    ExprLen = End.getPointer() - (SymName.data() + SymName.size());
    if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
  if (SM.getBaseReg())
  if (SM.getIndexReg())
  if (SM.isOffsetOperator())
    OffsetNameStr = SM.getSymName();
  IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
                 SM.getImm(), SM.isMemExpr());
  InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);

bool X86AsmParser::ParseIntelInlineAsmIdentifier(
    bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) {
  assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly.");
  SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);
  } while (End.getPointer() < EndPtr);
         "frontend claimed part of a token?");
    SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
    assert(InternalName.size() && "We should have an internal name here.");
    if (!IsParsingOffsetOperator)
  MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
  const SMLoc consumedToken = consumeToken();
    return Error(Tok.getLoc(), "Expected an identifier after {");
          .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
          .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
          .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
          .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
      return Error(Tok.getLoc(), "Invalid rounding mode.");
      return Error(Tok.getLoc(), "Expected - at this point");
      return Error(Tok.getLoc(), "Expected } at this point");
    const MCExpr *RndModeOp =
    return Error(Tok.getLoc(), "Expected } at this point");
  return Error(Tok.getLoc(), "unknown token in expression");
    return Error(Tok.getLoc(), "Expected { at this point");
    return Error(Tok.getLoc(), "Expected dfv at this point");
    return Error(Tok.getLoc(), "Expected = at this point");
  unsigned CFlags = 0;
  for (unsigned I = 0; I < 4; ++I) {
      return Error(Tok.getLoc(), "Invalid conditional flags");
      return Error(Tok.getLoc(), "Duplicated conditional flag");
    } else if (I == 3) {
      return Error(Tok.getLoc(), "Expected } at this point");
      return Error(Tok.getLoc(), "Expected } or , at this point");
bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
  } else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
    TrailingDot = DotDispStr.substr(DotDispStr.size() - 1);
    const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
    if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
        getParser().lookUpField(SM.getSymName(), DotDispStr, Info) &&
        getParser().lookUpField(DotDispStr, Info) &&
          SemaCallback->LookupInlineAsmField(Base, Member, Info.Offset)))
      return Error(Tok.getLoc(), "Unable to lookup field reference!");
    return Error(Tok.getLoc(), "Unexpected token type!");
  const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
  if (!TrailingDot.empty())
  SM.addImm(Info.Offset);
  SM.setTypeInfo(Info.Type);

  SMLoc Start = Lex().getLoc();
  ID = getTok().getString();
  if (!isParsingMSInlineAsm()) {
      getParser().parsePrimaryExpr(Val, End, nullptr))
    return Error(Start, "unexpected token!");
  } else if (ParseIntelInlineAsmIdentifier(Val, ID, Info, false, End, true)) {
    return Error(Start, "unable to lookup expression");
    return Error(Start, "offset operator cannot yet handle constants");
unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
      .Cases("TYPE", "type", IOK_TYPE)
      .Cases("SIZE", "size", IOK_SIZE)
      .Cases("LENGTH", "length", IOK_LENGTH)

unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
  const MCExpr *Val = nullptr;
  if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
    Error(Start, "unable to lookup expression");
  case IOK_LENGTH: CVal = Info.Var.Length; break;
  case IOK_SIZE: CVal = Info.Var.Size; break;
  case IOK_TYPE: CVal = Info.Var.Type; break;

unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
      .Case("type", MOK_TYPE)
      .Cases("size", "sizeof", MOK_SIZEOF)
      .Cases("length", "lengthof", MOK_LENGTHOF)

bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
  if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
    const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.getTok();
  IntelExprStateMachine SM;
  if (ParseIntelExpression(SM, End))
    Val = SM.getLength();
    Val = SM.getElementSize();
    return Error(OpLoc, "expression has unknown type", SMRange(Start, End));
bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
      .Cases("BYTE", "byte", 8)
      .Cases("WORD", "word", 16)
      .Cases("DWORD", "dword", 32)
      .Cases("FLOAT", "float", 32)
      .Cases("LONG", "long", 32)
      .Cases("FWORD", "fword", 48)
      .Cases("DOUBLE", "double", 64)
      .Cases("QWORD", "qword", 64)
      .Cases("MMWORD", "mmword", 64)
      .Cases("XWORD", "xword", 80)
      .Cases("TBYTE", "tbyte", 80)
      .Cases("XMMWORD", "xmmword", 128)
      .Cases("YMMWORD", "ymmword", 256)
      .Cases("ZMMWORD", "zmmword", 512)
      return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
  if (ParseIntelMemoryOperandSize(Size))
    return ParseRoundingModeOp(Start, Operands);
    if (RegNo == X86::RIP)
      return Error(Start, "rip can only be used as a base register");
      return Error(Start, "expected memory operand after 'ptr', "
                          "found register operand instead");
    if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
      return Error(Start, "invalid segment register");
    Start = Lex().getLoc();
  IntelExprStateMachine SM;
  if (ParseIntelExpression(SM, End))
  if (isParsingMSInlineAsm())
    RewriteIntelExpression(SM, Start, Tok.getLoc());
  int64_t Imm = SM.getImm();
  const MCExpr *Disp = SM.getSym();
  if (!SM.isMemExpr() && !RegNo) {
    if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
          SM.getSymName(), Info.Var.Decl, Info.Var.IsGlobalLV));
  if (IndexReg && BaseReg == X86::RIP)
  unsigned Scale = SM.getScale();
    Size = SM.getElementSize() << 3;
  if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
      (IndexReg == X86::ESP || IndexReg == X86::RSP))
      !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
      (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
    return Error(Start, "16-bit addresses cannot have a scale");
  if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
      (IndexReg == X86::BX || IndexReg == X86::BP))
  if ((BaseReg || IndexReg) &&
    return Error(Start, ErrMsg);
  bool IsUnconditionalBranch =
      Name.equals_insensitive("jmp") || Name.equals_insensitive("call");
  if (isParsingMSInlineAsm())
    return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale,
                                   IsUnconditionalBranch && is64BitMode(),
                                   Start, End, Size, SM.getSymName(),
  bool MaybeDirectBranchDest = true;
  if (is64BitMode() &&
      ((PtrInOperand && !IndexReg) || SM.getElementSize() > 0)) {
    DefaultBaseReg = X86::RIP;
    if (IsUnconditionalBranch) {
        MaybeDirectBranchDest = false;
        DefaultBaseReg = X86::RIP;
    } else if (!BaseReg && !IndexReg && Disp &&
      if (is64BitMode()) {
        if (SM.getSize() == 8) {
          MaybeDirectBranchDest = false;
          DefaultBaseReg = X86::RIP;
        if (SM.getSize() == 4 || SM.getSize() == 2)
          MaybeDirectBranchDest = false;
  } else if (IsUnconditionalBranch) {
    if (!PtrInOperand && SM.isOffsetOperator())
          Start, "`OFFSET` operator cannot be used in an unconditional branch");
    if (PtrInOperand || SM.isBracketUsed())
      MaybeDirectBranchDest = false;
  if ((BaseReg || IndexReg || RegNo || DefaultBaseReg))
        getPointerWidth(), RegNo, Disp, BaseReg, IndexReg, Scale, Start, End,
        0, false, MaybeDirectBranchDest));
        MaybeDirectBranchDest));
  switch (getLexer().getKind()) {
              "expected immediate expression") ||
        getParser().parseExpression(Val, End) ||
        check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
    return ParseRoundingModeOp(Start, Operands);
    const MCExpr *Expr = nullptr;
    if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
      if (Reg == X86::EIZ || Reg == X86::RIZ)
            Loc, "%eiz and %riz can only be used as index registers",
      if (Reg == X86::RIP)
        return Error(Loc, "%rip can only be used as a base register",
      if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
        return Error(Loc, "invalid segment register");
      return ParseMemOperand(Reg, Expr, Loc, EndLoc, Operands);

bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
                          const SMLoc &StartLoc) {
       (getLexer().getTok().getIdentifier() == "z")))
    return Error(getLexer().getLoc(), "Expected } at this point");
  const SMLoc consumedToken = consumeToken();
    if (getLexer().getTok().getIntVal() != 1)
      return TokError("Expected 1to<NUM> at this point");
      return TokError("Expected 1to<NUM> at this point");
    StringRef BroadcastString = (Prefix + getLexer().getTok().getIdentifier())
      return TokError("Expected 1to<NUM> at this point");
    const char *BroadcastPrimitive =
            .Case("1to2", "{1to2}")
            .Case("1to4", "{1to4}")
            .Case("1to8", "{1to8}")
            .Case("1to16", "{1to16}")
            .Case("1to32", "{1to32}")
    if (!BroadcastPrimitive)
      return TokError("Invalid memory broadcast primitive.");
      return TokError("Expected } at this point");
      std::unique_ptr<X86Operand> Z;
      if (ParseZ(Z, consumedToken))
      SMLoc StartLoc = Z ? consumeToken() : consumedToken;
      if (!parseRegister(RegNo, RegLoc, StartLoc) &&
          X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
        if (RegNo == X86::K0)
          return Error(RegLoc, "Register k0 can't be used as write mask");
          return Error(getLexer().getLoc(), "Expected } at this point");
        return Error(getLexer().getLoc(),
                     "Expected an op-mask register at this point");
          if (ParseZ(Z, consumeToken()) || !Z)
            return Error(getLexer().getLoc(),
                         "Expected a {z} mark at this point");
  auto isAtMemOperand = [this]() {
    auto TokCount = this->getLexer().peekTokens(Buf, true);
    switch (Buf[0].getKind()) {
      if ((TokCount > 1) &&
          (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
                      Buf[1].getIdentifier().size() + 1);
    MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
    if (Sym->isVariable()) {
      auto V = Sym->getVariableValue(false);
      return isa<X86MCExpr>(V);
    if (!isAtMemOperand()) {
      assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
          0, 0, 1, StartLoc, EndLoc));
  SMLoc BaseLoc = getLexer().getLoc();
        check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
    BaseReg = cast<X86MCExpr>(E)->getReg();
    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
      return Error(BaseLoc, "eiz and riz can only be used as index registers",
      if (!isa<X86MCExpr>(E)) {
        if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
          return Error(Loc, "expected absolute expression");
          Warning(Loc, "scale factor without index register is ignored");
        IndexReg = cast<X86MCExpr>(E)->getReg();
        if (BaseReg == X86::RIP)
                       "%rip as base register can not have an index register");
        if (IndexReg == X86::RIP)
          return Error(Loc, "%rip is not allowed as an index register");
          return Error(Loc, "expected scale expression");
          if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
            return Error(Loc, "scale factor in 16-bit address must be 1");
    return Error(Loc, ErrMsg);
  if (BaseReg == X86::DX && !IndexReg && Scale == 1 && !SegReg &&
      isa<MCConstantExpr>(Disp) &&
      cast<MCConstantExpr>(Disp)->getValue() == 0) {
    return Error(BaseLoc, ErrMsg);
  if (BaseReg || IndexReg) {
    if (auto CE = dyn_cast<MCConstantExpr>(Disp)) {
      auto Imm = CE->getValue();
      bool Is64 = X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) ||
                  X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg);
      bool Is16 = X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg);
        if (!isInt<32>(Imm))
          return Error(BaseLoc, "displacement " + Twine(Imm) +
                                " is not within [-2147483648, 2147483647]");
                            " shortened to 32-bit signed " +
                            Twine(static_cast<int32_t>(Imm)));
                            " shortened to 16-bit signed " +
                            Twine(static_cast<int16_t>(Imm)));
  if (SegReg || BaseReg || IndexReg)
                                     BaseReg, IndexReg, Scale, StartLoc,

bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  if (parseRegister(RegNo, StartLoc, EndLoc))
  ForcedOpcodePrefix = OpcodePrefix_Default;
  ForcedDispEncoding = DispEncoding_Default;
  UseApxExtendedReg = false;
  ForcedNoFlag = false;
    if (Prefix == "rex")
      ForcedOpcodePrefix = OpcodePrefix_REX;
    else if (Prefix == "rex2")
      ForcedOpcodePrefix = OpcodePrefix_REX2;
    else if (Prefix == "vex")
      ForcedOpcodePrefix = OpcodePrefix_VEX;
    else if (Prefix == "vex2")
      ForcedOpcodePrefix = OpcodePrefix_VEX2;
    else if (Prefix == "vex3")
      ForcedOpcodePrefix = OpcodePrefix_VEX3;
    else if (Prefix == "evex")
      ForcedOpcodePrefix = OpcodePrefix_EVEX;
    else if (Prefix == "disp8")
      ForcedDispEncoding = DispEncoding_Disp8;
    else if (Prefix == "disp32")
      ForcedDispEncoding = DispEncoding_Disp32;
    else if (Prefix == "nf")
      ForcedNoFlag = true;
      return Error(NameLoc, "unknown prefix");
  if (isParsingMSInlineAsm()) {
    if (Name.equals_insensitive("vex"))
      ForcedOpcodePrefix = OpcodePrefix_VEX;
    else if (Name.equals_insensitive("vex2"))
      ForcedOpcodePrefix = OpcodePrefix_VEX2;
    else if (Name.equals_insensitive("vex3"))
      ForcedOpcodePrefix = OpcodePrefix_VEX3;
    else if (Name.equals_insensitive("evex"))
      ForcedOpcodePrefix = OpcodePrefix_EVEX;
    if (ForcedOpcodePrefix != OpcodePrefix_Default) {
  if (Name.consume_back(".d32")) {
    ForcedDispEncoding = DispEncoding_Disp32;
  } else if (Name.consume_back(".d8")) {
    ForcedDispEncoding = DispEncoding_Disp8;
  if (isParsingIntelSyntax() &&
      (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
       PatchedName == "jcxz" || PatchedName == "jecxz" ||
          : NextTok == "short") {
                               NextTok.size() + 1);
      PatchedName != "setzub" && PatchedName != "setzunb" &&
      PatchedName != "setb" && PatchedName != "setnb")
    PatchedName = PatchedName.substr(0, Name.size()-1);
  unsigned ComparisonPredicate = ~0U;
    bool IsVCMP = PatchedName[0] == 'v';
    unsigned CCIdx = IsVCMP ? 4 : 3;
    unsigned suffixLength = PatchedName.ends_with("pbf16") ? 5 : 2;
            PatchedName.slice(CCIdx, PatchedName.size() - suffixLength))
            .Case("eq_oq", 0x00)
            .Case("lt_os", 0x01)
            .Case("le_os", 0x02)
            .Case("unord", 0x03)
            .Case("unord_q", 0x03)
            .Case("neq_uq", 0x04)
            .Case("nlt_us", 0x05)
            .Case("nle_us", 0x06)
            .Case("ord_q", 0x07)
            .Case("eq_uq", 0x08)
            .Case("nge_us", 0x09)
            .Case("ngt_us", 0x0A)
            .Case("false", 0x0B)
            .Case("false_oq", 0x0B)
            .Case("neq_oq", 0x0C)
            .Case("ge_os", 0x0D)
            .Case("gt_os", 0x0E)
            .Case("true_uq", 0x0F)
            .Case("eq_os", 0x10)
            .Case("lt_oq", 0x11)
            .Case("le_oq", 0x12)
            .Case("unord_s", 0x13)
            .Case("neq_us", 0x14)
            .Case("nlt_uq", 0x15)
            .Case("nle_uq", 0x16)
            .Case("ord_s", 0x17)
            .Case("eq_us", 0x18)
            .Case("nge_uq", 0x19)
            .Case("ngt_uq", 0x1A)
            .Case("false_os", 0x1B)
            .Case("neq_os", 0x1C)
            .Case("ge_oq", 0x1D)
            .Case("gt_oq", 0x1E)
            .Case("true_us", 0x1F)
        PatchedName = IsVCMP ? "vcmpss" : "cmpss";
        PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
        PatchedName = IsVCMP ? "vcmpps" : "cmpps";
        PatchedName = IsVCMP ? "vcmppd" : "cmppd";
        PatchedName = "vcmpsh";
        PatchedName = "vcmpph";
      else if (PatchedName.ends_with("pbf16"))
        PatchedName = "vcmppbf16";
      ComparisonPredicate = CC;
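// Illustrative example (not in the original file): an aliased comparison
// mnemonic such as
//   vcmpnlt_usps %xmm2, %xmm1, %xmm0
// is rewritten here to the base mnemonic "vcmpps", and the predicate name
// between the "vcmp" prefix and the type suffix ("nlt_us") becomes the
// immediate comparison operand 0x05 taken from ComparisonPredicate later on.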
      (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
       PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
    unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
            PatchedName.slice(5, PatchedName.size() - SuffixSize))
    if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
      switch (PatchedName.back()) {
      case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
      case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
      case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
      case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
      ComparisonPredicate = CC;
      (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
       PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
    unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
            PatchedName.slice(5, PatchedName.size() - SuffixSize))
      switch (PatchedName.back()) {
      case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
      case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
      case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
      case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
      ComparisonPredicate = CC;
          .Cases("cs", "ds", "es", "fs", "gs", "ss", true)
          .Cases("rex64", "data32", "data16", "addr32", "addr16", true)
          .Cases("xacquire", "xrelease", true)
          .Cases("acquire", "release", isParsingIntelSyntax())
  auto isLockRepeatNtPrefix = [](StringRef N) {
        .Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)
  bool CurlyAsEndOfStatement = false;
  while (isLockRepeatNtPrefix(Name.lower())) {
    while (Name.starts_with(";") || Name.starts_with("\n") ||
           Name.starts_with("#") || Name.starts_with("\t") ||
           Name.starts_with("/")) {
  if (PatchedName == "data16" && is16BitMode()) {
    return Error(NameLoc, "redundant data16 prefix");
  if (PatchedName == "data32") {
      return Error(NameLoc, "redundant data32 prefix");
      return Error(NameLoc, "'data32' is not supported in 64-bit mode");
    PatchedName = "data16";
      if (Next == "callw")
      if (Next == "ljmpw")
        ForcedDataPrefix = X86::Is32Bit;
  if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
                                                 getParser().getContext());
  if ((Name.starts_with("ccmp") || Name.starts_with("ctest")) &&
    CurlyAsEndOfStatement =
        isParsingIntelSyntax() && isParsingMSInlineAsm() &&
      return TokError("unexpected token in argument list");
  if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
                                                 getParser().getContext());
  else if (CurlyAsEndOfStatement)
                                     getLexer().getTok().getLoc(), 0);
  if (IsFp && Operands.size() == 1) {
        .Case("fsub", "fsubp")
        .Case("fdiv", "fdivp")
        .Case("fsubr", "fsubrp")
        .Case("fdivr", "fdivrp");
  if ((Name == "mov" || Name == "movw" || Name == "movl") &&
      X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
    if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
      Name = is16BitMode() ? "movw" : "movl";
  if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
  bool HadVerifyError = false;
  if (Name.starts_with("ins") &&
    AddDefaultSrcDestOperands(TmpOperands,
                              DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (Name.starts_with("outs") &&
      (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
       Name == "outsd" || Name == "outs")) {
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (Name.starts_with("lods") &&
      (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
       Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
    TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (Name.starts_with("stos") &&
      (Name == "stos" || Name == "stosb" || Name == "stosw" ||
       Name == "stosl" || Name == "stosd" || Name == "stosq")) {
    TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (Name.starts_with("scas") &&
      (Name == "scas" || Name == "scasb" || Name == "scasw" ||
       Name == "scasl" || Name == "scasd" || Name == "scasq")) {
    TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (Name.starts_with("cmps") &&
      (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
       Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
                              DefaultMemSIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (((Name.starts_with("movs") &&
        (Name == "movs" || Name == "movsb" || Name == "movsw" ||
         Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
       (Name.starts_with("smov") &&
        (Name == "smov" || Name == "smovb" || Name == "smovw" ||
         Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
    if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
                              DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (HadVerifyError) {
    return HadVerifyError;
           "size, (R|E)BX will be used for the location");
  if (I == Table.end() || I->OldOpc != Opcode)
  if (X86::isBLENDVPD(Opcode) || X86::isBLENDVPS(Opcode) ||
      X86::isPBLENDVB(Opcode))
  if (ForcedOpcodePrefix != OpcodePrefix_VEX3 &&
  auto replaceWithCCMPCTEST = [&](unsigned Opcode) -> bool {
    if (ForcedOpcodePrefix == OpcodePrefix_EVEX) {
    default: return false;
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
#define FROM_TO(FROM, TO)                                                      \
    return replaceWithCCMPCTEST(X86::TO);
    FROM_TO(CMP64mi32, CCMP64mi32)
    FROM_TO(CMP64ri32, CCMP64ri32)
    FROM_TO(TEST64mi32, CTEST64mi32)
    FROM_TO(TEST64ri32, CTEST64ri32)
  using namespace X86;
  uint64_t TSFlags = MII.get(Opcode).TSFlags;
  if (isVFCMADDCPH(Opcode) || isVFCMADDCSH(Opcode) || isVFMADDCPH(Opcode) ||
      isVFMADDCSH(Opcode)) {
      return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                            "distinct from source registers");
  } else if (isVFCMULCPH(Opcode) || isVFCMULCSH(Opcode) || isVFMULCPH(Opcode) ||
             isVFMULCSH(Opcode)) {
      return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                            "distinct from source registers");
  } else if (isV4FMADDPS(Opcode) || isV4FMADDSS(Opcode) ||
             isV4FNMADDPS(Opcode) || isV4FNMADDSS(Opcode) ||
             isVP4DPWSSDS(Opcode) || isVP4DPWSSD(Opcode)) {
    unsigned Src2Enc = MRI->getEncodingValue(Src2);
    if (Src2Enc % 4 != 0) {
      unsigned GroupStart = (Src2Enc / 4) * 4;
      unsigned GroupEnd = GroupStart + 3;
      return Warning(Ops[0]->getStartLoc(),
                     "source register '" + RegName + "' implicitly denotes '" +
  } else if (isVGATHERDPD(Opcode) || isVGATHERDPS(Opcode) ||
             isVGATHERQPD(Opcode) || isVGATHERQPS(Opcode) ||
             isVPGATHERDD(Opcode) || isVPGATHERDQ(Opcode) ||
             isVPGATHERQD(Opcode) || isVPGATHERQQ(Opcode)) {
      unsigned Index = MRI->getEncodingValue(
        return Warning(Ops[0]->getStartLoc(), "index and destination registers "
                                              "should be distinct");
      unsigned Index = MRI->getEncodingValue(
      if (Dest == Mask || Dest == Index || Mask == Index)
        return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
                                              "registers should be distinct");
  } else if (isTCMMIMFP16PS(Opcode) || isTCMMRLFP16PS(Opcode) ||
             isTDPBF16PS(Opcode) || isTDPFP16PS(Opcode) || isTDPBSSD(Opcode) ||
             isTDPBSUD(Opcode) || isTDPBUSD(Opcode) || isTDPBUUD(Opcode)) {
    if (SrcDest == Src1 || SrcDest == Src2 || Src1 == Src2)
      return Error(Ops[0]->getStartLoc(), "all tmm registers must be distinct");
  for (unsigned i = 0; i != NumOps; ++i) {
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
  if (UsesRex && HReg) {
    return Error(Ops[0]->getStartLoc(),
                 "can't encode '" + RegName + "' in an instruction requiring "
  if ((Opcode == X86::PREFETCHIT0 || Opcode == X86::PREFETCHIT1)) {
        Ops[0]->getStartLoc(),
                            : "'prefetchit1'")) +
            " only supports RIP-relative address");
void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
  Warning(Loc, "Instruction may be vulnerable to LVI and "
               "requires manual mitigation");
  Note(SMLoc(), "See https://software.intel.com/"
                "security-software-guidance/insights/"
                "deep-dive-load-value-injection#specialinstructions"
                " for more information");
    bool Parse32 = is32BitMode() || Code16GCC;
        is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
    ShlMemOp->addMemOperands(ShlInst, 5);
    emitWarningForSpecialLVIInstruction(Inst.getLoc());

void X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
    emitWarningForSpecialLVIInstruction(Inst.getLoc());
  } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
    emitWarningForSpecialLVIInstruction(Inst.getLoc());
      getSTI().hasFeature(X86::FeatureLVIControlFlowIntegrity))
    applyLVICFIMitigation(Inst, Out);
      getSTI().hasFeature(X86::FeatureLVILoadHardening))
    applyLVILoadHardeningMitigation(Inst, Out);
  unsigned Result = 0;
  if (Prefix.isPrefix()) {
    Result = Prefix.getPrefix();

bool X86AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           bool MatchingInlineAsm) {
  assert((*Operands[0]).isToken() &&
         "Leading operand should always be a mnemonic!");
                         Out, MatchingInlineAsm);
  if (ForcedOpcodePrefix == OpcodePrefix_REX)
  else if (ForcedOpcodePrefix == OpcodePrefix_REX2)
  else if (ForcedOpcodePrefix == OpcodePrefix_VEX)
  else if (ForcedOpcodePrefix == OpcodePrefix_VEX2)
  else if (ForcedOpcodePrefix == OpcodePrefix_VEX3)
  else if (ForcedOpcodePrefix == OpcodePrefix_EVEX)
  if (ForcedDispEncoding == DispEncoding_Disp8)
  else if (ForcedDispEncoding == DispEncoding_Disp32)
  return isParsingIntelSyntax()
             ? matchAndEmitIntelInstruction(IDLoc, Opcode, Inst, Operands, Out,
             : matchAndEmitATTInstruction(IDLoc, Opcode, Inst, Operands, Out,
                                          bool MatchingInlineAsm) {
          .Case("finit", "fninit")
          .Case("fsave", "fnsave")
          .Case("fstcw", "fnstcw")
          .Case("fstcww", "fnstcw")
          .Case("fstenv", "fnstenv")
          .Case("fstsw", "fnstsw")
          .Case("fstsww", "fnstsw")
          .Case("fclex", "fnclex")
  if (!MatchingInlineAsm)

bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
                                       bool MatchingInlineAsm) {
  assert(MissingFeatures.any() && "Unknown missing feature!");
  OS << "instruction requires:";
  for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
    if (MissingFeatures[i])

unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
    return Match_Unsupported;
  if (ForcedNoFlag == !(TSFlags & X86II::EVEX_NF) && !X86::isCFCMOVCC(Opc))
    return Match_Unsupported;
  switch (ForcedOpcodePrefix) {
  case OpcodePrefix_Default:
  case OpcodePrefix_REX:
  case OpcodePrefix_REX2:
      return Match_Unsupported;
  case OpcodePrefix_VEX:
  case OpcodePrefix_VEX2:
  case OpcodePrefix_VEX3:
      return Match_Unsupported;
  case OpcodePrefix_EVEX:
        !X86::isCMP(Opc) && !X86::isTEST(Opc))
      return Match_Unsupported;
      return Match_Unsupported;
      (ForcedOpcodePrefix != OpcodePrefix_VEX &&
       ForcedOpcodePrefix != OpcodePrefix_VEX2 &&
       ForcedOpcodePrefix != OpcodePrefix_VEX3))
    return Match_Unsupported;
  return Match_Success;
4250 bool X86AsmParser::matchAndEmitATTInstruction(
4254   SMRange EmptyRange = std::nullopt;
4257   if (ForcedDataPrefix == X86::Is32Bit)
4258     SwitchMode(X86::Is32Bit);
4262                        MissingFeatures, MatchingInlineAsm,
4263                        isParsingIntelSyntax());
4264   if (ForcedDataPrefix == X86::Is32Bit) {
4265     SwitchMode(X86::Is16Bit);
4266     ForcedDataPrefix = 0;
4268   switch (OriginalError) {
4271     if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
4276     if (!MatchingInlineAsm)
4277       while (processInstruction(Inst, Operands))
4281     if (!MatchingInlineAsm)
4285   case Match_InvalidImmUnsignedi4: {
4287     if (ErrorLoc == SMLoc())
4289     return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
4290                  EmptyRange, MatchingInlineAsm);
4292   case Match_MissingFeature:
4293     return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
4294   case Match_InvalidOperand:
4295   case Match_MnemonicFail:
4296   case Match_Unsupported:
4299   if (Op.getToken().empty()) {
4300     Error(IDLoc, "instruction must have size higher than 0", EmptyRange,
4315   Op.setTokenValue(Tmp);
4323   const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
4325   const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0";
4337   bool HasVectorReg = false;
4342       HasVectorReg = true;
4343     else if (X86Op->isMem()) {
4345       assert(MemOp->Mem.Size == 0 && "Memory size always 0 under ATT syntax");
4352   for (unsigned I = 0, E = std::size(Match); I != E; ++I) {
4353     Tmp.back() = Suffixes[I];
4354     if (MemOp && HasVectorReg)
4355       MemOp->Mem.Size = MemSize[I];
4356     Match[I] = Match_MnemonicFail;
4357     if (MemOp || !HasVectorReg) {
4359           MatchInstruction(Operands, Inst, ErrorInfoIgnore, MissingFeatures,
4360                            MatchingInlineAsm, isParsingIntelSyntax());
4362       if (Match[I] == Match_MissingFeature)
4363         ErrorInfoMissingFeatures = MissingFeatures;
4374   if (NumSuccessfulMatches == 1) {
4375     if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
4380     if (!MatchingInlineAsm)
4381       while (processInstruction(Inst, Operands))
4385     if (!MatchingInlineAsm)
4395   if (NumSuccessfulMatches > 1) {
4397     unsigned NumMatches = 0;
4398     for (unsigned I = 0, E = std::size(Match); I != E; ++I)
4399       if (Match[I] == Match_Success)
4400         MatchChars[NumMatches++] = Suffixes[I];
4404     OS << "ambiguous instructions require an explicit suffix (could be ";
4405     for (unsigned i = 0; i != NumMatches; ++i) {
4408       if (i + 1 == NumMatches)
4410       OS << "'" << Base << MatchChars[i] << "'";
4413     Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
4422   if (OriginalError == Match_MnemonicFail)
4423     return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
4424                  Op.getLocRange(), MatchingInlineAsm);
4426   if (OriginalError == Match_Unsupported)
4427     return Error(IDLoc, "unsupported instruction", EmptyRange,
4430   assert(OriginalError == Match_InvalidOperand && "Unexpected error");
4434     return Error(IDLoc, "too few operands for instruction", EmptyRange,
4441                  OperandRange, MatchingInlineAsm);
4445     return Error(IDLoc, "invalid operand for instruction", EmptyRange,
4451     return Error(IDLoc, "unsupported instruction", EmptyRange,
4459     return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4466     return Error(IDLoc, "invalid operand for instruction", EmptyRange,
4471   Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
4472         EmptyRange, MatchingInlineAsm);
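When the bare mnemonic fails to match, the code above fans out over the candidate AT&T size suffixes by rewriting the trailing character of the mnemonic token and re-running the matcher once per suffix. A compact, self-contained illustration of that fan-out; the mnemonic used here is only an example:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"

// Example only: "add" is retried as addb/addw/addl/addq; the parser keeps
// whichever single candidate the matcher accepts.
static void illustrateSuffixFanOut() {
  llvm::SmallString<16> Tmp("add ");      // the last slot is overwritten below
  for (char Suffix : llvm::StringRef("bwlq")) {
    Tmp.back() = Suffix;                  // "addb", "addw", "addl", "addq"
    // ... rewrite the mnemonic token to Tmp and re-run MatchInstruction ...
  }
}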
4476 bool X86AsmParser::matchAndEmitIntelInstruction(
4480   SMRange EmptyRange = std::nullopt;
4486       UnsizedMemOp = X86Op;
4497     static const char *const PtrSizedInstrs[] = {"call", "jmp", "push", "pop"};
4498     for (const char *Instr : PtrSizedInstrs) {
4499       if (Mnemonic == Instr) {
4500         UnsizedMemOp->Mem.Size = getPointerWidth();
4513   if (Mnemonic == "push" && Operands.size() == 2) {
4515     if (X86Op->isImm()) {
4517       const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
4518       unsigned Size = getPointerWidth();
4523         Tmp += (is64BitMode()) ? "q"
4525                : (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
4526         Op.setTokenValue(Tmp);
4529                        MissingFeatures, MatchingInlineAsm,
4540   static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
4541   for (unsigned Size : MopSizes) {
4545       unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
4546                                     MissingFeatures, MatchingInlineAsm,
4547                                     isParsingIntelSyntax());
4552       if (Match.back() == Match_MissingFeature)
4553         ErrorInfoMissingFeatures = MissingFeatures;
4563   if (Match.empty()) {
4564     Match.push_back(MatchInstruction(
4566         isParsingIntelSyntax()));
4568     if (Match.back() == Match_MissingFeature)
4569       ErrorInfoMissingFeatures = MissingFeatures;
4577   if (Match.back() == Match_MnemonicFail) {
4578     return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
4579                  Op.getLocRange(), MatchingInlineAsm);
4586   if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
4589     unsigned M = MatchInstruction(
4591         isParsingIntelSyntax());
4592     if (M == Match_Success)
4593       NumSuccessfulMatches = 1;
4605   if (NumSuccessfulMatches == 1) {
4606     if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
4611     if (!MatchingInlineAsm)
4612       while (processInstruction(Inst, Operands))
4615     if (!MatchingInlineAsm)
4619   } else if (NumSuccessfulMatches > 1) {
4621            "multiple matches only possible with unsized memory operands");
4623                  "ambiguous operand size for instruction '" + Mnemonic + "\'",
4629     return Error(IDLoc, "unsupported instruction", EmptyRange,
4637     return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4644     return Error(IDLoc, "invalid operand for instruction", EmptyRange,
4650     if (ErrorLoc == SMLoc())
4652     return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
4653                  EmptyRange, MatchingInlineAsm);
4657   return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
4661 bool X86AsmParser::omitRegisterFromClobberLists(MCRegister Reg) {
4662   return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg);
4665 bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
4669     return parseDirectiveArch();
4671     return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
4677       return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
4678                                          "supported: registers must have a "
4679                                          "'%' prefix in .att_syntax");
4681     getParser().setAssemblerDialect(0);
4684     getParser().setAssemblerDialect(1);
4689       return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
4690                                          "supported: registers must not have "
4691                                          "a '%' prefix in .intel_syntax");
4694   } else if (IDVal == ".nops")
4695     return parseDirectiveNops(DirectiveID.getLoc());
4696   else if (IDVal == ".even")
4697     return parseDirectiveEven(DirectiveID.getLoc());
4698   else if (IDVal == ".cv_fpo_proc")
4699     return parseDirectiveFPOProc(DirectiveID.getLoc());
4700   else if (IDVal == ".cv_fpo_setframe")
4701     return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
4702   else if (IDVal == ".cv_fpo_pushreg")
4703     return parseDirectiveFPOPushReg(DirectiveID.getLoc());
4704   else if (IDVal == ".cv_fpo_stackalloc")
4705     return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
4706   else if (IDVal == ".cv_fpo_stackalign")
4707     return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
4708   else if (IDVal == ".cv_fpo_endprologue")
4709     return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
4710   else if (IDVal == ".cv_fpo_endproc")
4711     return parseDirectiveFPOEndProc(DirectiveID.getLoc());
4712   else if (IDVal == ".seh_pushreg" ||
4714     return parseDirectiveSEHPushReg(DirectiveID.getLoc());
4715   else if (IDVal == ".seh_setframe" ||
4717     return parseDirectiveSEHSetFrame(DirectiveID.getLoc());
4718   else if (IDVal == ".seh_savereg" ||
4720     return parseDirectiveSEHSaveReg(DirectiveID.getLoc());
4721   else if (IDVal == ".seh_savexmm" ||
4723     return parseDirectiveSEHSaveXMM(DirectiveID.getLoc());
4724   else if (IDVal == ".seh_pushframe" ||
4726     return parseDirectiveSEHPushFrame(DirectiveID.getLoc());
4731 bool X86AsmParser::parseDirectiveArch() {
4733   getParser().parseStringToEndOfStatement();
4739 bool X86AsmParser::parseDirectiveNops(SMLoc L) {
4740   int64_t NumBytes = 0, Control = 0;
4741   SMLoc NumBytesLoc, ControlLoc;
4743   NumBytesLoc = getTok().getLoc();
4744   if (getParser().checkForValidSection() ||
4745       getParser().parseAbsoluteExpression(NumBytes))
4749     ControlLoc = getTok().getLoc();
4750     if (getParser().parseAbsoluteExpression(Control))
4753   if (getParser().parseEOL())
4756   if (NumBytes <= 0) {
4757     Error(NumBytesLoc, "'.nops' directive with non-positive size");
4762     Error(ControlLoc, "'.nops' directive with negative NOP size");
4767   getParser().getStreamer().emitNops(NumBytes, Control, L, STI);
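After the operand checks, '.nops' reduces to the single streamer call on line 4767. As a concrete example, a directive such as '.nops 16, 4' (16 bytes of padding, no single NOP longer than 4 bytes) would reach the streamer roughly as below; the call is shown inside the parser, where getParser() and getSTI() are available:

// Illustrative values only.
getParser().getStreamer().emitNops(/*NumBytes=*/16, /*ControlledNopLength=*/4,
                                   L, getSTI());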
4774 bool X86AsmParser::parseDirectiveEven(SMLoc L) {
4780     getStreamer().initSections(false, getSTI());
4781     Section = getStreamer().getCurrentSectionOnly();
4784     getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
4786     getStreamer().emitValueToAlignment(Align(2), 0, 1, 0);
4795   if (IDVal == ".code16") {
4797     if (!is16BitMode()) {
4798       SwitchMode(X86::Is16Bit);
4799       getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
4801   } else if (IDVal == ".code16gcc") {
4805     if (!is16BitMode()) {
4806       SwitchMode(X86::Is16Bit);
4807       getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
4809   } else if (IDVal == ".code32") {
4811     if (!is32BitMode()) {
4812       SwitchMode(X86::Is32Bit);
4813       getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
4815   } else if (IDVal == ".code64") {
4817     if (!is64BitMode()) {
4818       SwitchMode(X86::Is64Bit);
4819       getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
4822     Error(L, "unknown directive " + IDVal);
4830 bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
4835     return Parser.TokError("expected symbol name");
4836   if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
4839     return Parser.TokError("parameters size out of range");
4842   MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
4843   return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
4847 bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
4850   if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
4852   return getTargetStreamer().emitFPOSetFrame(Reg, L);
4856 bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
4859   if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
4861   return getTargetStreamer().emitFPOPushReg(Reg, L);
4865 bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
4870   return getTargetStreamer().emitFPOStackAlloc(Offset, L);
4874 bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
4879   return getTargetStreamer().emitFPOStackAlign(Offset, L);
4883 bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
4887   return getTargetStreamer().emitFPOEndPrologue(L);
4891 bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
4895   return getTargetStreamer().emitFPOEndProc(L);
4898 bool X86AsmParser::parseSEHRegisterNumber(unsigned RegClassID,
4900   SMLoc startLoc = getLexer().getLoc();
4906     if (parseRegister(RegNo, startLoc, endLoc))
4909     if (!X86MCRegisterClasses[RegClassID].contains(RegNo)) {
4910       return Error(startLoc,
4911                    "register is not supported for use with this directive");
4917     if (getParser().parseAbsoluteExpression(EncodedReg))
4923     for (MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
4924       if (MRI->getEncodingValue(Reg) == EncodedReg) {
4930       return Error(startLoc,
4931                    "incorrect register number for use with this directive");
4938 bool X86AsmParser::parseDirectiveSEHPushReg(SMLoc Loc) {
4940   if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
4944     return TokError("expected end of directive");
4947   getStreamer().emitWinCFIPushReg(Reg, Loc);
4951 bool X86AsmParser::parseDirectiveSEHSetFrame(SMLoc Loc) {
4954   if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
4957     return TokError("you must specify a stack pointer offset");
4960   if (getParser().parseAbsoluteExpression(Off))
4964     return TokError("expected end of directive");
4967   getStreamer().emitWinCFISetFrame(Reg, Off, Loc);
4971 bool X86AsmParser::parseDirectiveSEHSaveReg(SMLoc Loc) {
4974   if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
4977     return TokError("you must specify an offset on the stack");
4980   if (getParser().parseAbsoluteExpression(Off))
4984     return TokError("expected end of directive");
4987   getStreamer().emitWinCFISaveReg(Reg, Off, Loc);
4991 bool X86AsmParser::parseDirectiveSEHSaveXMM(SMLoc Loc) {
4994   if (parseSEHRegisterNumber(X86::VR128XRegClassID, Reg))
4997     return TokError("you must specify an offset on the stack");
5000   if (getParser().parseAbsoluteExpression(Off))
5004     return TokError("expected end of directive");
5007   getStreamer().emitWinCFISaveXMM(Reg, Off, Loc);
5011 bool X86AsmParser::parseDirectiveSEHPushFrame(SMLoc Loc) {
5015     SMLoc startLoc = getLexer().getLoc();
5017     if (!getParser().parseIdentifier(CodeID)) {
5018       if (CodeID != "code")
5019         return Error(startLoc, "expected @code");
5025     return TokError("expected end of directive");
5028   getStreamer().emitWinCFIPushFrame(Code, Loc);
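Each .seh_* directive above ultimately becomes one WinCFI call on the streamer once its register and offset have been validated. For instance (values illustrative, shown inside the parser where getStreamer() is available):

getStreamer().emitWinCFIPushReg(llvm::X86::RBP, Loc);                 // .seh_pushreg %rbp
getStreamer().emitWinCFISetFrame(llvm::X86::RBP, /*Offset=*/16, Loc); // .seh_setframe %rbp, 16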
5038#define GET_MATCHER_IMPLEMENTATION
5039#include "X86GenAsmMatcher.inc"
unsigned const MachineRegisterInfo * MRI
static MCRegister MatchRegisterName(StringRef Name)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
static bool IsVCMP(unsigned Opcode)
This file defines the SmallString class.
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are string literals.
static SymbolRef::Type getType(const Symbol *Sym)
static cl::opt< bool > LVIInlineAsmHardening("x86-experimental-lvi-inline-asm-hardening", cl::desc("Harden inline assembly code that may be vulnerable to Load Value" " Injection (LVI). This feature is experimental."), cl::Hidden)
static bool checkScale(unsigned Scale, StringRef &ErrMsg)
LLVM_C_ABI void LLVMInitializeX86AsmParser()
static bool convertSSEToAVX(MCInst &Inst)
static unsigned getPrefixes(OperandVector &Operands)
static bool CheckBaseRegAndIndexRegAndScale(MCRegister BaseReg, MCRegister IndexReg, unsigned Scale, bool Is64BitMode, StringRef &ErrMsg)
#define FROM_TO(FROM, TO)
static unsigned getSize(unsigned Kind)
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Target independent representation for an assembler token.
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token; this includes all characters (for example, the quotes on strings) in the token.
bool is(TokenKind K) const
TokenKind getKind() const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
This class represents an Operation in the Expression.
Base class for user error types.
Lightweight error class with error context and mandatory checking.
Container class for subtarget features.
constexpr size_t size() const
An instruction for ordering other memory operations.
Generic assembler lexer interface, for use by target specific assembly lexers.
void UnLex(AsmToken const &Token)
bool isNot(AsmToken::TokenKind K) const
Check if the current token has kind K.
MCStreamer & getStreamer()
MCAsmParser & getParser()
Generic assembler parser interface, for use by target specific assembly parsers.
virtual void eatToEndOfStatement()=0
Skip to the end of the current statement, for error recovery.
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc, AsmTypeInfo *TypeInfo)=0
Parse a primary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool isParsingMasm() const
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
bool parseIntToken(int64_t &V, const Twine &ErrMsg)
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual unsigned getAssemblerDialect()
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
virtual bool lookUpType(StringRef Name, AsmTypeInfo &Info) const
bool TokError(const Twine &Msg, SMRange Range=std::nullopt)
Report an error at the current lexer location.
virtual bool parseAbsoluteExpression(int64_t &Res)=0
Parse an expression which must evaluate to an absolute value.
virtual bool lookUpField(StringRef Name, AsmFieldInfo &Info) const
bool parseTokenLoc(SMLoc &Loc)
virtual MCContext & getContext()=0
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Base class for the full range of assembler expressions which are needed for parsing.
@ SymbolRef
References to labels and assigned expressions.
Instances of this class represent a single low-level machine instruction.
unsigned getNumOperands() const
unsigned getFlags() const
unsigned getOpcode() const
void setFlags(unsigned F)
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
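As a small, self-contained illustration of the MCInst/MCOperand interface summarized above, building a 'movl $1, %eax' by hand could look like this; the opcode and register names come from the generated X86 enums and are assumptions of this sketch:

#include "llvm/MC/MCInst.h"

static llvm::MCInst makeMovEaxOne() {
  llvm::MCInst Inst;
  Inst.setOpcode(llvm::X86::MOV32ri);                          // movl $imm, %r32
  Inst.addOperand(llvm::MCOperand::createReg(llvm::X86::EAX)); // destination
  Inst.addOperand(llvm::MCOperand::createImm(1));              // immediate
  return Inst;
}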
Describe properties that are true of each instruction in the target description file.
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
Interface to description of machine instruction set.
Instances of this class represent operands of the MCInst class.
static MCOperand createImm(int64_t Val)
MCRegister getReg() const
Returns the register number.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc objects that represent all of the machine registers that the target has.
Wrapper class representing physical registers. Should be passed by value.
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created and uniqued by the MCContext class.
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool parseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
Parse one assembly instruction.
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
@ FIRST_TARGET_MATCH_RESULT_TY
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target-specific assembler directive. This method is deprecated; use 'parseDirective' instead.
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc)
virtual bool omitRegisterFromClobberLists(MCRegister Reg)
Allows targets to let registers opt out of clobber lists.
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
Recognize a series of operands of a parsed instruction as an actual MCInst and emit it to the specified MCStreamer.
void setAvailableFeatures(const FeatureBitset &Value)
const MCSubtargetInfo & getSTI() const
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not expressible via match classes.
Target specific streamer interface.
Ternary parse status returned by various parse* methods.
static constexpr StatusTy Failure
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
constexpr bool isValid() const
Represents a range in source code.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc).
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
char back() const
back - Get the last character in the string.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
static constexpr size_t npos
StringRef drop_back(size_t N=1) const
Return a StringRef equal to 'this' but with the last N elements dropped.
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
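A short, self-contained illustration of a few of the StringRef helpers listed above; the values are arbitrary examples:

#include "llvm/ADT/StringRef.h"

static void stringRefExamples() {
  llvm::StringRef Mnemonic = "fstsww";
  bool IsX87 = Mnemonic.starts_with("f");       // true
  llvm::StringRef Base = Mnemonic.drop_back();  // "fstsw"
  auto Parts = Mnemonic.split('t');             // {"fs", "sww"}
  (void)IsX87;
  (void)Base;
  (void)Parts;
}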
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
static const char * getRegisterName(MCRegister Reg)
static const X86MCExpr * create(MCRegister Reg, MCContext &Ctx)
X86 target streamer implementing x86-only assembly directives.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
std::variant< std::monostate, Loc::Single, Loc::Multi, Loc::MMI, Loc::EntryValue > Variant
Alias for the std::variant specialization base class of DbgVariable.
@ CE
Windows NT (Windows on ARM)
@ X86
Windows x64, Windows Itanium (IA-64)
Reg
All possible values of the reg field in the ModR/M byte.
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ VEX
VEX - encoding using 0xC4/0xC5.
@ ExplicitVEXPrefix
For instructions that use VEX encoding only when {vex}, {vex2} or {vex3} is present.
bool isX86_64NonExtLowByteReg(MCRegister Reg)
bool canUseApxExtendedReg(const MCInstrDesc &Desc)
bool isX86_64ExtendedReg(MCRegister Reg)
bool isApxExtendedReg(MCRegister Reg)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
bool optimizeShiftRotateWithImmediateOne(MCInst &MI)
bool optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc)
StringRef toStringRef(const std::optional< DWARFFormValue > &V, StringRef Default={})
Take an optional DWARFFormValue and try to extract a string value from it.
NodeAddr< CodeNode * > Code
This is an optimization pass for GlobalISel generic memory operations.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
Target & getTheX86_32Target()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
@ MCAF_Code64
.code64 (X86)
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the given range.
Target & getTheX86_64Target()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
bool isKind(IdKind kind) const
SmallVectorImpl< AsmRewrite > * AsmRewrites
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
X86Operand - Instances of this class represent a parsed X86 machine instruction.
SMLoc getStartLoc() const override
getStartLoc - Get the location of the first token of this operand.
bool isImm() const override
isImm - Is this an immediate operand?
static std::unique_ptr< X86Operand > CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc, StringRef SymName=StringRef(), void *OpDecl=nullptr, bool GlobalRef=true)
static std::unique_ptr< X86Operand > CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc)
static std::unique_ptr< X86Operand > CreateDXReg(SMLoc StartLoc, SMLoc EndLoc)
static std::unique_ptr< X86Operand > CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc, bool AddressOf=false, SMLoc OffsetOfLoc=SMLoc(), StringRef SymName=StringRef(), void *OpDecl=nullptr)
SMRange getLocRange() const
getLocRange - Get the range between the first and last token of this operand.
SMLoc getEndLoc() const override
getEndLoc - Get the location of the last token of this operand.
bool isReg() const override
isReg - Is this a register operand?
bool isMem() const override
isMem - Is this a memory operand?
static std::unique_ptr< X86Operand > CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, unsigned Size=0, StringRef SymName=StringRef(), void *OpDecl=nullptr, unsigned FrontendSize=0, bool UseUpRegs=false, bool MaybeDirectBranchDest=true)
Create an absolute memory operand.
static std::unique_ptr< X86Operand > CreateToken(StringRef Str, SMLoc Loc)
bool isMemUnsized() const
const MCExpr * getImm() const
unsigned getMemFrontendSize() const
MCRegister getReg() const override
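Finally, a hedged sketch of the X86Operand factory helpers listed above. X86Operand lives in the target's local X86Operand.h header; the MCContext and SMLoc arguments here are assumptions of the example, not values taken from the listing:

static void x86OperandExamples(llvm::MCContext &Ctx, llvm::SMLoc Loc) {
  auto RegOp = X86Operand::CreateReg(llvm::X86::RAX, Loc, Loc);
  auto ImmOp = X86Operand::CreateImm(llvm::MCConstantExpr::create(42, Ctx),
                                     Loc, Loc);
  auto TokOp = X86Operand::CreateToken("movq", Loc);
  (void)RegOp;
  (void)ImmOp;
  (void)TokOp;
}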