47 "x86-experimental-lvi-inline-asm-hardening",
48 cl::desc(
"Harden inline assembly code that may be vulnerable to Load Value"
49 " Injection (LVI). This feature is experimental."),
cl::Hidden);
52 if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
53 ErrMsg =
"scale factor in address must be 1, 2, 4 or 8";
62#define GET_X86_SSE2AVX_TABLE
63#include "X86GenInstrMapping.inc"
65static const char OpPrecedence[] = {
93 unsigned ForcedDataPrefix = 0;
105 OpcodePrefix ForcedOpcodePrefix = OpcodePrefix_Default;
108 DispEncoding_Default,
113 DispEncoding ForcedDispEncoding = DispEncoding_Default;
116 bool UseApxExtendedReg =
false;
118 bool ForcedNoFlag =
false;
121 SMLoc consumeToken() {
130 "do not have a target streamer");
137 bool matchingInlineAsm,
unsigned VariantID = 0) {
140 SwitchMode(X86::Is32Bit);
142 MissingFeatures, matchingInlineAsm,
145 SwitchMode(X86::Is16Bit);
  enum InfixCalculatorTok {
  enum IntelOperatorKind {
  enum MasmOperatorKind {

  class InfixCalculator {
    typedef std::pair<InfixCalculatorTok, int64_t> ICToken;

    bool isUnaryOperator(InfixCalculatorTok Op) const {
      return Op == IC_NEG || Op == IC_NOT;

    int64_t popOperand() {
      assert(!PostfixStack.empty() && "Popped an empty stack!");
      if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))
    void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
      assert((Op == IC_IMM || Op == IC_REGISTER) && "Unexpected operand!");

    void popOperator() { InfixOperatorStack.pop_back(); }
    void pushOperator(InfixCalculatorTok Op) {
      if (InfixOperatorStack.empty()) {
      unsigned Idx = InfixOperatorStack.size() - 1;
      InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
      if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {

      unsigned ParenCount = 0;
        if (InfixOperatorStack.empty())
        Idx = InfixOperatorStack.size() - 1;
        StackOp = InfixOperatorStack[Idx];
        if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
        if (!ParenCount && StackOp == IC_LPAREN)
        if (StackOp == IC_RPAREN) {
        } else if (StackOp == IC_LPAREN) {
          PostfixStack.push_back(std::make_pair(StackOp, 0));

      while (!InfixOperatorStack.empty()) {
        InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
        if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
          PostfixStack.push_back(std::make_pair(StackOp, 0));

      if (PostfixStack.empty())
      for (const ICToken &Op : PostfixStack) {
        if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
        } else if (isUnaryOperator(Op.first)) {
          assert(OperandStack.size() > 0 && "Too few operands.");
          assert(Operand.first == IC_IMM &&
                 "Unary operation with a register!");
            OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
            OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
          assert(OperandStack.size() > 1 && "Too few operands.");
            Val = Op1.second + Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            Val = Op1.second - Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Multiply operation with an immediate and a register!");
            Val = Op1.second * Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Divide operation with an immediate and a register!");
            assert(Op2.second != 0 && "Division by zero!");
            Val = Op1.second / Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Modulo operation with an immediate and a register!");
            Val = Op1.second % Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Or operation with an immediate and a register!");
            Val = Op1.second | Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Xor operation with an immediate and a register!");
            Val = Op1.second ^ Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "And operation with an immediate and a register!");
            Val = Op1.second & Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Left shift operation with an immediate and a register!");
            Val = Op1.second << Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Right shift operation with an immediate and a register!");
            Val = Op1.second >> Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Equals operation with an immediate and a register!");
            Val = (Op1.second == Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Not-equals operation with an immediate and a register!");
            Val = (Op1.second != Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than operation with an immediate and a register!");
            Val = (Op1.second < Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second <= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than operation with an immediate and a register!");
            Val = (Op1.second > Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second >= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));

      assert(OperandStack.size() == 1 && "Expected a single result.");
  enum IntelExprState {

  class IntelExprStateMachine {
    IntelExprState State = IES_INIT, PrevState = IES_ERROR;
    unsigned BaseReg = 0, IndexReg = 0, TmpReg = 0, Scale = 0;
    bool MemExpr = false;
    bool BracketUsed = false;
    bool OffsetOperator = false;
    bool AttachToOperandIdx = false;
    SMLoc OffsetOperatorLoc;

      ErrMsg = "cannot use more than one symbol in memory operand";

    IntelExprStateMachine() = default;

    void addImm(int64_t imm) { Imm += imm; }
    short getBracCount() const { return BracCount; }
    bool isMemExpr() const { return MemExpr; }
    bool isBracketUsed() const { return BracketUsed; }
    bool isOffsetOperator() const { return OffsetOperator; }
    SMLoc getOffsetLoc() const { return OffsetOperatorLoc; }
    unsigned getBaseReg() const { return BaseReg; }
    unsigned getIndexReg() const { return IndexReg; }
    unsigned getScale() const { return Scale; }
    StringRef getSymName() const { return SymName; }
    unsigned getElementSize() const { return CurType.ElementSize; }
    unsigned getLength() const { return CurType.Length; }
    int64_t getImm() { return Imm + IC.execute(); }
    bool isValidEndState() const {
      return State == IES_RBRAC || State == IES_RPAREN ||
             State == IES_INTEGER || State == IES_REGISTER ||
    void setAppendAfterOperand() { AttachToOperandIdx = true; }
    bool isPIC() const { return IsPIC; }
    void setPIC() { IsPIC = true; }
    bool hadError() const { return State == IES_ERROR; }

      if (IsPIC && AttachToOperandIdx)
        ErrMsg = "Don't use 2 or more regs for mem offset in PIC model!";
      ErrMsg = "BaseReg/IndexReg already set!";
      IntelExprState CurrState = State;
        IC.pushOperator(IC_OR);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_XOR);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_AND);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_EQ);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_NE);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_LT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_LE);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_GT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_GE);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_LSHIFT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_RSHIFT);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_PLUS);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
            CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
            CurrState == IES_OFFSET)
          IC.pushOperator(IC_MINUS);
        else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
          ErrMsg = "Scale can't be negative";
          IC.pushOperator(IC_NEG);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_NOT);
      PrevState = CurrState;
    bool onRegister(unsigned Reg, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
        State = IES_REGISTER;
        IC.pushOperand(IC_REGISTER);
        if (PrevState == IES_INTEGER) {
            return regsUseUpError(ErrMsg);
          State = IES_REGISTER;
          Scale = IC.popOperand();
          IC.pushOperand(IC_IMM);
      PrevState = CurrState;

      if (ParsingMSInlineAsm)
      if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
        return onInteger(CE->getValue(), ErrMsg);
      if (setSymRef(SymRef, SymRefName, ErrMsg))
      IC.pushOperand(IC_IMM);
      if (ParsingMSInlineAsm)

    bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
        if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
        IC.pushOperand(IC_IMM, TmpInt);
      PrevState = CurrState;

        State = IES_MULTIPLY;
        IC.pushOperator(IC_MULTIPLY);
        IC.pushOperator(IC_DIVIDE);
        IC.pushOperator(IC_MOD);
        IC.pushOperator(IC_PLUS);
      assert(!BracCount && "BracCount should be zero on parsing's start");

      IntelExprState CurrState = State;
      if (BracCount-- != 1) {
        ErrMsg = "unexpected bracket encountered";
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
            return regsUseUpError(ErrMsg);
      PrevState = CurrState;

      IntelExprState CurrState = State;
        IC.pushOperator(IC_LPAREN);
      PrevState = CurrState;
        IC.pushOperator(IC_RPAREN);

                  bool ParsingMSInlineAsm, StringRef &ErrMsg) {
        ErrMsg = "unexpected offset operator expression";
      if (setSymRef(Val, ID, ErrMsg))
      OffsetOperator = true;
      OffsetOperatorLoc = OffsetLoc;
      IC.pushOperand(IC_IMM);
      if (ParsingMSInlineAsm) {

                                 bool MatchingInlineAsm = false) {
    if (MatchingInlineAsm) {
      if (!getLexer().isAtStartOfStatement())

                     bool RestoreOnFailure);
  std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
  std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
  bool IsSIReg(unsigned Reg);
  unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
      std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
      std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);

  bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
  unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
  bool ParseMasmOperator(unsigned OpKind, int64_t &Val);
  bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM,
  bool ParseMasmNamedOperator(StringRef Name, IntelExprStateMachine &SM,
  void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
  bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
  bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
                                     bool IsUnevaluatedOperand, SMLoc &End,
                                     bool IsParsingOffsetOperator = false);
                          IntelExprStateMachine &SM);
  bool ParseMemOperand(unsigned SegReg, const MCExpr *Disp, SMLoc StartLoc,
  bool ParseIntelMemoryOperandSize(unsigned &Size);
  bool CreateMemForMSInlineAsm(unsigned SegReg, const MCExpr *Disp,
                               unsigned BaseReg, unsigned IndexReg,
                               unsigned Scale, bool NonAbsMem, SMLoc Start,

  bool parseDirectiveArch();
  bool parseDirectiveNops(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectiveFPOProc(SMLoc L);
  bool parseDirectiveFPOSetFrame(SMLoc L);
  bool parseDirectiveFPOPushReg(SMLoc L);
  bool parseDirectiveFPOStackAlloc(SMLoc L);
  bool parseDirectiveFPOStackAlign(SMLoc L);
  bool parseDirectiveFPOEndPrologue(SMLoc L);
  bool parseDirectiveFPOEndProc(SMLoc L);
  bool parseSEHRegisterNumber(unsigned RegClassID, MCRegister &RegNo);
  bool parseDirectiveSEHPushReg(SMLoc);
  bool parseDirectiveSEHSetFrame(SMLoc);
  bool parseDirectiveSEHSaveReg(SMLoc);
  bool parseDirectiveSEHSaveXMM(SMLoc);
  bool parseDirectiveSEHPushFrame(SMLoc);

  void emitWarningForSpecialLVIInstruction(SMLoc Loc);

                               bool MatchingInlineAsm) override;
                               bool MatchingInlineAsm);
  bool matchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
  bool matchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
                                    bool MatchingInlineAsm);

  bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);

  bool is64BitMode() const {
  bool is32BitMode() const {
  bool is16BitMode() const {
  void SwitchMode(unsigned mode) {
    FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});

  unsigned getPointerWidth() {
    if (is16BitMode()) return 16;
    if (is32BitMode()) return 32;
    if (is64BitMode()) return 64;

  bool isParsingIntelSyntax() {

#define GET_ASSEMBLER_HEADER
#include "X86GenAsmMatcher.inc"

  enum X86MatchResultTy {
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "X86GenAsmMatcher.inc"

                     SMLoc &EndLoc) override;

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#include "X86GenAsmMatcher.inc"
                                          unsigned Scale, bool Is64BitMode,
      !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
    ErrMsg = "invalid base+index expression";

  if (IndexReg != 0 &&
      !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
    ErrMsg = "invalid base+index expression";

  if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
      IndexReg == X86::EIP || IndexReg == X86::RIP ||
      IndexReg == X86::ESP || IndexReg == X86::RSP) {
    ErrMsg = "invalid base+index expression";

  if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
      (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
                       BaseReg != X86::SI && BaseReg != X86::DI))) {
    ErrMsg = "invalid 16-bit base register";

      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
    ErrMsg = "16-bit memory operand may not include only index register";

  if (BaseReg != 0 && IndexReg != 0) {
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
         IndexReg == X86::EIZ)) {
      ErrMsg = "base register is 64-bit, but index register is not";
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
         IndexReg == X86::RIZ)) {
      ErrMsg = "base register is 32-bit, but index register is not";
    if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
      if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
          X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
        ErrMsg = "base register is 16-bit, but index register is not";
      if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
          (IndexReg != X86::SI && IndexReg != X86::DI)) {
        ErrMsg = "invalid 16-bit base/index register combination";

  if (!Is64BitMode && BaseReg != 0 &&
      (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
    ErrMsg = "IP-relative addressing requires 64-bit mode";
  if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
      (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))

  if (!is64BitMode()) {
    if (RegNo == X86::RIZ || RegNo == X86::RIP ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
      return Error(StartLoc,
                   "register %" + RegName + " is only available in 64-bit mode",

    UseApxExtendedReg = true;

  if (RegNo == 0 && RegName.starts_with("db")) {

    if (isParsingIntelSyntax())
    return Error(StartLoc, "invalid register name", SMRange(StartLoc, EndLoc));

                                 SMLoc &EndLoc, bool RestoreOnFailure) {
  auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
    if (RestoreOnFailure) {
      while (!Tokens.empty()) {

  StartLoc = PercentTok.getLoc();

    if (isParsingIntelSyntax())
      return true;
    return Error(StartLoc, "invalid register name",

  if (MatchRegisterByName(RegNo, Tok.getString(), StartLoc, EndLoc)) {

  if (RegNo == X86::ST0) {
      return Error(IntTok.getLoc(), "expected stack index");
    case 0: RegNo = X86::ST0; break;
    case 1: RegNo = X86::ST1; break;
    case 2: RegNo = X86::ST2; break;
    case 3: RegNo = X86::ST3; break;
    case 4: RegNo = X86::ST4; break;
    case 5: RegNo = X86::ST5; break;
    case 6: RegNo = X86::ST6; break;
    case 7: RegNo = X86::ST7; break;
      return Error(IntTok.getLoc(), "invalid stack index");

    if (isParsingIntelSyntax())
      return true;
    return Error(StartLoc, "invalid register name",

  return ParseRegister(Reg, StartLoc, EndLoc, false);

  bool Result = ParseRegister(Reg, StartLoc, EndLoc, true);
  bool PendingErrors = getParser().hasPendingError();
  getParser().clearPendingErrors();
std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
  bool Parse32 = is32BitMode() || Code16GCC;
  unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);

std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
  bool Parse32 = is32BitMode() || Code16GCC;
  unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);

bool X86AsmParser::IsSIReg(unsigned Reg) {

unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
  switch (RegClassID) {
  case X86::GR64RegClassID:
    return IsSIReg ? X86::RSI : X86::RDI;
  case X86::GR32RegClassID:
    return IsSIReg ? X86::ESI : X86::EDI;
  case X86::GR16RegClassID:
    return IsSIReg ? X86::SI : X86::DI;
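// Editor's note (illustration only): given the visible switch,
// GetSIDIForRegClass(X86::GR16RegClassID, Reg, /*IsSIReg=*/false) yields
// X86::DI, and the GR64 class maps to RSI/RDI; the helper normalizes a string
// instruction's SI/DI-style register to the requested register class.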
void X86AsmParser::AddDefaultSrcDestOperands(
    std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
  if (isParsingIntelSyntax()) {
    Operands.push_back(std::move(Dst));
    Operands.push_back(std::move(Src));
    Operands.push_back(std::move(Src));
    Operands.push_back(std::move(Dst));

bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
  if (OrigOperands.size() > 1) {
           "Operand size mismatch");

  int RegClassID = -1;
  for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
    if (FinalOp.isReg() &&
    if (FinalOp.isMem()) {
      if (!OrigOp.isMem())
        if (RegClassID != -1 &&
            !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
                  "mismatching source and destination index registers");
        if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
          RegClassID = X86::GR64RegClassID;
        else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
          RegClassID = X86::GR32RegClassID;
        else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
          RegClassID = X86::GR16RegClassID;

      bool IsSI = IsSIReg(FinalReg);
      FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
      if (FinalReg != OrigReg) {
        std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
            "memory operand is only for determining the size, " + RegName +
            " will be used for the location"));

  for (auto &WarningMsg : Warnings) {
    Warning(WarningMsg.first, WarningMsg.second);

  for (unsigned int i = 0; i < FinalOperands.size(); ++i)
  for (auto &Op : FinalOperands)

  if (isParsingIntelSyntax())
bool X86AsmParser::CreateMemForMSInlineAsm(unsigned SegReg, const MCExpr *Disp,
                                           unsigned BaseReg, unsigned IndexReg,
                                           unsigned Scale, bool NonAbsMem,
  unsigned FrontendSize = 0;
  void *Decl = nullptr;
  bool IsGlobalLV = false;
    FrontendSize = Info.Var.Type * 8;
    Decl = Info.Var.Decl;
    IsGlobalLV = Info.Var.IsGlobalLV;
  if (BaseReg || IndexReg) {
        End, Size, Identifier, Decl, 0, BaseReg && IndexReg));
      getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
      X86::RIP, Identifier, Decl, FrontendSize));

                                          IntelExprStateMachine &SM,
      !getParser().isParsingMasm())
  if (Name.equals_insensitive("not")) {
  } else if (Name.equals_insensitive("or")) {
  } else if (Name.equals_insensitive("shl")) {
  } else if (Name.equals_insensitive("shr")) {
  } else if (Name.equals_insensitive("xor")) {
  } else if (Name.equals_insensitive("and")) {
  } else if (Name.equals_insensitive("mod")) {
  } else if (Name.equals_insensitive("offset")) {
    SMLoc OffsetLoc = getTok().getLoc();
    const MCExpr *Val = nullptr;
    ParseError = ParseIntelOffsetOperator(Val, ID, Info, End);
    SM.onOffset(Val, OffsetLoc, ID, Info, isParsingMSInlineAsm(), ErrMsg);
  if (!Name.equals_insensitive("offset"))
    End = consumeToken();
                                         IntelExprStateMachine &SM,
  if (Name.equals_insensitive("eq")) {
  } else if (Name.equals_insensitive("ne")) {
  } else if (Name.equals_insensitive("lt")) {
  } else if (Name.equals_insensitive("le")) {
  } else if (Name.equals_insensitive("gt")) {
  } else if (Name.equals_insensitive("ge")) {
  End = consumeToken();

                                       IntelExprStateMachine &SM) {
    SM.setAppendAfterOperand();
bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
  if (getContext().getObjectFileInfo()->isPositionIndependent())
    bool UpdateLocLex = true;
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      return Error(getLexer().getErrLoc(), getLexer().getErr());
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
      if ((Done = SM.isValidEndState()))
      return Error(Tok.getLoc(), "unknown token in expression");
      UpdateLocLex = false;
      if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
        return Error(ValueLoc, "expected absolute value");
      if (SM.onInteger(Res, ErrMsg))
        return Error(ValueLoc, ErrMsg);
      UpdateLocLex = false;
      size_t DotOffset = Identifier.find_first_of('.');
      const AsmToken &NextTok = getLexer().peekTok();
        End = consumeToken();
      if (!ParseRegister(Reg, IdentLoc, End, true)) {
        if (SM.onRegister(Reg, ErrMsg))
          return Error(IdentLoc, ErrMsg);
        const std::pair<StringRef, StringRef> IDField =
        if (!Field.empty() &&
            !MatchRegisterByName(Reg, ID, IdentLoc, IDEndLoc)) {
          if (SM.onRegister(Reg, ErrMsg))
            return Error(IdentLoc, ErrMsg);
            return Error(FieldStartLoc, "unknown offset");
          else if (SM.onPlus(ErrMsg))
            return Error(getTok().getLoc(), ErrMsg);
          else if (SM.onInteger(Info.Offset, ErrMsg))
            return Error(IdentLoc, ErrMsg);
          SM.setTypeInfo(Info.Type);
          End = consumeToken();
      bool ParseError = false;
      if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
          ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
        if (ParseIntelDotOperator(SM, End))
      if (isParsingMSInlineAsm()) {
        if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
          if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
            if (SM.onInteger(Val, ErrMsg))
              return Error(IdentLoc, ErrMsg);
          return Error(IdentLoc, "expected identifier");
        if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
        else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
          return Error(IdentLoc, ErrMsg);
      if (unsigned OpKind = IdentifyMasmOperator(Identifier)) {
        if (ParseMasmOperator(OpKind, Val))
        if (SM.onInteger(Val, ErrMsg))
          return Error(IdentLoc, ErrMsg);
      if (!getParser().lookUpType(Identifier, FieldInfo.Type)) {
        getParser().parseIdentifier(Identifier);
        if (getParser().lookUpField(FieldInfo.Type.Name, Identifier,
          return Error(IdentLoc, "Unable to lookup field reference!",
        if (SM.onInteger(FieldInfo.Offset, ErrMsg))
          return Error(IdentLoc, ErrMsg);
      if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.Type)) {
        return Error(Tok.getLoc(), "Unexpected identifier!");
      } else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
        return Error(IdentLoc, ErrMsg);

      SMLoc Loc = getTok().getLoc();
      int64_t IntVal = getTok().getIntVal();
      End = consumeToken();
      UpdateLocLex = false;
        if (IDVal == "f" || IDVal == "b") {
              getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
          if (IDVal == "b" && Sym->isUndefined())
            return Error(Loc, "invalid reference to undefined symbol");
          if (SM.onIdentifierExpr(Val, Identifier, Info, Type,
                                  isParsingMSInlineAsm(), ErrMsg))
            return Error(Loc, ErrMsg);
          End = consumeToken();
          if (SM.onInteger(IntVal, ErrMsg))
            return Error(Loc, ErrMsg);
        if (SM.onInteger(IntVal, ErrMsg))
          return Error(Loc, ErrMsg);
      if (SM.onPlus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      if (SM.onMinus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      SM.onLShift();
      break;
      SM.onRShift();
      break;
        return Error(Tok.getLoc(), "unexpected bracket encountered");
      tryParseOperandIdx(PrevTK, SM);
      if (SM.onRBrac(ErrMsg)) {
      return Error(Tok.getLoc(), "unknown token in expression");
    if (!Done && UpdateLocLex)
      End = consumeToken();
void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
  unsigned ExprLen = End.getPointer() - Start.getPointer();
  if (SM.getSym() && !SM.isOffsetOperator()) {
    if (unsigned Len = SymName.data() - Start.getPointer())
    ExprLen = End.getPointer() - (SymName.data() + SymName.size());
    if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
  if (SM.getBaseReg())
  if (SM.getIndexReg())
  if (SM.isOffsetOperator())
    OffsetNameStr = SM.getSymName();
  IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
                 SM.getImm(), SM.isMemExpr());
  InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);

bool X86AsmParser::ParseIntelInlineAsmIdentifier(
    bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) {
  assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly.");
      SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info,
                                              IsUnevaluatedOperand);
    } while (End.getPointer() < EndPtr);
         "frontend claimed part of a token?");
        SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
    assert(InternalName.size() && "We should have an internal name here.");
    if (!IsParsingOffsetOperator)
  MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
  const SMLoc consumedToken = consumeToken();
    return Error(Tok.getLoc(), "Expected an identifier after {");
          .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
          .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
          .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
          .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
      return Error(Tok.getLoc(), "Invalid rounding mode.");
      return Error(Tok.getLoc(), "Expected - at this point");
      return Error(Tok.getLoc(), "Expected } at this point");
    const MCExpr *RndModeOp =
      return Error(Tok.getLoc(), "Expected } at this point");
  return Error(Tok.getLoc(), "unknown token in expression");

    return Error(Tok.getLoc(), "Expected { at this point");
    return Error(Tok.getLoc(), "Expected dfv at this point");
    return Error(Tok.getLoc(), "Expected = at this point");
  unsigned CFlags = 0;
  for (unsigned I = 0; I < 4; ++I) {
      return Error(Tok.getLoc(), "Invalid conditional flags");
      return Error(Tok.getLoc(), "Duplicated conditional flag");
    } else if (I == 3) {
      return Error(Tok.getLoc(), "Expected } at this point");
      return Error(Tok.getLoc(), "Expected } or , at this point");
bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
  } else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
      TrailingDot = DotDispStr.substr(DotDispStr.size() - 1);
    const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
    if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
        getParser().lookUpField(SM.getSymName(), DotDispStr, Info) &&
        getParser().lookUpField(DotDispStr, Info) &&
         SemaCallback->LookupInlineAsmField(Base, Member, Info.Offset)))
      return Error(Tok.getLoc(), "Unable to lookup field reference!");
    return Error(Tok.getLoc(), "Unexpected token type!");

  const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
  if (!TrailingDot.empty())
  SM.addImm(Info.Offset);
  SM.setTypeInfo(Info.Type);
  SMLoc Start = Lex().getLoc();
  ID = getTok().getString();
  if (!isParsingMSInlineAsm()) {
        getParser().parsePrimaryExpr(Val, End, nullptr))
      return Error(Start, "unexpected token!");
  } else if (ParseIntelInlineAsmIdentifier(Val, ID, Info, false, End, true)) {
    return Error(Start, "unable to lookup expression");
    return Error(Start, "offset operator cannot yet handle constants");

unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
      .Cases("TYPE", "type", IOK_TYPE)
      .Cases("SIZE", "size", IOK_SIZE)
      .Cases("LENGTH", "length", IOK_LENGTH)

unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
  const MCExpr *Val = nullptr;
  if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
    Error(Start, "unable to lookup expression");
  case IOK_LENGTH: CVal = Info.Var.Length; break;
  case IOK_SIZE: CVal = Info.Var.Size; break;
  case IOK_TYPE: CVal = Info.Var.Type; break;

unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
      .Case("type", MOK_TYPE)
      .Cases("size", "sizeof", MOK_SIZEOF)
      .Cases("length", "lengthof", MOK_LENGTHOF)

bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
  if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
    const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.getTok();
    IntelExprStateMachine SM;
    if (ParseIntelExpression(SM, End))
      Val = SM.getLength();
      Val = SM.getElementSize();
      return Error(OpLoc, "expression has unknown type", SMRange(Start, End));
bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
      .Cases("BYTE", "byte", 8)
      .Cases("WORD", "word", 16)
      .Cases("DWORD", "dword", 32)
      .Cases("FLOAT", "float", 32)
      .Cases("LONG", "long", 32)
      .Cases("FWORD", "fword", 48)
      .Cases("DOUBLE", "double", 64)
      .Cases("QWORD", "qword", 64)
      .Cases("MMWORD", "mmword", 64)
      .Cases("XWORD", "xword", 80)
      .Cases("TBYTE", "tbyte", 80)
      .Cases("XMMWORD", "xmmword", 128)
      .Cases("YMMWORD", "ymmword", 256)
      .Cases("ZMMWORD", "zmmword", 512)
      return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");

  if (ParseIntelMemoryOperandSize(Size))
      return ParseRoundingModeOp(Start, Operands);
    if (RegNo == X86::RIP)
      return Error(Start, "rip can only be used as a base register");
      return Error(Start, "expected memory operand after 'ptr', "
                          "found register operand instead");
    if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
      return Error(Start, "invalid segment register");
      Start = Lex().getLoc();

  IntelExprStateMachine SM;
  if (ParseIntelExpression(SM, End))
  if (isParsingMSInlineAsm())
    RewriteIntelExpression(SM, Start, Tok.getLoc());

  int64_t Imm = SM.getImm();
  const MCExpr *Disp = SM.getSym();
  if (!SM.isMemExpr() && !RegNo) {
    if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
          SM.getSymName(), Info.Var.Decl,
          Info.Var.IsGlobalLV));

  unsigned BaseReg = SM.getBaseReg();
  unsigned IndexReg = SM.getIndexReg();
  if (IndexReg && BaseReg == X86::RIP)
  unsigned Scale = SM.getScale();
    Size = SM.getElementSize() << 3;

  if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
      (IndexReg == X86::ESP || IndexReg == X86::RSP))
      !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
      (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
    return Error(Start, "16-bit addresses cannot have a scale");

  if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
      (IndexReg == X86::BX || IndexReg == X86::BP))

  if ((BaseReg || IndexReg) &&
    return Error(Start, ErrMsg);
  bool IsUnconditionalBranch =
      Name.equals_insensitive("jmp") || Name.equals_insensitive("call");
  if (isParsingMSInlineAsm())
    return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale,
                                   IsUnconditionalBranch && is64BitMode(),
                                   Start, End, Size, SM.getSymName(),

  unsigned DefaultBaseReg = X86::NoRegister;
  bool MaybeDirectBranchDest = true;
    if (is64BitMode() && SM.getElementSize() > 0) {
      DefaultBaseReg = X86::RIP;
    if (IsUnconditionalBranch) {
        MaybeDirectBranchDest = false;
          DefaultBaseReg = X86::RIP;
    } else if (!BaseReg && !IndexReg && Disp &&
      if (is64BitMode()) {
        if (SM.getSize() == 8) {
          MaybeDirectBranchDest = false;
          DefaultBaseReg = X86::RIP;
        if (SM.getSize() == 4 || SM.getSize() == 2)
          MaybeDirectBranchDest = false;
  } else if (IsUnconditionalBranch) {
    if (!PtrInOperand && SM.isOffsetOperator())
          Start, "`OFFSET` operator cannot be used in an unconditional branch");
    if (PtrInOperand || SM.isBracketUsed())
      MaybeDirectBranchDest = false;

  if ((BaseReg || IndexReg || RegNo || DefaultBaseReg != X86::NoRegister))
        getPointerWidth(), RegNo, Disp, BaseReg, IndexReg, Scale, Start, End,
        0, false, MaybeDirectBranchDest));
        MaybeDirectBranchDest));
  switch (getLexer().getKind()) {
              "expected immediate expression") ||
        getParser().parseExpression(Val, End) ||
        check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
      return ParseRoundingModeOp(Start, Operands);

    const MCExpr *Expr = nullptr;
    if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
      Reg = RE->getRegNo();
      if (Reg == X86::EIZ || Reg == X86::RIZ)
            Loc, "%eiz and %riz can only be used as index registers",
      if (Reg == X86::RIP)
        return Error(Loc, "%rip can only be used as a base register",
      if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
        return Error(Loc, "invalid segment register");
      return ParseMemOperand(Reg, Expr, Loc, EndLoc, Operands);

bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
                          const SMLoc &StartLoc) {
        (getLexer().getTok().getIdentifier() == "z")))
      return Error(getLexer().getLoc(), "Expected } at this point");

  const SMLoc consumedToken = consumeToken();
    if (getLexer().getTok().getIntVal() != 1)
      return TokError("Expected 1to<NUM> at this point");
      return TokError("Expected 1to<NUM> at this point");
    StringRef BroadcastString = (Prefix + getLexer().getTok().getIdentifier())
      return TokError("Expected 1to<NUM> at this point");
    const char *BroadcastPrimitive =
            .Case("1to2", "{1to2}")
            .Case("1to4", "{1to4}")
            .Case("1to8", "{1to8}")
            .Case("1to16", "{1to16}")
            .Case("1to32", "{1to32}")
    if (!BroadcastPrimitive)
      return TokError("Invalid memory broadcast primitive.");
      return TokError("Expected } at this point");

      std::unique_ptr<X86Operand> Z;
      if (ParseZ(Z, consumedToken))
      SMLoc StartLoc = Z ? consumeToken() : consumedToken;
      if (!parseRegister(RegNo, RegLoc, StartLoc) &&
          X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
        if (RegNo == X86::K0)
          return Error(RegLoc, "Register k0 can't be used as write mask");
          return Error(getLexer().getLoc(), "Expected } at this point");
        return Error(getLexer().getLoc(),
                     "Expected an op-mask register at this point");
          if (ParseZ(Z, consumeToken()) || !Z)
            return Error(getLexer().getLoc(),
                         "Expected a {z} mark at this point");
bool X86AsmParser::ParseMemOperand(unsigned SegReg, const MCExpr *Disp,
  auto isAtMemOperand = [this]() {
    auto TokCount = this->getLexer().peekTokens(Buf, true);
    switch (Buf[0].getKind()) {
      if ((TokCount > 1) &&
          (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
                       Buf[1].getIdentifier().size() + 1);
      MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
      if (Sym->isVariable()) {
        auto V = Sym->getVariableValue(false);
        return isa<X86MCExpr>(V);

  if (!isAtMemOperand()) {
    assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
        0, 0, 1, StartLoc, EndLoc));

  unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
  SMLoc BaseLoc = getLexer().getLoc();
        check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
    BaseReg = cast<X86MCExpr>(E)->getRegNo();
    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
      return Error(BaseLoc, "eiz and riz can only be used as index registers",

      if (!isa<X86MCExpr>(E)) {
        if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
          return Error(Loc, "expected absolute expression");
          Warning(Loc, "scale factor without index register is ignored");
        IndexReg = cast<X86MCExpr>(E)->getRegNo();
        if (BaseReg == X86::RIP)
                       "%rip as base register can not have an index register");
        if (IndexReg == X86::RIP)
          return Error(Loc, "%rip is not allowed as an index register");
            return Error(Loc, "expected scale expression");
          if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
            return Error(Loc, "scale factor in 16-bit address must be 1");
    return Error(Loc, ErrMsg);

  if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
      isa<MCConstantExpr>(Disp) &&
      cast<MCConstantExpr>(Disp)->getValue() == 0) {
    return Error(BaseLoc, ErrMsg);

  if (BaseReg || IndexReg) {
    if (auto CE = dyn_cast<MCConstantExpr>(Disp)) {
      auto Imm = CE->getValue();
      bool Is64 = X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) ||
                  X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg);
      bool Is16 = X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg);
        if (!isInt<32>(Imm))
          return Error(BaseLoc, "displacement " + Twine(Imm) +
                                    " is not within [-2147483648, 2147483647]");
                  " shortened to 32-bit signed " +
                  Twine(static_cast<int32_t>(Imm)));
                  " shortened to 16-bit signed " +
                  Twine(static_cast<int16_t>(Imm)));

  if (SegReg || BaseReg || IndexReg)
        BaseReg, IndexReg, Scale, StartLoc,
bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
    if (parseRegister(RegNo, StartLoc, EndLoc))

  ForcedOpcodePrefix = OpcodePrefix_Default;
  ForcedDispEncoding = DispEncoding_Default;
  UseApxExtendedReg = false;
  ForcedNoFlag = false;

    if (Prefix == "rex")
      ForcedOpcodePrefix = OpcodePrefix_REX;
    else if (Prefix == "rex2")
      ForcedOpcodePrefix = OpcodePrefix_REX2;
    else if (Prefix == "vex")
      ForcedOpcodePrefix = OpcodePrefix_VEX;
    else if (Prefix == "vex2")
      ForcedOpcodePrefix = OpcodePrefix_VEX2;
    else if (Prefix == "vex3")
      ForcedOpcodePrefix = OpcodePrefix_VEX3;
    else if (Prefix == "evex")
      ForcedOpcodePrefix = OpcodePrefix_EVEX;
    else if (Prefix == "disp8")
      ForcedDispEncoding = DispEncoding_Disp8;
    else if (Prefix == "disp32")
      ForcedDispEncoding = DispEncoding_Disp32;
    else if (Prefix == "nf")
      ForcedNoFlag = true;
      return Error(NameLoc, "unknown prefix");
  if (isParsingMSInlineAsm()) {
    if (Name.equals_insensitive("vex"))
      ForcedOpcodePrefix = OpcodePrefix_VEX;
    else if (Name.equals_insensitive("vex2"))
      ForcedOpcodePrefix = OpcodePrefix_VEX2;
    else if (Name.equals_insensitive("vex3"))
      ForcedOpcodePrefix = OpcodePrefix_VEX3;
    else if (Name.equals_insensitive("evex"))
      ForcedOpcodePrefix = OpcodePrefix_EVEX;
    if (ForcedOpcodePrefix != OpcodePrefix_Default) {

  if (Name.consume_back(".d32")) {
    ForcedDispEncoding = DispEncoding_Disp32;
  } else if (Name.consume_back(".d8")) {
    ForcedDispEncoding = DispEncoding_Disp8;

  if (isParsingIntelSyntax() &&
      (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
       PatchedName == "jcxz" || PatchedName == "jecxz" ||
          : NextTok == "short") {
                                           NextTok.size() + 1);

      PatchedName != "setzub" && PatchedName != "setzunb" &&
      PatchedName != "setb" && PatchedName != "setnb")
    PatchedName = PatchedName.substr(0, Name.size()-1);
  unsigned ComparisonPredicate = ~0U;

    bool IsVCMP = PatchedName[0] == 'v';
    unsigned CCIdx = IsVCMP ? 4 : 3;
        PatchedName.slice(CCIdx, PatchedName.size() - 2))
        .Case("eq_oq", 0x00)
        .Case("lt_os", 0x01)
        .Case("le_os", 0x02)
        .Case("unord", 0x03)
        .Case("unord_q", 0x03)
        .Case("neq_uq", 0x04)
        .Case("nlt_us", 0x05)
        .Case("nle_us", 0x06)
        .Case("ord_q", 0x07)
        .Case("eq_uq", 0x08)
        .Case("nge_us", 0x09)
        .Case("ngt_us", 0x0A)
        .Case("false", 0x0B)
        .Case("false_oq", 0x0B)
        .Case("neq_oq", 0x0C)
        .Case("ge_os", 0x0D)
        .Case("gt_os", 0x0E)
        .Case("true_uq", 0x0F)
        .Case("eq_os", 0x10)
        .Case("lt_oq", 0x11)
        .Case("le_oq", 0x12)
        .Case("unord_s", 0x13)
        .Case("neq_us", 0x14)
        .Case("nlt_uq", 0x15)
        .Case("nle_uq", 0x16)
        .Case("ord_s", 0x17)
        .Case("eq_us", 0x18)
        .Case("nge_uq", 0x19)
        .Case("ngt_uq", 0x1A)
        .Case("false_os", 0x1B)
        .Case("neq_os", 0x1C)
        .Case("ge_oq", 0x1D)
        .Case("gt_oq", 0x1E)
        .Case("true_us", 0x1F)
        PatchedName = IsVCMP ? "vcmpss" : "cmpss";
        PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
        PatchedName = IsVCMP ? "vcmpps" : "cmpps";
        PatchedName = IsVCMP ? "vcmppd" : "cmppd";
        PatchedName = "vcmpsh";
        PatchedName = "vcmpph";
      ComparisonPredicate = CC;
      (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
       PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
    unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
        PatchedName.slice(5, PatchedName.size() - SuffixSize))
    if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
      switch (PatchedName.back()) {
      case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
      case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
      case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
      case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
      ComparisonPredicate = CC;

      (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
       PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
    unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
        PatchedName.slice(5, PatchedName.size() - SuffixSize))
      switch (PatchedName.back()) {
      case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
      case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
      case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
      case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
      ComparisonPredicate = CC;
          .Cases("cs", "ds", "es", "fs", "gs", "ss", true)
          .Cases("rex64", "data32", "data16", "addr32", "addr16", true)
          .Cases("xacquire", "xrelease", true)
          .Cases("acquire", "release", isParsingIntelSyntax())

  auto isLockRepeatNtPrefix = [](StringRef N) {
        .Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)

  bool CurlyAsEndOfStatement = false;
  while (isLockRepeatNtPrefix(Name.lower())) {

    while (Name.starts_with(";") || Name.starts_with("\n") ||
           Name.starts_with("#") || Name.starts_with("\t") ||
           Name.starts_with("/")) {

  if (PatchedName == "data16" && is16BitMode()) {
    return Error(NameLoc, "redundant data16 prefix");
  if (PatchedName == "data32") {
      return Error(NameLoc, "redundant data32 prefix");
      return Error(NameLoc, "'data32' is not supported in 64-bit mode");
    PatchedName = "data16";

      if (Next == "callw")
      if (Next == "ljmpw")
        ForcedDataPrefix = X86::Is32Bit;

  if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
                                             getParser().getContext());

  if ((Name.starts_with("ccmp") || Name.starts_with("ctest")) &&

    CurlyAsEndOfStatement =
        isParsingIntelSyntax() && isParsingMSInlineAsm() &&
      return TokError("unexpected token in argument list");

  if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
                                             getParser().getContext());
  else if (CurlyAsEndOfStatement)
                                       getLexer().getTok().getLoc(), 0);
  if (IsFp && Operands.size() == 1) {
        .Case("fsub", "fsubp")
        .Case("fdiv", "fdivp")
        .Case("fsubr", "fsubrp")
        .Case("fdivr", "fdivrp");

  if ((Name == "mov" || Name == "movw" || Name == "movl") &&
      X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
    if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
      Name = is16BitMode() ? "movw" : "movl";
  if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||

  bool HadVerifyError = false;

  if (Name.starts_with("ins") &&
    AddDefaultSrcDestOperands(TmpOperands,
                              DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.starts_with("outs") &&
      (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
       Name == "outsd" || Name == "outs")) {
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.starts_with("lods") &&
      (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
       Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
    TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.starts_with("stos") &&
      (Name == "stos" || Name == "stosb" || Name == "stosw" ||
       Name == "stosl" || Name == "stosd" || Name == "stosq")) {
    TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.starts_with("scas") &&
      (Name == "scas" || Name == "scasb" || Name == "scasw" ||
       Name == "scasl" || Name == "scasd" || Name == "scasq")) {
    TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (Name.starts_with("cmps") &&
      (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
       Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
                              DefaultMemSIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
  if (((Name.starts_with("movs") &&
        (Name == "movs" || Name == "movsb" || Name == "movsw" ||
         Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
       (Name.starts_with("smov") &&
        (Name == "smov" || Name == "smovb" || Name == "smovw" ||
         Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
    if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
    AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
                              DefaultMemDIOperand(NameLoc));
    HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);

  if (HadVerifyError) {
    return HadVerifyError;

              "size, (R|E)BX will be used for the location");
  if (I == Table.end() || I->OldOpc != Opcode)

  if (X86::isBLENDVPD(Opcode) || X86::isBLENDVPS(Opcode) ||
      X86::isPBLENDVB(Opcode))

  if (ForcedOpcodePrefix != OpcodePrefix_VEX3 &&

  auto replaceWithCCMPCTEST = [&](unsigned Opcode) -> bool {
    if (ForcedOpcodePrefix == OpcodePrefix_EVEX) {

  default:
    return false;

    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);

    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);

#define FROM_TO(FROM, TO)                                                      \
    return replaceWithCCMPCTEST(X86::TO);
    FROM_TO(CMP64mi32, CCMP64mi32)
    FROM_TO(CMP64ri32, CCMP64ri32)
    FROM_TO(TEST64mi32, CTEST64mi32)
    FROM_TO(TEST64ri32, CTEST64ri32)
  using namespace X86;
  uint64_t TSFlags = MII.get(Opcode).TSFlags;
  if (isVFCMADDCPH(Opcode) || isVFCMADDCSH(Opcode) || isVFMADDCPH(Opcode) ||
      isVFMADDCSH(Opcode)) {
      return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                            "distinct from source registers");
  } else if (isVFCMULCPH(Opcode) || isVFCMULCSH(Opcode) || isVFMULCPH(Opcode) ||
             isVFMULCSH(Opcode)) {
      return Warning(Ops[0]->getStartLoc(), "Destination register should be "
                                            "distinct from source registers");
  } else if (isV4FMADDPS(Opcode) || isV4FMADDSS(Opcode) ||
             isV4FNMADDPS(Opcode) || isV4FNMADDSS(Opcode) ||
             isVP4DPWSSDS(Opcode) || isVP4DPWSSD(Opcode)) {
    unsigned Src2Enc = MRI->getEncodingValue(Src2);
    if (Src2Enc % 4 != 0) {
      unsigned GroupStart = (Src2Enc / 4) * 4;
      unsigned GroupEnd = GroupStart + 3;
      return Warning(Ops[0]->getStartLoc(),
                     "source register '" + RegName + "' implicitly denotes '" +
  } else if (isVGATHERDPD(Opcode) || isVGATHERDPS(Opcode) ||
             isVGATHERQPD(Opcode) || isVGATHERQPS(Opcode) ||
             isVPGATHERDD(Opcode) || isVPGATHERDQ(Opcode) ||
             isVPGATHERQD(Opcode) || isVPGATHERQQ(Opcode)) {
      unsigned Index = MRI->getEncodingValue(
        return Warning(Ops[0]->getStartLoc(), "index and destination registers "
                                              "should be distinct");
      unsigned Index = MRI->getEncodingValue(
      if (Dest == Mask || Dest == Index || Mask == Index)
        return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
                                              "registers should be distinct");
  } else if (isTCMMIMFP16PS(Opcode) || isTCMMRLFP16PS(Opcode) ||
             isTDPBF16PS(Opcode) || isTDPFP16PS(Opcode) || isTDPBSSD(Opcode) ||
             isTDPBSUD(Opcode) || isTDPBUSD(Opcode) || isTDPBUUD(Opcode)) {
    if (SrcDest == Src1 || SrcDest == Src2 || Src1 == Src2)
      return Error(Ops[0]->getStartLoc(), "all tmm registers must be distinct");

  for (unsigned i = 0; i != NumOps; ++i) {
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
  if (UsesRex && HReg != X86::NoRegister) {
    return Error(Ops[0]->getStartLoc(),
                 "can't encode '" + RegName + "' in an instruction requiring "

  if ((Opcode == X86::PREFETCHIT0 || Opcode == X86::PREFETCHIT1)) {
        Ops[0]->getStartLoc(),
                            : "'prefetchit1'")) +
            " only supports RIP-relative address");
void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
  Warning(Loc, "Instruction may be vulnerable to LVI and "
               "requires manual mitigation");
  Note(SMLoc(), "See https://software.intel.com/"
                "security-software-guidance/insights/"
                "deep-dive-load-value-injection#specialinstructions"
                " for more information");

  bool Parse32 = is32BitMode() || Code16GCC;
      is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
  ShlMemOp->addMemOperands(ShlInst, 5);

    emitWarningForSpecialLVIInstruction(Inst.getLoc());
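// Editor's note (hedged): the fragments above belong to the LVI
// control-flow-integrity mitigation path; instructions that cannot be
// rewritten automatically are only flagged through
// emitWarningForSpecialLVIInstruction(), which points at Intel's LVI
// deep-dive guidance.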
void X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
    emitWarningForSpecialLVIInstruction(Inst.getLoc());
  } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
    emitWarningForSpecialLVIInstruction(Inst.getLoc());

      getSTI().hasFeature(X86::FeatureLVIControlFlowIntegrity))
    applyLVICFIMitigation(Inst, Out);

      getSTI().hasFeature(X86::FeatureLVILoadHardening))
    applyLVILoadHardeningMitigation(Inst, Out);

  unsigned Result = 0;
    if (Prefix.isPrefix()) {
      Result = Prefix.getPrefix();
bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           bool MatchingInlineAsm) {
  assert((*Operands[0]).isToken() &&
         "Leading operand should always be a mnemonic!");
                      Out, MatchingInlineAsm);

  if (ForcedOpcodePrefix == OpcodePrefix_REX)
  else if (ForcedOpcodePrefix == OpcodePrefix_REX2)
  else if (ForcedOpcodePrefix == OpcodePrefix_VEX)
  else if (ForcedOpcodePrefix == OpcodePrefix_VEX2)
  else if (ForcedOpcodePrefix == OpcodePrefix_VEX3)
  else if (ForcedOpcodePrefix == OpcodePrefix_EVEX)

  if (ForcedDispEncoding == DispEncoding_Disp8)
  else if (ForcedDispEncoding == DispEncoding_Disp32)

  return isParsingIntelSyntax()
             ? matchAndEmitIntelInstruction(IDLoc, Opcode, Inst, Operands, Out,
             : matchAndEmitATTInstruction(IDLoc, Opcode, Inst, Operands, Out,

                                        bool MatchingInlineAsm) {
          .Case("finit", "fninit")
          .Case("fsave", "fnsave")
          .Case("fstcw", "fnstcw")
          .Case("fstcww", "fnstcw")
          .Case("fstenv", "fnstenv")
          .Case("fstsw", "fnstsw")
          .Case("fstsww", "fnstsw")
          .Case("fclex", "fnclex")
4182 if (!MatchingInlineAsm)
4188bool X86AsmParser::ErrorMissingFeature(
SMLoc IDLoc,
4190 bool MatchingInlineAsm) {
4191 assert(MissingFeatures.
any() &&
"Unknown missing feature!");
4194 OS <<
"instruction requires:";
4195 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i) {
4196 if (MissingFeatures[i])
unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
    return Match_Unsupported;
  if (ForcedNoFlag == !(TSFlags & X86II::EVEX_NF) && !X86::isCFCMOVCC(Opc))
    return Match_Unsupported;

  switch (ForcedOpcodePrefix) {
  case OpcodePrefix_Default:
  case OpcodePrefix_REX:
  case OpcodePrefix_REX2:
      return Match_Unsupported;
  case OpcodePrefix_VEX:
  case OpcodePrefix_VEX2:
  case OpcodePrefix_VEX3:
      return Match_Unsupported;
  case OpcodePrefix_EVEX:
        !X86::isCMP(Opc) && !X86::isTEST(Opc))
      return Match_Unsupported;
    return Match_Unsupported;

      (ForcedOpcodePrefix != OpcodePrefix_VEX &&
       ForcedOpcodePrefix != OpcodePrefix_VEX2 &&
       ForcedOpcodePrefix != OpcodePrefix_VEX3))
    return Match_Unsupported;

  return Match_Success;
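// Illustrative: these prefix checks back the {rex}/{rex2}/{vex}/{vex2}/{vex3}/
// {evex}/{nf} pseudo prefixes, e.g. (assumed input)
//   {vex} vaddps %xmm2, %xmm1, %xmm0
// If the matched opcode cannot honour the requested encoding, the match is
// reported as Match_Unsupported rather than silently falling back to a
// different form.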
bool X86AsmParser::matchAndEmitATTInstruction(
  SMRange EmptyRange = std::nullopt;
  if (ForcedDataPrefix == X86::Is32Bit)
    SwitchMode(X86::Is32Bit);
                       MissingFeatures, MatchingInlineAsm,
                       isParsingIntelSyntax());
  if (ForcedDataPrefix == X86::Is32Bit) {
    SwitchMode(X86::Is16Bit);
    ForcedDataPrefix = 0;

  switch (OriginalError) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
    if (!MatchingInlineAsm)
  case Match_InvalidImmUnsignedi4: {
    if (ErrorLoc == SMLoc())
    return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
                 EmptyRange, MatchingInlineAsm);
  case Match_MissingFeature:
    return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
  case Match_InvalidOperand:
  case Match_MnemonicFail:
  case Match_Unsupported:

  if (Op.getToken().empty()) {
    Error(IDLoc, "instruction must have size higher than 0", EmptyRange,
  Op.setTokenValue(Tmp);

  const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
  const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0";

  bool HasVectorReg = false;
      HasVectorReg = true;
    else if (X86Op->isMem()) {
      assert(MemOp->Mem.Size == 0 && "Memory size always 0 under ATT syntax");

  for (unsigned I = 0, E = std::size(Match); I != E; ++I) {
    Tmp.back() = Suffixes[I];
    if (MemOp && HasVectorReg)
      MemOp->Mem.Size = MemSize[I];
    Match[I] = Match_MnemonicFail;
    if (MemOp || !HasVectorReg) {
          MatchInstruction(Operands, Inst, ErrorInfoIgnore, MissingFeatures,
                           MatchingInlineAsm, isParsingIntelSyntax());
      if (Match[I] == Match_MissingFeature)
        ErrorInfoMissingFeatures = MissingFeatures;

  if (NumSuccessfulMatches == 1) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
    if (!MatchingInlineAsm)

  if (NumSuccessfulMatches > 1) {
    unsigned NumMatches = 0;
    for (unsigned I = 0, E = std::size(Match); I != E; ++I)
      if (Match[I] == Match_Success)
        MatchChars[NumMatches++] = Suffixes[I];

    OS << "ambiguous instructions require an explicit suffix (could be ";
    for (unsigned i = 0; i != NumMatches; ++i) {
      if (i + 1 == NumMatches)
      OS << "'" << Base << MatchChars[i] << "'";
    Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);

  if (OriginalError == Match_MnemonicFail)
    return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
                 Op.getLocRange(), MatchingInlineAsm);

  if (OriginalError == Match_Unsupported)
    return Error(IDLoc, "unsupported instruction", EmptyRange,

  assert(OriginalError == Match_InvalidOperand && "Unexpected error");

    return Error(IDLoc, "too few operands for instruction", EmptyRange,
                   OperandRange, MatchingInlineAsm);
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
    return Error(IDLoc, "unsupported instruction", EmptyRange,
  return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
  Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
        EmptyRange, MatchingInlineAsm);
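// Illustrative diagnostics from this path (assumed AT&T input): with no size
// suffix and only a memory operand,
//   inc (%rax)
// matches at several widths, so the loop over the "bwlq" suffixes above ends
// in an error along the lines of "ambiguous instructions require an explicit
// suffix (could be 'incb', 'incw', 'incl', or 'incq')"; writing incl (%rax)
// disambiguates it.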
bool X86AsmParser::matchAndEmitIntelInstruction(
  SMRange EmptyRange = std::nullopt;
      UnsizedMemOp = X86Op;

    static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
    for (const char *Instr : PtrSizedInstrs) {
      if (Mnemonic == Instr) {
        UnsizedMemOp->Mem.Size = getPointerWidth();

  if (Mnemonic == "push" && Operands.size() == 2) {
    if (X86Op->isImm()) {
      const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
      unsigned Size = getPointerWidth();
        Tmp += (is64BitMode())
                   : (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
        Op.setTokenValue(Tmp);
                         MissingFeatures, MatchingInlineAsm,

    static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
    for (unsigned Size : MopSizes) {
      unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
                                    MissingFeatures, MatchingInlineAsm,
                                    isParsingIntelSyntax());
      if (Match.back() == Match_MissingFeature)
        ErrorInfoMissingFeatures = MissingFeatures;

  if (Match.empty()) {
    Match.push_back(MatchInstruction(
                                     isParsingIntelSyntax()));
    if (Match.back() == Match_MissingFeature)
      ErrorInfoMissingFeatures = MissingFeatures;

  if (Match.back() == Match_MnemonicFail) {
    return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
                 Op.getLocRange(), MatchingInlineAsm);

  if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
    unsigned M = MatchInstruction(
                                  isParsingIntelSyntax());
    if (M == Match_Success)
      NumSuccessfulMatches = 1;
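// Illustrative Intel-syntax counterpart of the retry loop above (assumed
// input): an unsized reference such as
//   inc [rax]
// matches at several operand sizes and runs into the "ambiguous operand size"
// error emitted below, while an explicit size, e.g.
//   inc dword ptr [rax]
// matches exactly once.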
  if (NumSuccessfulMatches == 1) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
    if (!MatchingInlineAsm)
  } else if (NumSuccessfulMatches > 1) {
           "multiple matches only possible with unsized memory operands");
                 "ambiguous operand size for instruction '" + Mnemonic + "\'",
    return Error(IDLoc, "unsupported instruction", EmptyRange,
    return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
    if (ErrorLoc == SMLoc())
    return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
                 EmptyRange, MatchingInlineAsm);
  return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
  return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
}
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
    return parseDirectiveArch();
    return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
      return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
                                         "supported: registers must have a "
                                         "'%' prefix in .att_syntax");
    getParser().setAssemblerDialect(0);
    getParser().setAssemblerDialect(1);
      return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
                                         "supported: registers must not have "
                                         "a '%' prefix in .intel_syntax");
  } else if (IDVal == ".nops")
    return parseDirectiveNops(DirectiveID.getLoc());
  else if (IDVal == ".even")
    return parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_proc")
    return parseDirectiveFPOProc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_setframe")
    return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_pushreg")
    return parseDirectiveFPOPushReg(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalloc")
    return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalign")
    return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endprologue")
    return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endproc")
    return parseDirectiveFPOEndProc(DirectiveID.getLoc());
  else if (IDVal == ".seh_pushreg" ||
    return parseDirectiveSEHPushReg(DirectiveID.getLoc());
  else if (IDVal == ".seh_setframe" ||
    return parseDirectiveSEHSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".seh_savereg" ||
    return parseDirectiveSEHSaveReg(DirectiveID.getLoc());
  else if (IDVal == ".seh_savexmm" ||
    return parseDirectiveSEHSaveXMM(DirectiveID.getLoc());
  else if (IDVal == ".seh_pushframe" ||
    return parseDirectiveSEHPushFrame(DirectiveID.getLoc());
bool X86AsmParser::parseDirectiveArch() {
  getParser().parseStringToEndOfStatement();
bool X86AsmParser::parseDirectiveNops(SMLoc L) {
  int64_t NumBytes = 0, Control = 0;
  SMLoc NumBytesLoc, ControlLoc;
  NumBytesLoc = getTok().getLoc();
  if (getParser().checkForValidSection() ||
      getParser().parseAbsoluteExpression(NumBytes))
    ControlLoc = getTok().getLoc();
    if (getParser().parseAbsoluteExpression(Control))
  if (getParser().parseEOL())

  if (NumBytes <= 0) {
    Error(NumBytesLoc, "'.nops' directive with non-positive size");
    Error(ControlLoc, "'.nops' directive with negative NOP size");

  getParser().getStreamer().emitNops(NumBytes, Control, L, STI);
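// Illustrative usage of the directive parsed above (assumed):
//   .nops 16, 8
// requests 16 bytes of padding assembled from NOPs no longer than 8 bytes
// each; a non-positive total size or a negative NOP size is rejected with the
// errors shown.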
bool X86AsmParser::parseDirectiveEven(SMLoc L) {
    getStreamer().initSections(false, getSTI());
    Section = getStreamer().getCurrentSectionOnly();
    getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
    getStreamer().emitValueToAlignment(Align(2), 0, 1, 0);
  if (IDVal == ".code16") {
    if (!is16BitMode()) {
      SwitchMode(X86::Is16Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
  } else if (IDVal == ".code16gcc") {
    if (!is16BitMode()) {
      SwitchMode(X86::Is16Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
  } else if (IDVal == ".code32") {
    if (!is32BitMode()) {
      SwitchMode(X86::Is32Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
  } else if (IDVal == ".code64") {
    if (!is64BitMode()) {
      SwitchMode(X86::Is64Bit);
      getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
    Error(L, "unknown directive " + IDVal);
bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
    return Parser.TokError("expected symbol name");
  if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
    return Parser.TokError("parameters size out of range");

  MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
  return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
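// Illustrative usage (assumed): a codeview FPO procedure record for a function
// taking 8 bytes of parameters would start with
//   .cv_fpo_proc _foo 8
// i.e. a symbol name followed by the parameter byte count validated above.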
bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
  if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
  return getTargetStreamer().emitFPOSetFrame(Reg, L);

bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
  if (parseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
  return getTargetStreamer().emitFPOPushReg(Reg, L);

bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
  return getTargetStreamer().emitFPOStackAlloc(Offset, L);

bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
  return getTargetStreamer().emitFPOStackAlign(Offset, L);

bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
  return getTargetStreamer().emitFPOEndPrologue(L);

bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
  return getTargetStreamer().emitFPOEndProc(L);
bool X86AsmParser::parseSEHRegisterNumber(unsigned RegClassID,
  SMLoc startLoc = getLexer().getLoc();
    if (parseRegister(RegNo, startLoc, endLoc))

    if (!X86MCRegisterClasses[RegClassID].contains(RegNo)) {
      return Error(startLoc,
                   "register is not supported for use with this directive");
    if (getParser().parseAbsoluteExpression(EncodedReg))

    for (MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
      if (MRI->getEncodingValue(Reg) == EncodedReg) {

      return Error(startLoc,
                   "incorrect register number for use with this directive");
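// Illustrative (assumed input): the Win64 directives below accept either a
// register name or its raw encoding, so
//   .seh_pushreg %rbp
//   .seh_pushreg 5
// name the same GR64 register, while anything outside the requested class is
// rejected with the errors above.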
bool X86AsmParser::parseDirectiveSEHPushReg(SMLoc Loc) {
  if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
    return TokError("expected end of directive");

  getStreamer().emitWinCFIPushReg(Reg, Loc);

bool X86AsmParser::parseDirectiveSEHSetFrame(SMLoc Loc) {
  if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
    return TokError("you must specify a stack pointer offset");
  if (getParser().parseAbsoluteExpression(Off))
    return TokError("expected end of directive");

  getStreamer().emitWinCFISetFrame(Reg, Off, Loc);

bool X86AsmParser::parseDirectiveSEHSaveReg(SMLoc Loc) {
  if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
    return TokError("you must specify an offset on the stack");
  if (getParser().parseAbsoluteExpression(Off))
    return TokError("expected end of directive");

  getStreamer().emitWinCFISaveReg(Reg, Off, Loc);

bool X86AsmParser::parseDirectiveSEHSaveXMM(SMLoc Loc) {
  if (parseSEHRegisterNumber(X86::VR128XRegClassID, Reg))
    return TokError("you must specify an offset on the stack");
  if (getParser().parseAbsoluteExpression(Off))
    return TokError("expected end of directive");

  getStreamer().emitWinCFISaveXMM(Reg, Off, Loc);

bool X86AsmParser::parseDirectiveSEHPushFrame(SMLoc Loc) {
  SMLoc startLoc = getLexer().getLoc();
  if (!getParser().parseIdentifier(CodeID)) {
    if (CodeID != "code")
      return Error(startLoc, "expected @code");
    return TokError("expected end of directive");

  getStreamer().emitWinCFIPushFrame(Code, Loc);
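// Minimal sketch of the unwind directives handled right here (illustrative;
// register and offset choices are assumed):
//   .seh_pushreg %rbp
//   .seh_setframe %rbp, 16
//   .seh_savexmm %xmm6, 32
//   .seh_pushframe @code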
#define GET_MATCHER_IMPLEMENTATION
#include "X86GenAsmMatcher.inc"