46 "x86-experimental-lvi-inline-asm-hardening",
47 cl::desc(
"Harden inline assembly code that may be vulnerable to Load Value"
48 " Injection (LVI). This feature is experimental."),
cl::Hidden);
51 if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
52 ErrMsg =
"scale factor in address must be 1, 2, 4 or 8";
60 static const char OpPrecedence[] = {
88 unsigned ForcedDataPrefix = 0;
98 VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
101 DispEncoding_Default,
106 DispEncoding ForcedDispEncoding = DispEncoding_Default;
109 SMLoc consumeToken() {
117 assert(getParser().getStreamer().getTargetStreamer() &&
118 "do not have a target streamer");
125 bool matchingInlineAsm,
unsigned VariantID = 0) {
128 SwitchMode(X86::Is32Bit);
130 MissingFeatures, matchingInlineAsm,
133 SwitchMode(X86::Is16Bit);
137 enum InfixCalculatorTok {
162 enum IntelOperatorKind {
169 enum MasmOperatorKind {
176 class InfixCalculator {
177 typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
181 bool isUnaryOperator(InfixCalculatorTok
Op)
const {
182 return Op == IC_NEG ||
Op == IC_NOT;
186 int64_t popOperand() {
187 assert (!PostfixStack.empty() &&
"Poped an empty stack!");
189 if (!(
Op.first == IC_IMM ||
Op.first == IC_REGISTER))
193 void pushOperand(InfixCalculatorTok
Op, int64_t Val = 0) {
194 assert ((
Op == IC_IMM ||
Op == IC_REGISTER) &&
195 "Unexpected operand!");
196 PostfixStack.push_back(std::make_pair(
Op, Val));
199 void popOperator() { InfixOperatorStack.pop_back(); }
200 void pushOperator(InfixCalculatorTok
Op) {
202 if (InfixOperatorStack.empty()) {
203 InfixOperatorStack.push_back(
Op);
210 unsigned Idx = InfixOperatorStack.size() - 1;
211 InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
212 if (OpPrecedence[
Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
213 InfixOperatorStack.push_back(
Op);
219 unsigned ParenCount = 0;
222 if (InfixOperatorStack.empty())
225 Idx = InfixOperatorStack.size() - 1;
226 StackOp = InfixOperatorStack[Idx];
227 if (!(OpPrecedence[StackOp] >= OpPrecedence[
Op] || ParenCount))
232 if (!ParenCount && StackOp == IC_LPAREN)
235 if (StackOp == IC_RPAREN) {
237 InfixOperatorStack.pop_back();
238 }
else if (StackOp == IC_LPAREN) {
240 InfixOperatorStack.pop_back();
242 InfixOperatorStack.pop_back();
243 PostfixStack.push_back(std::make_pair(StackOp, 0));
247 InfixOperatorStack.push_back(
Op);
252 while (!InfixOperatorStack.empty()) {
253 InfixCalculatorTok StackOp = InfixOperatorStack.
pop_back_val();
254 if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
255 PostfixStack.push_back(std::make_pair(StackOp, 0));
258 if (PostfixStack.empty())
262 for (
unsigned i = 0,
e = PostfixStack.size();
i !=
e; ++
i) {
263 ICToken
Op = PostfixStack[
i];
264 if (
Op.first == IC_IMM ||
Op.first == IC_REGISTER) {
265 OperandStack.push_back(
Op);
266 }
else if (isUnaryOperator(
Op.first)) {
267 assert (OperandStack.size() > 0 &&
"Too few operands.");
269 assert (Operand.first == IC_IMM &&
270 "Unary operation with a register!");
276 OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
279 OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
283 assert (OperandStack.size() > 1 &&
"Too few operands.");
292 Val = Op1.second + Op2.second;
293 OperandStack.push_back(std::make_pair(IC_IMM, Val));
296 Val = Op1.second - Op2.second;
297 OperandStack.push_back(std::make_pair(IC_IMM, Val));
300 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
301 "Multiply operation with an immediate and a register!");
302 Val = Op1.second * Op2.second;
303 OperandStack.push_back(std::make_pair(IC_IMM, Val));
306 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
307 "Divide operation with an immediate and a register!");
308 assert (Op2.second != 0 &&
"Division by zero!");
309 Val = Op1.second / Op2.second;
310 OperandStack.push_back(std::make_pair(IC_IMM, Val));
313 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
314 "Modulo operation with an immediate and a register!");
315 Val = Op1.second % Op2.second;
316 OperandStack.push_back(std::make_pair(IC_IMM, Val));
319 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
320 "Or operation with an immediate and a register!");
321 Val = Op1.second | Op2.second;
322 OperandStack.push_back(std::make_pair(IC_IMM, Val));
325 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
326 "Xor operation with an immediate and a register!");
327 Val = Op1.second ^ Op2.second;
328 OperandStack.push_back(std::make_pair(IC_IMM, Val));
331 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
332 "And operation with an immediate and a register!");
333 Val = Op1.second & Op2.second;
334 OperandStack.push_back(std::make_pair(IC_IMM, Val));
337 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
338 "Left shift operation with an immediate and a register!");
339 Val = Op1.second << Op2.second;
340 OperandStack.push_back(std::make_pair(IC_IMM, Val));
343 assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
344 "Right shift operation with an immediate and a register!");
345 Val = Op1.second >> Op2.second;
346 OperandStack.push_back(std::make_pair(IC_IMM, Val));
349 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
350 "Equals operation with an immediate and a register!");
351 Val = (Op1.second == Op2.second) ? -1 : 0;
352 OperandStack.push_back(std::make_pair(IC_IMM, Val));
355 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
356 "Not-equals operation with an immediate and a register!");
357 Val = (Op1.second != Op2.second) ? -1 : 0;
358 OperandStack.push_back(std::make_pair(IC_IMM, Val));
361 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
362 "Less-than operation with an immediate and a register!");
363 Val = (Op1.second < Op2.second) ? -1 : 0;
364 OperandStack.push_back(std::make_pair(IC_IMM, Val));
367 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
368 "Less-than-or-equal operation with an immediate and a "
370 Val = (Op1.second <= Op2.second) ? -1 : 0;
371 OperandStack.push_back(std::make_pair(IC_IMM, Val));
374 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
375 "Greater-than operation with an immediate and a register!");
376 Val = (Op1.second > Op2.second) ? -1 : 0;
377 OperandStack.push_back(std::make_pair(IC_IMM, Val));
380 assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
381 "Greater-than-or-equal operation with an immediate and a "
383 Val = (Op1.second >= Op2.second) ? -1 : 0;
384 OperandStack.push_back(std::make_pair(IC_IMM, Val));
389 assert (OperandStack.size() == 1 &&
"Expected a single result.");
394 enum IntelExprState {
425 class IntelExprStateMachine {
426 IntelExprState State = IES_INIT, PrevState = IES_ERROR;
427 unsigned BaseReg = 0, IndexReg = 0, TmpReg = 0, Scale = 0;
429 const MCExpr *Sym =
nullptr;
434 bool MemExpr =
false;
435 bool OffsetOperator =
false;
436 bool AttachToOperandIdx =
false;
438 SMLoc OffsetOperatorLoc;
443 ErrMsg =
"cannot use more than one symbol in memory operand";
452 IntelExprStateMachine() =
default;
454 void addImm(int64_t imm) {
Imm += imm; }
455 short getBracCount()
const {
return BracCount; }
456 bool isMemExpr()
const {
return MemExpr; }
457 bool isOffsetOperator()
const {
return OffsetOperator; }
458 SMLoc getOffsetLoc()
const {
return OffsetOperatorLoc; }
459 unsigned getBaseReg()
const {
return BaseReg; }
460 unsigned getIndexReg()
const {
return IndexReg; }
461 unsigned getScale()
const {
return Scale; }
463 StringRef getSymName()
const {
return SymName; }
465 unsigned getSize()
const {
return CurType.
Size; }
466 unsigned getElementSize()
const {
return CurType.
ElementSize; }
467 unsigned getLength()
const {
return CurType.
Length; }
468 int64_t getImm() {
return Imm + IC.execute(); }
469 bool isValidEndState()
const {
470 return State == IES_RBRAC || State == IES_INTEGER;
477 void setAppendAfterOperand() { AttachToOperandIdx =
true; }
479 bool isPIC()
const {
return IsPIC; }
480 void setPIC() { IsPIC =
true; }
482 bool hadError()
const {
return State == IES_ERROR; }
488 if (IsPIC && AttachToOperandIdx)
489 ErrMsg =
"Don't use 2 or more regs for mem offset in PIC model!";
491 ErrMsg =
"BaseReg/IndexReg already set!";
496 IntelExprState CurrState = State;
505 IC.pushOperator(IC_OR);
508 PrevState = CurrState;
511 IntelExprState CurrState = State;
520 IC.pushOperator(IC_XOR);
523 PrevState = CurrState;
526 IntelExprState CurrState = State;
535 IC.pushOperator(IC_AND);
538 PrevState = CurrState;
541 IntelExprState CurrState = State;
550 IC.pushOperator(IC_EQ);
553 PrevState = CurrState;
556 IntelExprState CurrState = State;
565 IC.pushOperator(IC_NE);
568 PrevState = CurrState;
571 IntelExprState CurrState = State;
580 IC.pushOperator(IC_LT);
583 PrevState = CurrState;
586 IntelExprState CurrState = State;
595 IC.pushOperator(IC_LE);
598 PrevState = CurrState;
601 IntelExprState CurrState = State;
610 IC.pushOperator(IC_GT);
613 PrevState = CurrState;
616 IntelExprState CurrState = State;
625 IC.pushOperator(IC_GE);
628 PrevState = CurrState;
631 IntelExprState CurrState = State;
640 IC.pushOperator(IC_LSHIFT);
643 PrevState = CurrState;
646 IntelExprState CurrState = State;
655 IC.pushOperator(IC_RSHIFT);
658 PrevState = CurrState;
661 IntelExprState CurrState = State;
671 IC.pushOperator(IC_PLUS);
672 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
679 return regsUseUpError(ErrMsg);
686 PrevState = CurrState;
690 IntelExprState CurrState = State;
721 if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
722 CurrState == IES_INTEGER || CurrState == IES_RBRAC ||
723 CurrState == IES_OFFSET)
724 IC.pushOperator(IC_MINUS);
725 else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
727 ErrMsg =
"Scale can't be negative";
730 IC.pushOperator(IC_NEG);
731 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
738 return regsUseUpError(ErrMsg);
745 PrevState = CurrState;
749 IntelExprState CurrState = State;
775 IC.pushOperator(IC_NOT);
778 PrevState = CurrState;
781 IntelExprState CurrState = State;
789 State = IES_REGISTER;
791 IC.pushOperand(IC_REGISTER);
795 if (PrevState == IES_INTEGER) {
797 return regsUseUpError(ErrMsg);
798 State = IES_REGISTER;
801 Scale = IC.popOperand();
804 IC.pushOperand(IC_IMM);
811 PrevState = CurrState;
819 if (ParsingMSInlineAsm)
823 if (
auto *CE = dyn_cast<MCConstantExpr>(SymRef))
824 return onInteger(
CE->getValue(), ErrMsg);
837 if (setSymRef(SymRef, SymRefName, ErrMsg))
841 IC.pushOperand(IC_IMM);
842 if (ParsingMSInlineAsm)
849 bool onInteger(int64_t TmpInt,
StringRef &ErrMsg) {
850 IntelExprState CurrState = State;
876 if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
879 return regsUseUpError(ErrMsg);
887 IC.pushOperand(IC_IMM, TmpInt);
891 PrevState = CurrState;
903 State = IES_MULTIPLY;
904 IC.pushOperator(IC_MULTIPLY);
917 IC.pushOperator(IC_DIVIDE);
930 IC.pushOperator(IC_MOD);
946 IC.pushOperator(IC_PLUS);
952 assert(!BracCount &&
"BracCount should be zero on parsing's start");
961 IntelExprState CurrState = State;
970 if (BracCount-- != 1) {
971 ErrMsg =
"unexpected bracket encountered";
975 if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
982 return regsUseUpError(ErrMsg);
989 PrevState = CurrState;
993 IntelExprState CurrState = State;
1019 IC.pushOperator(IC_LPAREN);
1022 PrevState = CurrState;
1036 IC.pushOperator(IC_RPAREN);
1042 bool ParsingMSInlineAsm,
StringRef &ErrMsg) {
1046 ErrMsg =
"unexpected offset operator expression";
1051 if (setSymRef(Val,
ID, ErrMsg))
1053 OffsetOperator =
true;
1054 OffsetOperatorLoc = OffsetLoc;
1058 IC.pushOperand(IC_IMM);
1059 if (ParsingMSInlineAsm) {
1082 bool MatchingInlineAsm =
false) {
1084 if (MatchingInlineAsm) {
1085 if (!getLexer().isAtStartOfStatement())
1089 return Parser.
Error(L,
Msg, Range);
1094 bool ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
SMLoc &EndLoc,
1095 bool RestoreOnFailure);
1097 std::unique_ptr<X86Operand> DefaultMemSIOperand(
SMLoc Loc);
1098 std::unique_ptr<X86Operand> DefaultMemDIOperand(
SMLoc Loc);
1099 bool IsSIReg(
unsigned Reg);
1100 unsigned GetSIDIForRegClass(
unsigned RegClassID,
unsigned Reg,
bool IsSIReg);
1103 std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
1104 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
1112 bool ParseIntelDotOperator(IntelExprStateMachine &SM,
SMLoc &End);
1114 unsigned ParseIntelInlineAsmOperator(
unsigned OpKind);
1116 bool ParseMasmOperator(
unsigned OpKind, int64_t &Val);
1118 bool ParseIntelNamedOperator(
StringRef Name, IntelExprStateMachine &SM,
1119 bool &ParseError,
SMLoc &End);
1120 bool ParseMasmNamedOperator(
StringRef Name, IntelExprStateMachine &SM,
1121 bool &ParseError,
SMLoc &End);
1122 void RewriteIntelExpression(IntelExprStateMachine &SM,
SMLoc Start,
1124 bool ParseIntelExpression(IntelExprStateMachine &SM,
SMLoc &End);
1125 bool ParseIntelInlineAsmIdentifier(
const MCExpr *&Val,
StringRef &Identifier,
1127 bool IsUnevaluatedOperand,
SMLoc &End,
1128 bool IsParsingOffsetOperator =
false);
1130 IntelExprStateMachine &SM);
1132 bool ParseMemOperand(
unsigned SegReg,
const MCExpr *Disp,
SMLoc StartLoc,
1137 bool ParseIntelMemoryOperandSize(
unsigned &Size);
1138 bool CreateMemForMSInlineAsm(
unsigned SegReg,
const MCExpr *Disp,
1139 unsigned BaseReg,
unsigned IndexReg,
1145 bool parseDirectiveArch();
1146 bool parseDirectiveNops(
SMLoc L);
1147 bool parseDirectiveEven(
SMLoc L);
1151 bool parseDirectiveFPOProc(
SMLoc L);
1152 bool parseDirectiveFPOSetFrame(
SMLoc L);
1153 bool parseDirectiveFPOPushReg(
SMLoc L);
1154 bool parseDirectiveFPOStackAlloc(
SMLoc L);
1155 bool parseDirectiveFPOStackAlign(
SMLoc L);
1156 bool parseDirectiveFPOEndPrologue(
SMLoc L);
1157 bool parseDirectiveFPOEndProc(
SMLoc L);
1160 bool parseSEHRegisterNumber(
unsigned RegClassID,
unsigned &RegNo);
1161 bool parseDirectiveSEHPushReg(
SMLoc);
1162 bool parseDirectiveSEHSetFrame(
SMLoc);
1163 bool parseDirectiveSEHSaveReg(
SMLoc);
1164 bool parseDirectiveSEHSaveXMM(
SMLoc);
1165 bool parseDirectiveSEHPushFrame(
SMLoc);
1167 unsigned checkTargetMatchPredicate(
MCInst &Inst)
override;
1173 void emitWarningForSpecialLVIInstruction(
SMLoc Loc);
1181 bool MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
1184 bool MatchingInlineAsm)
override;
1190 bool MatchingInlineAsm);
1192 bool MatchAndEmitATTInstruction(
SMLoc IDLoc,
unsigned &Opcode,
1195 bool MatchingInlineAsm);
1197 bool MatchAndEmitIntelInstruction(
SMLoc IDLoc,
unsigned &Opcode,
1200 bool MatchingInlineAsm);
1202 bool OmitRegisterFromClobberLists(
unsigned RegNo)
override;
1209 bool ParseZ(std::unique_ptr<X86Operand> &Z,
const SMLoc &StartLoc);
1211 bool is64BitMode()
const {
1213 return getSTI().getFeatureBits()[X86::Is64Bit];
1215 bool is32BitMode()
const {
1217 return getSTI().getFeatureBits()[X86::Is32Bit];
1219 bool is16BitMode()
const {
1221 return getSTI().getFeatureBits()[X86::Is16Bit];
1223 void SwitchMode(
unsigned mode) {
1225 FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});
1229 setAvailableFeatures(FB);
1234 unsigned getPointerWidth() {
1235 if (is16BitMode())
return 16;
1236 if (is32BitMode())
return 32;
1237 if (is64BitMode())
return 64;
1241 bool isParsingIntelSyntax() {
1242 return getParser().getAssemblerDialect();
1248 #define GET_ASSEMBLER_HEADER
1249 #include "X86GenAsmMatcher.inc"
1254 enum X86MatchResultTy {
1255 Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
1256 #define GET_OPERAND_DIAGNOSTIC_TYPES
1257 #include "X86GenAsmMatcher.inc"
1268 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
1271 bool ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
SMLoc &EndLoc)
override;
1273 SMLoc &EndLoc)
override;
1275 bool parsePrimaryExpr(
const MCExpr *&Res,
SMLoc &EndLoc)
override;
1280 bool ParseDirective(
AsmToken DirectiveID)
override;
1292 unsigned Scale,
bool Is64BitMode,
1299 !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
1300 X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg) ||
1301 X86MCRegisterClasses[X86::GR32RegClassID].
contains(BaseReg) ||
1302 X86MCRegisterClasses[X86::GR64RegClassID].
contains(BaseReg))) {
1303 ErrMsg =
"invalid base+index expression";
1307 if (IndexReg != 0 &&
1308 !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
1309 X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg) ||
1310 X86MCRegisterClasses[X86::GR32RegClassID].
contains(IndexReg) ||
1311 X86MCRegisterClasses[X86::GR64RegClassID].
contains(IndexReg) ||
1312 X86MCRegisterClasses[X86::VR128XRegClassID].
contains(IndexReg) ||
1313 X86MCRegisterClasses[X86::VR256XRegClassID].
contains(IndexReg) ||
1314 X86MCRegisterClasses[X86::VR512RegClassID].
contains(IndexReg))) {
1315 ErrMsg =
"invalid base+index expression";
1319 if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
1320 IndexReg == X86::EIP || IndexReg == X86::RIP ||
1321 IndexReg ==
X86::ESP || IndexReg == X86::RSP) {
1322 ErrMsg =
"invalid base+index expression";
1328 if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg) &&
1329 (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
1330 BaseReg !=
X86::SI && BaseReg != X86::DI))) {
1331 ErrMsg =
"invalid 16-bit base register";
1336 X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg)) {
1337 ErrMsg =
"16-bit memory operand may not include only index register";
1341 if (BaseReg != 0 && IndexReg != 0) {
1342 if (X86MCRegisterClasses[X86::GR64RegClassID].
contains(BaseReg) &&
1343 (X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg) ||
1344 X86MCRegisterClasses[X86::GR32RegClassID].
contains(IndexReg) ||
1345 IndexReg == X86::EIZ)) {
1346 ErrMsg =
"base register is 64-bit, but index register is not";
1349 if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(BaseReg) &&
1350 (X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg) ||
1351 X86MCRegisterClasses[X86::GR64RegClassID].
contains(IndexReg) ||
1352 IndexReg == X86::RIZ)) {
1353 ErrMsg =
"base register is 32-bit, but index register is not";
1356 if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg)) {
1357 if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(IndexReg) ||
1358 X86MCRegisterClasses[X86::GR64RegClassID].
contains(IndexReg)) {
1359 ErrMsg =
"base register is 16-bit, but index register is not";
1362 if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
1363 (IndexReg !=
X86::SI && IndexReg != X86::DI)) {
1364 ErrMsg =
"invalid 16-bit base/index register combination";
1371 if (!Is64BitMode && BaseReg != 0 &&
1372 (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
1373 ErrMsg =
"IP-relative addressing requires 64-bit mode";
1394 if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
1395 (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))
1398 if (!is64BitMode()) {
1402 if (RegNo == X86::RIZ || RegNo == X86::RIP ||
1403 X86MCRegisterClasses[X86::GR64RegClassID].
contains(RegNo) ||
1406 return Error(StartLoc,
1407 "register %" +
RegName +
" is only available in 64-bit mode",
1414 if (RegNo == 0 &&
RegName.startswith(
"db")) {
1473 if (isParsingIntelSyntax())
1475 return Error(StartLoc,
"invalid register name",
SMRange(StartLoc, EndLoc));
1480 bool X86AsmParser::ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
1481 SMLoc &EndLoc,
bool RestoreOnFailure) {
1487 auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
1488 if (RestoreOnFailure) {
1489 while (!Tokens.empty()) {
1496 StartLoc = PercentTok.
getLoc();
1501 Tokens.push_back(PercentTok);
1510 if (isParsingIntelSyntax())
return true;
1511 return Error(StartLoc,
"invalid register name",
1515 if (MatchRegisterByName(RegNo, Tok.
getString(), StartLoc, EndLoc)) {
1521 if (RegNo == X86::ST0) {
1522 Tokens.push_back(Tok);
1529 Tokens.push_back(Parser.
getTok());
1535 return Error(IntTok.
getLoc(),
"expected stack index");
1538 case 0: RegNo = X86::ST0;
break;
1539 case 1: RegNo = X86::ST1;
break;
1540 case 2: RegNo = X86::ST2;
break;
1541 case 3: RegNo = X86::ST3;
break;
1542 case 4: RegNo = X86::ST4;
break;
1543 case 5: RegNo = X86::ST5;
break;
1544 case 6: RegNo = X86::ST6;
break;
1545 case 7: RegNo = X86::ST7;
break;
1548 return Error(IntTok.
getLoc(),
"invalid stack index");
1552 Tokens.push_back(IntTok);
1568 if (isParsingIntelSyntax())
return true;
1569 return Error(StartLoc,
"invalid register name",
1577 bool X86AsmParser::ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
1579 return ParseRegister(RegNo, StartLoc, EndLoc,
false);
1586 ParseRegister(RegNo, StartLoc, EndLoc,
true);
1587 bool PendingErrors = getParser().hasPendingError();
1588 getParser().clearPendingErrors();
1596 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(
SMLoc Loc) {
1597 bool Parse32 = is32BitMode() || Code16GCC;
1598 unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ?
X86::ESI :
X86::SI);
1605 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(
SMLoc Loc) {
1606 bool Parse32 = is32BitMode() || Code16GCC;
1607 unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ?
X86::EDI : X86::DI);
1614 bool X86AsmParser::IsSIReg(
unsigned Reg) {
1628 unsigned X86AsmParser::GetSIDIForRegClass(
unsigned RegClassID,
unsigned Reg,
1630 switch (RegClassID) {
1632 case X86::GR64RegClassID:
1633 return IsSIReg ? X86::RSI : X86::RDI;
1634 case X86::GR32RegClassID:
1636 case X86::GR16RegClassID:
1637 return IsSIReg ?
X86::SI : X86::DI;
1641 void X86AsmParser::AddDefaultSrcDestOperands(
1643 std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
1644 if (isParsingIntelSyntax()) {
1654 bool X86AsmParser::VerifyAndAdjustOperands(
OperandVector &OrigOperands,
1657 if (OrigOperands.size() > 1) {
1659 assert(OrigOperands.size() == FinalOperands.size() + 1 &&
1660 "Operand size mismatch");
1664 int RegClassID = -1;
1665 for (
unsigned int i = 0;
i < FinalOperands.size(); ++
i) {
1669 if (FinalOp.
isReg() &&
1674 if (FinalOp.
isMem()) {
1676 if (!OrigOp.
isMem())
1680 unsigned OrigReg = OrigOp.
Mem.BaseReg;
1681 unsigned FinalReg = FinalOp.
Mem.BaseReg;
1685 if (RegClassID != -1 &&
1686 !X86MCRegisterClasses[RegClassID].
contains(OrigReg)) {
1688 "mismatching source and destination index registers");
1691 if (X86MCRegisterClasses[X86::GR64RegClassID].
contains(OrigReg))
1692 RegClassID = X86::GR64RegClassID;
1693 else if (X86MCRegisterClasses[X86::GR32RegClassID].
contains(OrigReg))
1694 RegClassID = X86::GR32RegClassID;
1695 else if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(OrigReg))
1696 RegClassID = X86::GR16RegClassID;
1702 bool IsSI = IsSIReg(FinalReg);
1703 FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);
1705 if (FinalReg != OrigReg) {
1706 std::string
RegName = IsSI ?
"ES:(R|E)SI" :
"ES:(R|E)DI";
1707 Warnings.push_back(std::make_pair(
1709 "memory operand is only for determining the size, " +
RegName +
1710 " will be used for the location"));
1713 FinalOp.
Mem.Size = OrigOp.
Mem.Size;
1714 FinalOp.
Mem.SegReg = OrigOp.
Mem.SegReg;
1715 FinalOp.
Mem.BaseReg = FinalReg;
1721 for (
auto &WarningMsg : Warnings) {
1722 Warning(WarningMsg.first, WarningMsg.second);
1726 for (
unsigned int i = 0;
i < FinalOperands.size(); ++
i)
1727 OrigOperands.pop_back();
1730 for (
unsigned int i = 0;
i < FinalOperands.size(); ++
i)
1731 OrigOperands.push_back(
std::move(FinalOperands[
i]));
1737 if (isParsingIntelSyntax())
1743 bool X86AsmParser::CreateMemForMSInlineAsm(
1744 unsigned SegReg,
const MCExpr *Disp,
unsigned BaseReg,
unsigned IndexReg,
1752 Size = getPointerWidth();
1759 End, Size, Identifier,
1766 unsigned FrontendSize = 0;
1767 void *Decl =
nullptr;
1768 bool IsGlobalLV =
false;
1771 FrontendSize =
Info.Var.Type * 8;
1772 Decl =
Info.Var.Decl;
1773 IsGlobalLV =
Info.Var.IsGlobalLV;
1777 if (IsGlobalLV && (BaseReg || IndexReg)) {
1779 End, Size, Identifier, Decl, 0,
1780 BaseReg && IndexReg));
1786 BaseReg = BaseReg ? BaseReg : 1;
1788 getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
1790 X86::RIP, Identifier, Decl, FrontendSize));
1798 IntelExprStateMachine &SM,
1799 bool &ParseError,
SMLoc &End) {
1803 !getParser().isParsingMasm())
1805 if (
Name.equals_insensitive(
"not")) {
1807 }
else if (
Name.equals_insensitive(
"or")) {
1809 }
else if (
Name.equals_insensitive(
"shl")) {
1811 }
else if (
Name.equals_insensitive(
"shr")) {
1813 }
else if (
Name.equals_insensitive(
"xor")) {
1815 }
else if (
Name.equals_insensitive(
"and")) {
1817 }
else if (
Name.equals_insensitive(
"mod")) {
1819 }
else if (
Name.equals_insensitive(
"offset")) {
1820 SMLoc OffsetLoc = getTok().getLoc();
1821 const MCExpr *Val =
nullptr;
1824 ParseError = ParseIntelOffsetOperator(Val,
ID,
Info, End);
1829 SM.onOffset(Val, OffsetLoc,
ID,
Info, isParsingMSInlineAsm(), ErrMsg);
1835 if (!
Name.equals_insensitive(
"offset"))
1836 End = consumeToken();
1840 IntelExprStateMachine &SM,
1841 bool &ParseError,
SMLoc &End) {
1842 if (
Name.equals_insensitive(
"eq")) {
1844 }
else if (
Name.equals_insensitive(
"ne")) {
1846 }
else if (
Name.equals_insensitive(
"lt")) {
1848 }
else if (
Name.equals_insensitive(
"le")) {
1850 }
else if (
Name.equals_insensitive(
"gt")) {
1852 }
else if (
Name.equals_insensitive(
"ge")) {
1857 End = consumeToken();
1864 IntelExprStateMachine &SM) {
1868 SM.setAppendAfterOperand();
1871 bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM,
SMLoc &End) {
1877 if (getContext().getObjectFileInfo()->isPositionIndependent())
1886 bool UpdateLocLex =
true;
1891 if ((Done = SM.isValidEndState()))
1893 return Error(Tok.
getLoc(),
"unknown token in expression");
1895 return Error(getLexer().getErrLoc(), getLexer().getErr());
1902 UpdateLocLex =
false;
1903 if (ParseIntelDotOperator(SM, End))
1908 if ((Done = SM.isValidEndState()))
1910 return Error(Tok.
getLoc(),
"unknown token in expression");
1914 UpdateLocLex =
false;
1915 if (ParseIntelDotOperator(SM, End))
1920 if ((Done = SM.isValidEndState()))
1922 return Error(Tok.
getLoc(),
"unknown token in expression");
1933 UpdateLocLex =
false;
1934 if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
1935 return Error(ValueLoc,
"expected absolute value");
1936 if (SM.onInteger(Res, ErrMsg))
1937 return Error(ValueLoc, ErrMsg);
1946 UpdateLocLex =
false;
1948 size_t DotOffset =
Identifier.find_first_of(
'.');
1966 const AsmToken &NextTok = getLexer().peekTok();
1975 End = consumeToken();
1982 if (!ParseRegister(
Reg, IdentLoc, End,
true)) {
1983 if (SM.onRegister(
Reg, ErrMsg))
1984 return Error(IdentLoc, ErrMsg);
1988 const std::pair<StringRef, StringRef> IDField =
1992 if (!
Field.empty() &&
1993 !MatchRegisterByName(
Reg,
ID, IdentLoc, IDEndLoc)) {
1994 if (SM.onRegister(
Reg, ErrMsg))
1995 return Error(IdentLoc, ErrMsg);
2000 return Error(FieldStartLoc,
"unknown offset");
2001 else if (SM.onPlus(ErrMsg))
2002 return Error(getTok().getLoc(), ErrMsg);
2003 else if (SM.onInteger(
Info.Offset, ErrMsg))
2004 return Error(IdentLoc, ErrMsg);
2005 SM.setTypeInfo(
Info.Type);
2007 End = consumeToken();
2013 bool ParseError =
false;
2014 if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
2020 ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
2033 if (ParseIntelDotOperator(SM, End))
2038 if (isParsingMSInlineAsm()) {
2040 if (
unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
2041 if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
2042 if (SM.onInteger(Val, ErrMsg))
2043 return Error(IdentLoc, ErrMsg);
2052 return Error(IdentLoc,
"expected identifier");
2053 if (ParseIntelInlineAsmIdentifier(Val, Identifier,
Info,
false, End))
2055 else if (SM.onIdentifierExpr(Val, Identifier,
Info, FieldInfo.
Type,
2057 return Error(IdentLoc, ErrMsg);
2061 if (
unsigned OpKind = IdentifyMasmOperator(Identifier)) {
2063 if (ParseMasmOperator(OpKind, Val))
2065 if (SM.onInteger(Val, ErrMsg))
2066 return Error(IdentLoc, ErrMsg);
2069 if (!getParser().lookUpType(Identifier, FieldInfo.
Type)) {
2075 getParser().parseIdentifier(Identifier);
2079 if (getParser().lookUpField(FieldInfo.
Type.
Name, Identifier,
2083 return Error(IdentLoc,
"Unable to lookup field reference!",
2089 if (SM.onInteger(FieldInfo.
Offset, ErrMsg))
2090 return Error(IdentLoc, ErrMsg);
2094 if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.
Type)) {
2095 return Error(Tok.
getLoc(),
"Unexpected identifier!");
2096 }
else if (SM.onIdentifierExpr(Val, Identifier,
Info, FieldInfo.
Type,
2098 return Error(IdentLoc, ErrMsg);
2104 SMLoc Loc = getTok().getLoc();
2105 int64_t
IntVal = getTok().getIntVal();
2106 End = consumeToken();
2107 UpdateLocLex =
false;
2110 if (IDVal ==
"f" || IDVal ==
"b") {
2112 getContext().getDirectionalLocalSymbol(
IntVal, IDVal ==
"b");
2117 return Error(Loc,
"invalid reference to undefined symbol");
2121 if (SM.onIdentifierExpr(Val, Identifier,
Info,
Type,
2122 isParsingMSInlineAsm(), ErrMsg))
2123 return Error(Loc, ErrMsg);
2124 End = consumeToken();
2126 if (SM.onInteger(
IntVal, ErrMsg))
2127 return Error(Loc, ErrMsg);
2130 if (SM.onInteger(
IntVal, ErrMsg))
2131 return Error(Loc, ErrMsg);
2136 if (SM.onPlus(ErrMsg))
2137 return Error(getTok().getLoc(), ErrMsg);
2140 if (SM.onMinus(ErrMsg))
2141 return Error(getTok().getLoc(), ErrMsg);
2151 SM.onLShift();
break;
2153 SM.onRShift();
break;
2156 return Error(Tok.
getLoc(),
"unexpected bracket encountered");
2157 tryParseOperandIdx(PrevTK, SM);
2160 if (SM.onRBrac(ErrMsg)) {
2168 return Error(Tok.
getLoc(),
"unknown token in expression");
2170 if (!Done && UpdateLocLex)
2171 End = consumeToken();
2178 void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
2181 unsigned ExprLen = End.getPointer() - Start.getPointer();
2183 if (SM.getSym() && !SM.isOffsetOperator()) {
2185 if (
unsigned Len = SymName.
data() - Start.getPointer())
2188 ExprLen = End.getPointer() - (SymName.
data() + SymName.
size());
2191 if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
2201 if (SM.getBaseReg())
2203 if (SM.getIndexReg())
2205 if (SM.isOffsetOperator())
2206 OffsetNameStr = SM.getSymName();
2208 IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
2209 SM.getImm(), SM.isMemExpr());
2210 InstInfo->
AsmRewrites->emplace_back(Loc, ExprLen, Expr);
2214 bool X86AsmParser::ParseIntelInlineAsmIdentifier(
2216 bool IsUnevaluatedOperand,
SMLoc &End,
bool IsParsingOffsetOperator) {
2218 assert(isParsingMSInlineAsm() &&
"Expected to be parsing inline assembly.");
2222 SemaCallback->LookupInlineAsmIdentifier(LineBuf,
Info, IsUnevaluatedOperand);
2233 }
while (End.getPointer() < EndPtr);
2238 assert((End.getPointer() == EndPtr ||
2240 "frontend claimed part of a token?");
2246 SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
2248 assert(InternalName.
size() &&
"We should have an internal name here.");
2251 if (!IsParsingOffsetOperator)
2259 MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
2270 const SMLoc consumedToken = consumeToken();
2272 return Error(Tok.
getLoc(),
"Expected an identifier after {");
2281 return Error(Tok.
getLoc(),
"Invalid rounding mode.");
2284 return Error(Tok.
getLoc(),
"Expected - at this point");
2288 return Error(Tok.
getLoc(),
"Expected } at this point");
2291 const MCExpr *RndModeOp =
2299 return Error(Tok.
getLoc(),
"Expected } at this point");
2304 return Error(Tok.
getLoc(),
"unknown token in expression");
2308 bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
2324 }
else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
2327 TrailingDot = DotDispStr.
substr(DotDispStr.
size() - 1);
2330 const std::pair<StringRef, StringRef> BaseMember = DotDispStr.
split(
'.');
2332 if (getParser().lookUpField(SM.getType(), DotDispStr,
Info) &&
2333 getParser().lookUpField(SM.getSymName(), DotDispStr,
Info) &&
2334 getParser().lookUpField(DotDispStr,
Info) &&
2336 SemaCallback->LookupInlineAsmField(
Base, Member,
Info.Offset)))
2337 return Error(Tok.
getLoc(),
"Unable to lookup field reference!");
2339 return Error(Tok.
getLoc(),
"Unexpected token type!");
2344 const char *DotExprEndLoc = DotDispStr.
data() + DotDispStr.
size();
2347 if (!TrailingDot.
empty())
2349 SM.addImm(
Info.Offset);
2350 SM.setTypeInfo(
Info.Type);
2360 SMLoc Start = Lex().getLoc();
2361 ID = getTok().getString();
2362 if (!isParsingMSInlineAsm()) {
2365 getParser().parsePrimaryExpr(Val, End,
nullptr))
2366 return Error(Start,
"unexpected token!");
2367 }
else if (ParseIntelInlineAsmIdentifier(Val,
ID,
Info,
false, End,
true)) {
2368 return Error(Start,
"unable to lookup expression");
2370 return Error(Start,
"offset operator cannot yet handle constants");
2377 unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(
StringRef Name) {
2379 .
Cases(
"TYPE",
"type",IOK_TYPE)
2380 .
Cases(
"SIZE",
"size",IOK_SIZE)
2381 .
Cases(
"LENGTH",
"length",IOK_LENGTH)
2391 unsigned X86AsmParser::ParseIntelInlineAsmOperator(
unsigned OpKind) {
2396 const MCExpr *Val =
nullptr;
2400 if (ParseIntelInlineAsmIdentifier(Val, Identifier,
Info,
2405 Error(Start,
"unable to lookup expression");
2412 case IOK_LENGTH: CVal =
Info.Var.Length;
break;
2413 case IOK_SIZE: CVal =
Info.Var.Size;
break;
2414 case IOK_TYPE: CVal =
Info.Var.Type;
break;
2422 unsigned X86AsmParser::IdentifyMasmOperator(
StringRef Name) {
2424 .
Case(
"type", MOK_TYPE)
2425 .
Cases(
"size",
"sizeof", MOK_SIZEOF)
2426 .
Cases(
"length",
"lengthof", MOK_LENGTHOF)
2436 bool X86AsmParser::ParseMasmOperator(
unsigned OpKind, int64_t &Val) {
2442 if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
2445 const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.
getTok();
2461 IntelExprStateMachine SM;
2463 if (ParseIntelExpression(SM, End))
2473 Val = SM.getLength();
2476 Val = SM.getElementSize();
2481 return Error(OpLoc,
"expression has unknown type",
SMRange(Start, End));
2487 bool X86AsmParser::ParseIntelMemoryOperandSize(
unsigned &Size) {
2489 .
Cases(
"BYTE",
"byte", 8)
2490 .
Cases(
"WORD",
"word", 16)
2491 .
Cases(
"DWORD",
"dword", 32)
2492 .
Cases(
"FLOAT",
"float", 32)
2493 .
Cases(
"LONG",
"long", 32)
2494 .
Cases(
"FWORD",
"fword", 48)
2495 .
Cases(
"DOUBLE",
"double", 64)
2496 .
Cases(
"QWORD",
"qword", 64)
2497 .
Cases(
"MMWORD",
"mmword", 64)
2498 .
Cases(
"XWORD",
"xword", 80)
2499 .
Cases(
"TBYTE",
"tbyte", 80)
2500 .
Cases(
"XMMWORD",
"xmmword", 128)
2501 .
Cases(
"YMMWORD",
"ymmword", 256)
2502 .
Cases(
"ZMMWORD",
"zmmword", 512)
2507 return Error(Tok.
getLoc(),
"Expected 'PTR' or 'ptr' token!");
2520 if (ParseIntelMemoryOperandSize(Size))
2522 bool PtrInOperand = bool(Size);
2528 return ParseRoundingModeOp(Start,
Operands);
2533 if (RegNo == X86::RIP)
2534 return Error(Start,
"rip can only be used as a base register");
2538 return Error(Start,
"expected memory operand after 'ptr', "
2539 "found register operand instead");
2544 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].
contains(RegNo))
2545 return Error(Start,
"invalid segment register");
2547 Start = Lex().getLoc();
2551 IntelExprStateMachine SM;
2552 if (ParseIntelExpression(SM, End))
2555 if (isParsingMSInlineAsm())
2556 RewriteIntelExpression(SM, Start, Tok.
getLoc());
2558 int64_t
Imm = SM.getImm();
2559 const MCExpr *Disp = SM.getSym();
2568 if (!SM.isMemExpr() && !RegNo) {
2569 if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
2575 SM.getSymName(),
Info.Var.Decl,
2576 Info.Var.IsGlobalLV));
2586 unsigned BaseReg = SM.getBaseReg();
2587 unsigned IndexReg = SM.getIndexReg();
2588 if (IndexReg && BaseReg == X86::RIP)
2590 unsigned Scale = SM.getScale();
2592 Size = SM.getElementSize() << 3;
2594 if (Scale == 0 && BaseReg !=
X86::ESP && BaseReg != X86::RSP &&
2595 (IndexReg ==
X86::ESP || IndexReg == X86::RSP))
2601 !(X86MCRegisterClasses[X86::VR128XRegClassID].
contains(IndexReg) ||
2602 X86MCRegisterClasses[X86::VR256XRegClassID].
contains(IndexReg) ||
2603 X86MCRegisterClasses[X86::VR512RegClassID].
contains(IndexReg)) &&
2604 (X86MCRegisterClasses[X86::VR128XRegClassID].
contains(BaseReg) ||
2605 X86MCRegisterClasses[X86::VR256XRegClassID].
contains(BaseReg) ||
2606 X86MCRegisterClasses[X86::VR512RegClassID].
contains(BaseReg)))
2610 X86MCRegisterClasses[X86::GR16RegClassID].
contains(IndexReg))
2611 return Error(Start,
"16-bit addresses cannot have a scale");
2620 if ((BaseReg ==
X86::SI || BaseReg == X86::DI) &&
2621 (IndexReg == X86::BX || IndexReg == X86::BP))
2624 if ((BaseReg || IndexReg) &&
2627 return Error(Start, ErrMsg);
2628 if (isParsingMSInlineAsm())
2629 return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale, Start,
2630 End, Size, SM.getSymName(),
2635 unsigned DefaultBaseReg = X86::NoRegister;
2636 bool MaybeDirectBranchDest =
true;
2639 bool IsUnconditionalBranch =
2640 Name.equals_insensitive(
"jmp") ||
Name.equals_insensitive(
"call");
2641 if (is64BitMode() && SM.getElementSize() > 0) {
2642 DefaultBaseReg = X86::RIP;
2644 if (IsUnconditionalBranch) {
2646 MaybeDirectBranchDest =
false;
2648 DefaultBaseReg = X86::RIP;
2649 }
else if (!BaseReg && !IndexReg && Disp &&
2651 if (is64BitMode()) {
2652 if (SM.getSize() == 8) {
2653 MaybeDirectBranchDest =
false;
2654 DefaultBaseReg = X86::RIP;
2657 if (SM.getSize() == 4 || SM.getSize() == 2)
2658 MaybeDirectBranchDest =
false;
2664 if ((BaseReg || IndexReg || RegNo || DefaultBaseReg != X86::NoRegister))
2666 getPointerWidth(), RegNo, Disp, BaseReg, IndexReg, Scale, Start, End,
2667 Size, DefaultBaseReg,
StringRef(),
nullptr,
2668 0,
false, MaybeDirectBranchDest));
2671 getPointerWidth(), Disp, Start, End, Size,
StringRef(),
2673 MaybeDirectBranchDest));
2679 switch (getLexer().getKind()) {
2689 "expected immediate expression") ||
2690 getParser().parseExpression(Val, End) ||
2691 check(isa<X86MCExpr>(Val), L,
"expected immediate expression"))
2698 return ParseRoundingModeOp(Start,
Operands);
2707 const MCExpr *Expr =
nullptr;
2713 if (
auto *RE = dyn_cast<X86MCExpr>(Expr)) {
2716 Reg = RE->getRegNo();
2719 if (
Reg == X86::EIZ ||
Reg == X86::RIZ)
2721 Loc,
"%eiz and %riz can only be used as index registers",
2723 if (
Reg == X86::RIP)
2724 return Error(Loc,
"%rip can only be used as a base register",
2731 if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].
contains(
Reg))
2732 return Error(Loc,
"invalid segment register");
2740 return ParseMemOperand(
Reg, Expr, Loc, EndLoc,
Operands);
2770 bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
2771 const SMLoc &StartLoc) {
2777 (getLexer().getTok().getIdentifier() ==
"z")))
2782 return Error(getLexer().getLoc(),
"Expected } at this point");
2794 const SMLoc consumedToken = consumeToken();
2798 if (getLexer().getTok().getIntVal() != 1)
2799 return TokError(
"Expected 1to<NUM> at this point");
2803 return TokError(
"Expected 1to<NUM> at this point");
2806 StringRef BroadcastString = (
Prefix + getLexer().getTok().getIdentifier())
2809 return TokError(
"Expected 1to<NUM> at this point");
2810 const char *BroadcastPrimitive =
2812 .
Case(
"1to2",
"{1to2}")
2813 .
Case(
"1to4",
"{1to4}")
2814 .
Case(
"1to8",
"{1to8}")
2815 .
Case(
"1to16",
"{1to16}")
2816 .
Case(
"1to32",
"{1to32}")
2818 if (!BroadcastPrimitive)
2819 return TokError(
"Invalid memory broadcast primitive.");
2822 return TokError(
"Expected } at this point");
2833 std::unique_ptr<X86Operand>
Z;
2834 if (ParseZ(Z, consumedToken))
2840 SMLoc StartLoc =
Z ? consumeToken() : consumedToken;
2845 if (!ParseRegister(RegNo, RegLoc, StartLoc) &&
2846 X86MCRegisterClasses[X86::VK1RegClassID].
contains(RegNo)) {
2847 if (RegNo == X86::K0)
2848 return Error(RegLoc,
"Register k0 can't be used as write mask");
2850 return Error(getLexer().getLoc(),
"Expected } at this point");
2856 return Error(getLexer().getLoc(),
2857 "Expected an op-mask register at this point");
2862 if (ParseZ(Z, consumeToken()) || !Z)
2863 return Error(getLexer().getLoc(),
2864 "Expected a {z} mark at this point");
2880 bool X86AsmParser::ParseMemOperand(
unsigned SegReg,
const MCExpr *Disp,
2901 auto isAtMemOperand = [
this]() {
2906 auto TokCount = this->getLexer().peekTokens(Buf,
true);
2909 switch (Buf[0].getKind()) {
2916 if ((TokCount > 1) &&
2918 (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
2920 Buf[1].getIdentifier().
size() + 1);
2931 MCSymbol *Sym = this->getContext().getOrCreateSymbol(
Id);
2934 return isa<X86MCExpr>(V);
2942 if (!isAtMemOperand()) {
2945 assert(!isa<X86MCExpr>(Disp) &&
"Expected non-register here.");
2961 0, 0, 1, StartLoc, EndLoc));
2967 unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
2968 SMLoc BaseLoc = getLexer().getLoc();
2975 check(!isa<X86MCExpr>(
E), BaseLoc,
"expected register here"))
2979 BaseReg = cast<X86MCExpr>(
E)->getRegNo();
2980 if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
2981 return Error(BaseLoc,
"eiz and riz can only be used as index registers",
2996 if (!isa<X86MCExpr>(
E)) {
3000 if (!
E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
3001 return Error(Loc,
"expected absolute expression");
3003 Warning(Loc,
"scale factor without index register is ignored");
3006 IndexReg = cast<X86MCExpr>(
E)->getRegNo();
3008 if (BaseReg == X86::RIP)
3010 "%rip as base register can not have an index register");
3011 if (IndexReg == X86::RIP)
3012 return Error(Loc,
"%rip is not allowed as an index register");
3023 return Error(Loc,
"expected scale expression");
3024 Scale = (unsigned)ScaleVal;
3026 if (X86MCRegisterClasses[X86::GR16RegClassID].
contains(BaseReg) &&
3028 return Error(Loc,
"scale factor in 16-bit address must be 1");
3030 return Error(Loc, ErrMsg);
3044 if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
3045 isa<MCConstantExpr>(Disp) &&
3046 cast<MCConstantExpr>(Disp)->getValue() == 0) {
3053 return Error(BaseLoc, ErrMsg);
3055 if (SegReg || BaseReg || IndexReg)
3057 BaseReg, IndexReg, Scale, StartLoc,
3066 bool X86AsmParser::parsePrimaryExpr(
const MCExpr *&Res,
SMLoc &EndLoc) {
3074 if (ParseRegister(RegNo, StartLoc, EndLoc))
3088 ForcedVEXEncoding = VEXEncoding_Default;
3089 ForcedDispEncoding = DispEncoding_Default;
3103 ForcedVEXEncoding = VEXEncoding_VEX;
3104 else if (
Prefix ==
"vex2")
3105 ForcedVEXEncoding = VEXEncoding_VEX2;
3106 else if (
Prefix ==
"vex3")
3107 ForcedVEXEncoding = VEXEncoding_VEX3;
3108 else if (
Prefix ==
"evex")
3109 ForcedVEXEncoding = VEXEncoding_EVEX;
3110 else if (
Prefix ==
"disp8")
3111 ForcedDispEncoding = DispEncoding_Disp8;
3112 else if (
Prefix ==
"disp32")
3113 ForcedDispEncoding = DispEncoding_Disp32;
3115 return Error(NameLoc,
"unknown prefix");
3131 if (isParsingMSInlineAsm()) {
3132 if (
Name.equals_insensitive(
"vex"))
3133 ForcedVEXEncoding = VEXEncoding_VEX;
3134 else if (
Name.equals_insensitive(
"vex2"))
3135 ForcedVEXEncoding = VEXEncoding_VEX2;
3136 else if (
Name.equals_insensitive(
"vex3"))
3137 ForcedVEXEncoding = VEXEncoding_VEX3;
3138 else if (
Name.equals_insensitive(
"evex"))
3139 ForcedVEXEncoding = VEXEncoding_EVEX;
3141 if (ForcedVEXEncoding != VEXEncoding_Default) {
3154 if (
Name.consume_back(
".d32")) {
3155 ForcedDispEncoding = DispEncoding_Disp32;
3156 }
else if (
Name.consume_back(
".d8")) {
3157 ForcedDispEncoding = DispEncoding_Disp8;
3163 if (isParsingIntelSyntax() &&
3164 (PatchedName ==
"jmp" || PatchedName ==
"jc" || PatchedName ==
"jnc" ||
3165 PatchedName ==
"jcxz" || PatchedName ==
"jecxz" ||
3170 : NextTok ==
"short") {
3179 NextTok.
size() + 1);
3185 PatchedName !=
"setb" && PatchedName !=
"setnb")
3186 PatchedName = PatchedName.
substr(0,
Name.size()-1);
3188 unsigned ComparisonPredicate = ~0U;
3195 bool IsVCMP = PatchedName[0] ==
'v';
3196 unsigned CCIdx =
IsVCMP ? 4 : 3;
3198 PatchedName.
slice(CCIdx, PatchedName.
size() - 2))
3200 .
Case(
"eq_oq", 0x00)
3202 .
Case(
"lt_os", 0x01)
3204 .
Case(
"le_os", 0x02)
3205 .
Case(
"unord", 0x03)
3206 .
Case(
"unord_q", 0x03)
3208 .
Case(
"neq_uq", 0x04)
3210 .
Case(
"nlt_us", 0x05)
3212 .
Case(
"nle_us", 0x06)
3214 .
Case(
"ord_q", 0x07)
3216 .
Case(
"eq_uq", 0x08)
3218 .
Case(
"nge_us", 0x09)
3220 .
Case(
"ngt_us", 0x0A)
3221 .
Case(
"false", 0x0B)
3222 .
Case(
"false_oq", 0x0B)
3223 .
Case(
"neq_oq", 0x0C)
3225 .
Case(
"ge_os", 0x0D)
3227 .
Case(
"gt_os", 0x0E)
3229 .
Case(
"true_uq", 0x0F)
3230 .
Case(
"eq_os", 0x10)
3231 .
Case(
"lt_oq", 0x11)
3232 .
Case(
"le_oq", 0x12)
3233 .
Case(
"unord_s", 0x13)
3234 .
Case(
"neq_us", 0x14)
3235 .
Case(
"nlt_uq", 0x15)
3236 .
Case(
"nle_uq", 0x16)
3237 .
Case(
"ord_s", 0x17)
3238 .
Case(
"eq_us", 0x18)
3239 .
Case(
"nge_uq", 0x19)
3240 .
Case(
"ngt_uq", 0x1A)
3241 .
Case(
"false_os", 0x1B)
3242 .
Case(
"neq_os", 0x1C)
3243 .
Case(
"ge_oq", 0x1D)
3244 .
Case(
"gt_oq", 0x1E)
3245 .
Case(
"true_us", 0x1F)
3247 if (CC != ~0U && (
IsVCMP || CC < 8) &&
3250 PatchedName =
IsVCMP ?
"vcmpss" :
"cmpss";
3251 else if (PatchedName.
endswith(
"sd"))
3252 PatchedName =
IsVCMP ?
"vcmpsd" :
"cmpsd";
3253 else if (PatchedName.
endswith(
"ps"))
3254 PatchedName =
IsVCMP ?
"vcmpps" :
"cmpps";
3255 else if (PatchedName.
endswith(
"pd"))
3256 PatchedName =
IsVCMP ?
"vcmppd" :
"cmppd";
3257 else if (PatchedName.
endswith(
"sh"))
3258 PatchedName =
"vcmpsh";
3259 else if (PatchedName.
endswith(
"ph"))
3260 PatchedName =
"vcmpph";
3264 ComparisonPredicate = CC;
3270 (PatchedName.
back() ==
'b' || PatchedName.
back() ==
'w' ||
3271 PatchedName.
back() ==
'd' || PatchedName.
back() ==
'q')) {
3272 unsigned SuffixSize = PatchedName.
drop_back().
back() ==
'u' ? 2 : 1;
3274 PatchedName.
slice(5, PatchedName.
size() - SuffixSize))
3284 if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
3285 switch (PatchedName.
back()) {
3287 case 'b': PatchedName = SuffixSize == 2 ?
"vpcmpub" :
"vpcmpb";
break;
3288 case 'w': PatchedName = SuffixSize == 2 ?
"vpcmpuw" :
"vpcmpw";
break;
3289 case 'd': PatchedName = SuffixSize == 2 ?
"vpcmpud" :
"vpcmpd";
break;
3290 case 'q': PatchedName = SuffixSize == 2 ?
"vpcmpuq" :
"vpcmpq";
break;
3293 ComparisonPredicate = CC;
3299 (PatchedName.
back() ==
'b' || PatchedName.
back() ==
'w' ||
3300 PatchedName.
back() ==
'd' || PatchedName.
back() ==
'q')) {
3301 unsigned SuffixSize = PatchedName.
drop_back().
back() ==
'u' ? 2 : 1;
3303 PatchedName.
slice(5, PatchedName.
size() - SuffixSize))
3314 switch (PatchedName.
back()) {
3316 case 'b': PatchedName = SuffixSize == 2 ?
"vpcomub" :
"vpcomb";
break;
3317 case 'w': PatchedName = SuffixSize == 2 ?
"vpcomuw" :
"vpcomw";
break;
3318 case 'd': PatchedName = SuffixSize == 2 ?
"vpcomud" :
"vpcomd";
break;
3319 case 'q': PatchedName = SuffixSize == 2 ?
"vpcomuq" :
"vpcomq";
break;
3322 ComparisonPredicate = CC;
3336 .
Cases(
"cs",
"ds",
"es",
"fs",
"gs",
"ss",
true)
3337 .
Cases(
"rex64",
"data32",
"data16",
"addr32",
"addr16",
true)
3338 .
Cases(
"xacquire",
"xrelease",
true)
3339 .
Cases(
"acquire",
"release", isParsingIntelSyntax())
3342 auto isLockRepeatNtPrefix = [](
StringRef N) {
3344 .
Cases(
"lock",
"rep",
"repe",
"repz",
"repne",
"repnz",
"notrack",
true)
3348 bool CurlyAsEndOfStatement =
false;
3351 while (isLockRepeatNtPrefix(
Name.lower())) {
3372 while (
Name.startswith(
";") ||
Name.startswith(
"\n") ||
3373 Name.startswith(
"#") ||
Name.startswith(
"\t") ||
3374 Name.startswith(
"/")) {
3385 if (PatchedName ==
"data16" && is16BitMode()) {
3386 return Error(NameLoc,
"redundant data16 prefix");
3388 if (PatchedName ==
"data32") {
3390 return Error(NameLoc,
"redundant data32 prefix");
3392 return Error(NameLoc,
"'data32' is not supported in 64-bit mode");
3394 PatchedName =
"data16";
3401 if (Next ==
"callw")
3403 if (Next ==
"ljmpw")
3408 ForcedDataPrefix = X86::Is32Bit;
3416 if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
3418 getParser().getContext());
3447 CurlyAsEndOfStatement =
3448 isParsingIntelSyntax() && isParsingMSInlineAsm() &&
3451 return TokError(
"unexpected token in argument list");
3455 if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
3457 getParser().getContext());
3465 else if (CurlyAsEndOfStatement)
3468 getLexer().getTok().getLoc(), 0);
3475 if (IsFp &&
Operands.size() == 1) {
3477 .
Case(
"fsub",
"fsubp")
3478 .
Case(
"fdiv",
"fdivp")
3479 .
Case(
"fsubr",
"fsubrp")
3480 .
Case(
"fdivr",
"fdivrp");
3484 if ((
Name ==
"mov" ||
Name ==
"movw" ||
Name ==
"movl") &&
3492 X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
3494 (X86MCRegisterClasses[X86::GR16RegClassID].
contains(Op1.
getReg()) ||
3495 X86MCRegisterClasses[X86::GR32RegClassID].
contains(Op1.
getReg()))) {
3497 if (
Name !=
"mov" &&
Name[3] == (is16BitMode() ?
'l' :
'w')) {
3498 Name = is16BitMode() ?
"movw" :
"movl";
3511 if ((
Name ==
"outb" ||
Name ==
"outsb" ||
Name ==
"outw" ||
Name ==
"outsw" ||
3530 bool HadVerifyError =
false;
3533 if (
Name.startswith(
"ins") &&
3538 AddDefaultSrcDestOperands(TmpOperands,
3540 DefaultMemDIOperand(NameLoc));
3541 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3545 if (
Name.startswith(
"outs") &&
3547 (
Name ==
"outsb" ||
Name ==
"outsw" ||
Name ==
"outsl" ||
3548 Name ==
"outsd" ||
Name ==
"outs")) {
3549 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3551 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3557 if (
Name.startswith(
"lods") &&
3559 (
Name ==
"lods" ||
Name ==
"lodsb" ||
Name ==
"lodsw" ||
3560 Name ==
"lodsl" ||
Name ==
"lodsd" ||
Name ==
"lodsq")) {
3561 TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
3562 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3568 if (
Name.startswith(
"stos") &&
3570 (
Name ==
"stos" ||
Name ==
"stosb" ||
Name ==
"stosw" ||
3571 Name ==
"stosl" ||
Name ==
"stosd" ||
Name ==
"stosq")) {
3572 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3573 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3579 if (
Name.startswith(
"scas") &&
3581 (
Name ==
"scas" ||
Name ==
"scasb" ||
Name ==
"scasw" ||
3582 Name ==
"scasl" ||
Name ==
"scasd" ||
Name ==
"scasq")) {
3583 TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3584 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3588 if (
Name.startswith(
"cmps") &&
3590 (
Name ==
"cmps" ||
Name ==
"cmpsb" ||
Name ==
"cmpsw" ||
3591 Name ==
"cmpsl" ||
Name ==
"cmpsd" ||
Name ==
"cmpsq")) {
3592 AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
3593 DefaultMemSIOperand(NameLoc));
3594 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3598 if (((
Name.startswith(
"movs") &&
3599 (
Name ==
"movs" ||
Name ==
"movsb" ||
Name ==
"movsw" ||
3600 Name ==
"movsl" ||
Name ==
"movsd" ||
Name ==
"movsq")) ||
3601 (
Name.startswith(
"smov") &&
3602 (
Name ==
"smov" ||
Name ==
"smovb" ||
Name ==
"smovw" ||
3603 Name ==
"smovl" ||
Name ==
"smovd" ||
Name ==
"smovq"))) &&
3605 if (
Name ==
"movsd" &&
Operands.size() == 1 && !isParsingIntelSyntax())
3607 AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3608 DefaultMemDIOperand(NameLoc));
3609 HadVerifyError = VerifyAndAdjustOperands(
Operands, TmpOperands);
3613 if (HadVerifyError) {
3614 return HadVerifyError;
3622 "size, (R|E)BX will be used for the location");
3637 default:
return false;
3642 if (ForcedDispEncoding == DispEncoding_Disp32) {
3643 Inst.
setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
3652 if (ForcedDispEncoding == DispEncoding_Disp32) {
3653 Inst.
setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
3658 case X86::VMOVZPQILo2PQIrr:
3659 case X86::VMOVAPDrr:
3660 case X86::VMOVAPDYrr:
3661 case X86::VMOVAPSrr:
3662 case X86::VMOVAPSYrr:
3663 case X86::VMOVDQArr:
3664 case X86::VMOVDQAYrr:
3665 case X86::VMOVDQUrr:
3666 case X86::VMOVDQUYrr:
3667 case X86::VMOVUPDrr:
3668 case X86::VMOVUPDYrr:
3669 case X86::VMOVUPSrr:
3670 case X86::VMOVUPSYrr: {
3673 if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
3681 case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr;
break;
3682 case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV;
break;
3683 case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV;
break;
3684 case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV;
break;
3685 case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV;
break;
3686 case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV;
break;
3687 case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV;
break;
3688 case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV;
break;
3689 case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV;
break;
3690 case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV;
break;
3691 case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV;
break;
3692 case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV;
break;
3693 case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV;
break;
3699 case X86::VMOVSSrr: {
3702 if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
3710 case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV;
break;
3711 case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV;
break;
3716 case X86::RCR8ri:
case X86::RCR16ri:
case X86::RCR32ri:
case X86::RCR64ri:
3717 case X86::RCL8ri:
case X86::RCL16ri:
case X86::RCL32ri:
case X86::RCL64ri:
3718 case X86::ROR8ri:
case X86::ROR16ri:
case X86::ROR32ri:
case X86::ROR64ri:
3719 case X86::ROL8ri:
case X86::ROL16ri:
case X86::ROL32ri:
case X86::ROL64ri:
3720 case X86::SAR8ri:
case X86::SAR16ri:
case X86::SAR32ri:
case X86::SAR64ri:
3721 case X86::SHR8ri:
case X86::SHR16ri:
case X86::SHR32ri:
case X86::SHR64ri:
3722 case X86::SHL8ri:
case X86::SHL16ri:
case X86::SHL32ri:
case X86::SHL64ri: {
3731 case X86::RCR8ri: NewOpc = X86::RCR8r1;
break;
3732 case X86::RCR16ri: NewOpc = X86::RCR16r1;
break;
3733 case X86::RCR32ri: NewOpc = X86::RCR32r1;
break;
3734 case X86::RCR64ri: NewOpc = X86::RCR64r1;
break;
3735 case X86::RCL8ri: NewOpc = X86::RCL8r1;
break;
3736 case X86::RCL16ri: NewOpc = X86::RCL16r1;
break;
3737 case X86::RCL32ri: NewOpc = X86::RCL32r1;
break;
3738 case X86::RCL64ri: NewOpc = X86::RCL64r1;
break;
3739 case X86::ROR8ri: NewOpc = X86::ROR8r1;
break;
3740 case X86::ROR16ri: NewOpc = X86::ROR16r1;
break;
3741 case X86::ROR32ri: NewOpc = X86::ROR32r1;
break;
3742 case X86::ROR64ri: NewOpc = X86::ROR64r1;
break;
3743 case X86::ROL8ri: NewOpc = X86::ROL8r1;
break;
3744 case X86::ROL16ri: NewOpc = X86::ROL16r1;
break;
3745 case X86::ROL32ri: NewOpc = X86::ROL32r1;
break;
3746 case X86::ROL64ri: NewOpc = X86::ROL64r1;
break;
3747 case X86::SAR8ri: NewOpc = X86::SAR8r1;
break;
3748 case X86::SAR16ri: NewOpc = X86::SAR16r1;
break;
3749 case X86::SAR32ri: NewOpc = X86::SAR32r1;
break;
3750 case X86::SAR64ri: NewOpc = X86::SAR64r1;
break;
3751 case X86::SHR8ri: NewOpc = X86::SHR8r1;
break;
3752 case X86::SHR16ri: NewOpc = X86::SHR16r1;
break;
3753 case X86::SHR32ri: NewOpc = X86::SHR32r1;
break;
3754 case X86::SHR64ri: NewOpc = X86::SHR64r1;
break;
3755 case X86::SHL8ri: NewOpc = X86::SHL8r1;
break;
3756 case X86::SHL16ri: NewOpc = X86::SHL16r1;
break;
3757 case X86::SHL32ri: NewOpc = X86::SHL32r1;
break;
3758 case X86::SHL64ri: NewOpc = X86::SHL64r1;
break;
3768 case X86::RCR8mi:
case X86::RCR16mi:
case X86::RCR32mi:
case X86::RCR64mi:
3769 case X86::RCL8mi:
case X86::RCL16mi:
case X86::RCL32mi:
case X86::RCL64mi:
3770 case X86::ROR8mi:
case X86::ROR16mi:
case X86::ROR32mi:
case X86::ROR64mi:
3771 case X86::ROL8mi:
case X86::ROL16mi:
case X86::ROL32mi:
case X86::ROL64mi:
3772 case X86::SAR8mi:
case X86::SAR16mi:
case X86::SAR32mi:
case X86::SAR64mi:
3773 case X86::SHR8mi:
case X86::SHR16mi:
case X86::SHR32mi:
case X86::SHR64mi:
3774 case X86::SHL8mi:
case X86::SHL16mi:
case X86::SHL32mi:
case X86::SHL64mi: {
3784 case X86::RCR8mi: NewOpc = X86::RCR8m1;
break;
3785 case X86::RCR16mi: NewOpc = X86::RCR16m1;
break;
3786 case X86::RCR32mi: NewOpc = X86::RCR32m1;
break;
3787 case X86::RCR64mi: NewOpc = X86::RCR64m1;
break;
3788 case X86::RCL8mi: NewOpc = X86::RCL8m1;
break;
3789 case X86::RCL16mi: NewOpc = X86::RCL16m1;
break;
3790 case X86::RCL32mi: NewOpc = X86::RCL32m1;
break;
3791 case X86::RCL64mi: NewOpc = X86::RCL64m1;
break;
3792 case X86::ROR8mi: NewOpc = X86::ROR8m1;
break;
3793 case X86::ROR16mi: NewOpc = X86::ROR16m1;
break;
3794 case X86::ROR32mi: NewOpc = X86::ROR32m1;
break;
3795 case X86::ROR64mi: NewOpc = X86::ROR64m1;
break;
3796 case X86::ROL8mi: NewOpc = X86::ROL8m1;
break;
3797 case X86::ROL16mi: NewOpc = X86::ROL16m1;
break;
3798 case X86::ROL32mi: NewOpc = X86::ROL32m1;
break;
3799 case X86::ROL64mi: NewOpc = X86::ROL64m1;
break;
3800 case X86::SAR8mi: NewOpc = X86::SAR8m1;
break;
3801 case X86::SAR16mi: NewOpc = X86::SAR16m1;
break;
3802 case X86::SAR32mi: NewOpc = X86::SAR32m1;
break;
3803 case X86::SAR64mi: NewOpc = X86::SAR64m1;
break;
3804 case X86::SHR8mi: NewOpc = X86::SHR8m1;
break;
3805 case X86::SHR16mi: NewOpc = X86::SHR16m1;
break;
3806 case X86::SHR32mi: NewOpc = X86::SHR32m1;
break;
3807 case X86::SHR64mi: NewOpc = X86::SHR64m1;
break;
3808 case X86::SHL8mi: NewOpc = X86::SHL8m1;
break;
3809 case X86::SHL16mi: NewOpc = X86::SHL16m1;
break;
3810 case X86::SHL32mi: NewOpc = X86::SHL32m1;
break;
3811 case X86::SHL64mi: NewOpc = X86::SHL64m1;
break;
3836 using namespace X86;
3839 uint64_t TSFlags = MII.get(Opcode).TSFlags;
3840 if (isVFCMADDCPH(Opcode) || isVFCMADDCSH(Opcode) || isVFMADDCPH(Opcode) ||
3841 isVFMADDCSH(Opcode)) {
3845 return Warning(Ops[0]->getStartLoc(),
"Destination register should be "
3846 "distinct from source registers");
3847 }
else if (isVFCMULCPH(Opcode) || isVFCMULCSH(Opcode) || isVFMULCPH(Opcode) ||
3848 isVFMULCSH(Opcode)) {
3852 return Warning(Ops[0]->getStartLoc(),
"Destination register should be "
3853 "distinct from source registers");
3854 }
else if (isV4FMADDPS(Opcode) || isV4FMADDSS(Opcode) ||
3855 isV4FNMADDPS(Opcode) || isV4FNMADDSS(Opcode) ||
3856 isVP4DPWSSDS(Opcode) || isVP4DPWSSD(Opcode)) {
3859 unsigned Src2Enc =
MRI->getEncodingValue(Src2);
3860 if (Src2Enc % 4 != 0) {
3862 unsigned GroupStart = (Src2Enc / 4) * 4;
3863 unsigned GroupEnd = GroupStart + 3;
3864 return Warning(Ops[0]->getStartLoc(),
3865 "source register '" +
RegName +
"' implicitly denotes '" +
3870 }
else if (isVGATHERDPD(Opcode) || isVGATHERDPS(Opcode) ||
3871 isVGATHERQPD(Opcode) || isVGATHERQPS(Opcode) ||
3872 isVPGATHERDD(Opcode) || isVPGATHERDQ(Opcode) ||
3873 isVPGATHERQD(Opcode) || isVPGATHERQQ(Opcode)) {
3877 unsigned Index =
MRI->getEncodingValue(
3880 return Warning(Ops[0]->getStartLoc(),
"index and destination registers "
3881 "should be distinct");
3885 unsigned Index =
MRI->getEncodingValue(
3888 return Warning(Ops[0]->getStartLoc(),
"mask, index, and destination "
3889 "registers should be distinct");
3899 for (
unsigned i = 0;
i != NumOps; ++
i) {
3911 if (UsesRex && HReg != X86::NoRegister) {
3913 return Error(Ops[0]->getStartLoc(),
3914 "can't encode '" +
RegName +
"' in an instruction requiring "
3924 void X86AsmParser::emitWarningForSpecialLVIInstruction(
SMLoc Loc) {
3925 Warning(Loc,
"Instruction may be vulnerable to LVI and "
3926 "requires manual mitigation");
3927 Note(
SMLoc(),
"See https://software.intel.com/"
3928 "security-software-guidance/insights/"
3929 "deep-dive-load-value-injection#specialinstructions"
3930 " for more information");
3954 bool Parse32 = is32BitMode() || Code16GCC;
3956 is64BitMode() ? X86::RSP : (Parse32 ?
X86::ESP : X86::SP);
3962 ShlMemOp->addMemOperands(ShlInst, 5);
3975 emitWarningForSpecialLVIInstruction(Inst.
getLoc());
3987 void X86AsmParser::applyLVILoadHardeningMitigation(
MCInst &Inst,
4004 emitWarningForSpecialLVIInstruction(Inst.
getLoc());
4007 }
else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
4010 emitWarningForSpecialLVIInstruction(Inst.
getLoc());
4032 getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity])
4033 applyLVICFIMitigation(Inst, Out);
4038 getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
4039 applyLVILoadHardeningMitigation(Inst, Out);
4042 bool X86AsmParser::MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
4045 bool MatchingInlineAsm) {
4046 if (isParsingIntelSyntax())
4055 bool MatchingInlineAsm) {
4060 .
Case(
"finit",
"fninit")
4061 .
Case(
"fsave",
"fnsave")
4062 .
Case(
"fstcw",
"fnstcw")
4063 .
Case(
"fstcww",
"fnstcw")
4064 .
Case(
"fstenv",
"fnstenv")
4065 .
Case(
"fstsw",
"fnstsw")
4066 .
Case(
"fstsww",
"fnstsw")
4067 .
Case(
"fclex",
"fnclex")
4073 if (!MatchingInlineAsm)
4074 emitInstruction(Inst,
Operands, Out);
4079 bool X86AsmParser::ErrorMissingFeature(
SMLoc IDLoc,
4081 bool MatchingInlineAsm) {
4082 assert(MissingFeatures.
any() &&
"Unknown missing feature!");
4085 OS <<
"instruction requires:";
4086 for (
unsigned i = 0,
e = MissingFeatures.
size();
i !=
e; ++
i) {
4087 if (MissingFeatures[
i])
4090 return Error(IDLoc, OS.str(),
SMRange(), MatchingInlineAsm);
4094 unsigned Result = 0;
4097 Result =
Prefix.getPrefix();
4103 unsigned X86AsmParser::checkTargetMatchPredicate(
MCInst &Inst) {
4107 if (ForcedVEXEncoding == VEXEncoding_EVEX &&
4109 return Match_Unsupported;
4111 if ((ForcedVEXEncoding == VEXEncoding_VEX ||
4112 ForcedVEXEncoding == VEXEncoding_VEX2 ||
4113 ForcedVEXEncoding == VEXEncoding_VEX3) &&
4115 return Match_Unsupported;
4119 (ForcedVEXEncoding != VEXEncoding_VEX &&
4120 ForcedVEXEncoding != VEXEncoding_VEX2 &&
4121 ForcedVEXEncoding != VEXEncoding_VEX3))
4122 return Match_Unsupported;
4124 return Match_Success;
4127 bool X86AsmParser::MatchAndEmitATTInstruction(
SMLoc IDLoc,
unsigned &Opcode,
4131 bool MatchingInlineAsm) {
4133 assert((*
Operands[0]).isToken() &&
"Leading operand should always be a mnemonic!");
4138 Out, MatchingInlineAsm);
4146 if (ForcedVEXEncoding == VEXEncoding_VEX)
4148 else if (ForcedVEXEncoding == VEXEncoding_VEX2)
4150 else if (ForcedVEXEncoding == VEXEncoding_VEX3)
4152 else if (ForcedVEXEncoding == VEXEncoding_EVEX)
4156 if (ForcedDispEncoding == DispEncoding_Disp8)
4158 else if (ForcedDispEncoding == DispEncoding_Disp32)
4166 if (ForcedDataPrefix == X86::Is32Bit)
4167 SwitchMode(X86::Is32Bit);
4171 MissingFeatures, MatchingInlineAsm,
4172 isParsingIntelSyntax());
4173 if (ForcedDataPrefix == X86::Is32Bit) {
4174 SwitchMode(X86::Is16Bit);
4175 ForcedDataPrefix = 0;
4177 switch (OriginalError) {
4180 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4185 if (!MatchingInlineAsm)
4186 while (processInstruction(Inst,
Operands))
4190 if (!MatchingInlineAsm)
4191 emitInstruction(Inst,
Operands, Out);
4194 case Match_InvalidImmUnsignedi4: {
4196 if (ErrorLoc ==
SMLoc())
4198 return Error(ErrorLoc,
"immediate must be an integer in range [0, 15]",
4199 EmptyRange, MatchingInlineAsm);
4201 case Match_MissingFeature:
4202 return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
4203 case Match_InvalidOperand:
4204 case Match_MnemonicFail:
4205 case Match_Unsupported:
4208 if (
Op.getToken().empty()) {
4209 Error(IDLoc,
"instruction must have size higher than 0", EmptyRange,
4224 Op.setTokenValue(Tmp);
4232 const char *Suffixes =
Base[0] !=
'f' ?
"bwlq" :
"slt\0";
4234 const char *MemSize =
Base[0] !=
'f' ?
"\x08\x10\x20\x40" :
"\x20\x40\x50\0";
4246 bool HasVectorReg =
false;
4251 HasVectorReg =
true;
4252 else if (X86Op->
isMem()) {
4254 assert(
MemOp->Mem.Size == 0 &&
"Memory size always 0 under ATT syntax");
4262 Tmp.back() = Suffixes[
I];
4263 if (
MemOp && HasVectorReg)
4264 MemOp->Mem.Size = MemSize[
I];
4265 Match[
I] = Match_MnemonicFail;
4266 if (
MemOp || !HasVectorReg) {
4268 MatchInstruction(
Operands, Inst, ErrorInfoIgnore, MissingFeatures,
4269 MatchingInlineAsm, isParsingIntelSyntax());
4271 if (Match[
I] == Match_MissingFeature)
4272 ErrorInfoMissingFeatures = MissingFeatures;
4282 unsigned NumSuccessfulMatches =
llvm::count(Match, Match_Success);
4283 if (NumSuccessfulMatches == 1) {
4284 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4289 if (!MatchingInlineAsm)
4290 while (processInstruction(Inst,
Operands))
4294 if (!MatchingInlineAsm)
4295 emitInstruction(Inst,
Operands, Out);
4304 if (NumSuccessfulMatches > 1) {
4306 unsigned NumMatches = 0;
4308 if (Match[
I] == Match_Success)
4309 MatchChars[NumMatches++] = Suffixes[
I];
4313 OS <<
"ambiguous instructions require an explicit suffix (could be ";
4314 for (
unsigned i = 0;
i != NumMatches; ++
i) {
4317 if (
i + 1 == NumMatches)
4319 OS <<
"'" <<
Base << MatchChars[
i] <<
"'";
4322 Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
4330 if (
llvm::count(Match, Match_MnemonicFail) == 4) {
4331 if (OriginalError == Match_MnemonicFail)
4332 return Error(IDLoc,
"invalid instruction mnemonic '" +
Base +
"'",
4333 Op.getLocRange(), MatchingInlineAsm);
4335 if (OriginalError == Match_Unsupported)
4336 return Error(IDLoc,
"unsupported instruction", EmptyRange,
4339 assert(OriginalError == Match_InvalidOperand &&
"Unexpected error");
4343 return Error(IDLoc,
"too few operands for instruction", EmptyRange,
4350 OperandRange, MatchingInlineAsm);
4354 return Error(IDLoc,
"invalid operand for instruction", EmptyRange,
4360 return Error(IDLoc,
"unsupported instruction", EmptyRange,
4366 if (
llvm::count(Match, Match_MissingFeature) == 1) {
4368 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4374 if (
llvm::count(Match, Match_InvalidOperand) == 1) {
4375 return Error(IDLoc,
"invalid operand for instruction", EmptyRange,
4380 Error(IDLoc,
"unknown use of instruction mnemonic without a size suffix",
4381 EmptyRange, MatchingInlineAsm);
// Matches an instruction parsed in Intel syntax against the generated matcher
// and, on success, emits it. Intel syntax permits unsized memory operands, so
// this routine retries the match across candidate operand sizes.
// NOTE(review): this extraction is garbled — the interleaved original line
// numbers (4385, 4389, ...) jump, so statements and braces are missing here.
// Code tokens below are preserved verbatim; only comments were added.
4385 bool X86AsmParser::MatchAndEmitIntelInstruction(
SMLoc IDLoc,
unsigned &Opcode,
4389 bool MatchingInlineAsm) {
// Operand 0 is always the mnemonic token produced by the parser.
4391 assert((*
Operands[0]).isToken() &&
"Leading operand should always be a mnemonic!");
// Honor an explicitly forced VEX/EVEX encoding, if one was requested
// (bodies of these branches are missing in this extraction).
4405 if (ForcedVEXEncoding == VEXEncoding_VEX)
4407 else if (ForcedVEXEncoding == VEXEncoding_VEX2)
4409 else if (ForcedVEXEncoding == VEXEncoding_VEX3)
4411 else if (ForcedVEXEncoding == VEXEncoding_EVEX)
// Likewise honor a forced displacement encoding (disp8 / disp32).
4415 if (ForcedDispEncoding == DispEncoding_Disp8)
4417 else if (ForcedDispEncoding == DispEncoding_Disp32)
// Remember the first memory operand with no explicit size.
4428 UnsizedMemOp = X86Op;
// For these mnemonics an unsized memory operand defaults to pointer width.
4438 static const char *
const PtrSizedInstrs[] = {
"call",
"jmp",
"push"};
4439 for (
const char *Instr : PtrSizedInstrs) {
4440 if (Mnemonic == Instr) {
4441 UnsizedMemOp->
Mem.Size = getPointerWidth();
// Special case: `push <imm>` — size the push from the pointer width and
// rewrite the mnemonic with an explicit size suffix.
4453 if (Mnemonic ==
"push" &&
Operands.size() == 2) {
4455 if (X86Op->
isImm()) {
4457 const auto *
CE = dyn_cast<MCConstantExpr>(X86Op->
getImm());
4458 unsigned Size = getPointerWidth();
// Pick the AT&T-style suffix for the current mode (q/l/w).
4463 Tmp += (is64BitMode())
4465 : (is32BitMode()) ?
"l" : (is16BitMode()) ?
"w" :
" ";
4466 Op.setTokenValue(Tmp);
4469 MissingFeatures, MatchingInlineAsm,
// If the memory operand is unsized, try every legal X86 memory-operand
// width and collect the match result for each.
4480 static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
4481 for (
unsigned Size : MopSizes) {
4482 UnsizedMemOp->
Mem.Size =
Size;
4485 unsigned M = MatchInstruction(
Operands, Inst, ErrorInfoIgnore,
4486 MissingFeatures, MatchingInlineAsm,
4487 isParsingIntelSyntax());
4492 if (
Match.back() == Match_MissingFeature)
4493 ErrorInfoMissingFeatures = MissingFeatures;
// Restore the operand to unsized after probing.
4497 UnsizedMemOp->
Mem.Size = 0;
// If no sized probe ran, do a single plain match attempt.
4503 if (
Match.empty()) {
4504 Match.push_back(MatchInstruction(
4506 isParsingIntelSyntax()));
4508 if (
Match.back() == Match_MissingFeature)
4509 ErrorInfoMissingFeatures = MissingFeatures;
4514 UnsizedMemOp->
Mem.Size = 0;
// Unknown mnemonic: report against the mnemonic token's range.
4517 if (
Match.back() == Match_MnemonicFail) {
4518 return Error(IDLoc,
"invalid instruction mnemonic '" + Mnemonic +
"'",
4519 Op.getLocRange(), MatchingInlineAsm);
4522 unsigned NumSuccessfulMatches =
llvm::count(Match, Match_Success);
// Ambiguity from the unsized operand: retry once (condition truncated here)
// and accept if that disambiguating match succeeds.
4526 if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
4529 unsigned M = MatchInstruction(
4531 isParsingIntelSyntax());
4532 if (M == Match_Success)
4533 NumSuccessfulMatches = 1;
// Exactly one match: validate, post-process, and emit.
4545 if (NumSuccessfulMatches == 1) {
4546 if (!MatchingInlineAsm && validateInstruction(Inst,
Operands))
4551 if (!MatchingInlineAsm)
4552 while (processInstruction(Inst,
Operands))
4555 if (!MatchingInlineAsm)
4556 emitInstruction(Inst,
Operands, Out);
4559 }
else if (NumSuccessfulMatches > 1) {
4561 "multiple matches only possible with unsized memory operands");
4563 "ambiguous operand size for instruction '" + Mnemonic +
"\'",
// From here down: map the collected failure kinds to diagnostics.
4569 return Error(IDLoc,
"unsupported instruction", EmptyRange,
4575 if (
llvm::count(Match, Match_MissingFeature) == 1) {
4577 return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
4583 if (
llvm::count(Match, Match_InvalidOperand) == 1) {
4584 return Error(IDLoc,
"invalid operand for instruction", EmptyRange,
4588 if (
llvm::count(Match, Match_InvalidImmUnsignedi4) == 1) {
4590 if (ErrorLoc ==
SMLoc())
4592 return Error(ErrorLoc,
"immediate must be an integer in range [0, 15]",
4593 EmptyRange, MatchingInlineAsm);
// Fallback diagnostic when no specific failure dominated.
4597 return Error(IDLoc,
"unknown instruction mnemonic", EmptyRange,
// Returns true for registers that should not appear in inline-asm clobber
// lists: segment registers are excluded.
// NOTE(review): closing brace is missing in this extraction; tokens verbatim.
4601 bool X86AsmParser::OmitRegisterFromClobberLists(
unsigned RegNo) {
4602 return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
// Dispatches X86-specific assembler directives: .arch, .code16/32/64,
// .att_syntax / .intel_syntax, .nops, .even, CodeView FPO (.cv_fpo_*),
// and Win64 SEH (.seh_*) directives.
// NOTE(review): this extraction is missing lines (the embedded original
// line numbers jump); code tokens are preserved verbatim, comments added.
4605 bool X86AsmParser::ParseDirective(
AsmToken DirectiveID) {
4609 return parseDirectiveArch();
4611 return ParseDirectiveCode(IDVal, DirectiveID.
getLoc());
// .att_syntax: 'noprefix' is rejected — registers must keep the '%' prefix.
4617 return Error(DirectiveID.
getLoc(),
"'.att_syntax noprefix' is not "
4618 "supported: registers must have a "
4619 "'%' prefix in .att_syntax");
4621 getParser().setAssemblerDialect(0);
4623 }
else if (IDVal.
startswith(
".intel_syntax")) {
// .intel_syntax: conversely, 'prefix' is rejected.
4624 getParser().setAssemblerDialect(1);
4629 return Error(DirectiveID.
getLoc(),
"'.intel_syntax prefix' is not "
4630 "supported: registers must not have "
4631 "a '%' prefix in .intel_syntax");
4634 }
else if (IDVal ==
".nops")
4635 return parseDirectiveNops(DirectiveID.
getLoc());
4636 else if (IDVal ==
".even")
4637 return parseDirectiveEven(DirectiveID.
getLoc());
// CodeView frame-pointer-omission (FPO) debug directives.
4638 else if (IDVal ==
".cv_fpo_proc")
4639 return parseDirectiveFPOProc(DirectiveID.
getLoc());
4640 else if (IDVal ==
".cv_fpo_setframe")
4641 return parseDirectiveFPOSetFrame(DirectiveID.
getLoc());
4642 else if (IDVal ==
".cv_fpo_pushreg")
4643 return parseDirectiveFPOPushReg(DirectiveID.
getLoc());
4644 else if (IDVal ==
".cv_fpo_stackalloc")
4645 return parseDirectiveFPOStackAlloc(DirectiveID.
getLoc());
4646 else if (IDVal ==
".cv_fpo_stackalign")
4647 return parseDirectiveFPOStackAlign(DirectiveID.
getLoc());
4648 else if (IDVal ==
".cv_fpo_endprologue")
4649 return parseDirectiveFPOEndPrologue(DirectiveID.
getLoc());
4650 else if (IDVal ==
".cv_fpo_endproc")
4651 return parseDirectiveFPOEndProc(DirectiveID.
getLoc());
// Win64 structured-exception-handling unwind directives (each condition's
// second alternative spelling is lost in this extraction).
4652 else if (IDVal ==
".seh_pushreg" ||
4654 return parseDirectiveSEHPushReg(DirectiveID.
getLoc());
4655 else if (IDVal ==
".seh_setframe" ||
4657 return parseDirectiveSEHSetFrame(DirectiveID.
getLoc());
4658 else if (IDVal ==
".seh_savereg" ||
4660 return parseDirectiveSEHSaveReg(DirectiveID.
getLoc());
4661 else if (IDVal ==
".seh_savexmm" ||
4663 return parseDirectiveSEHSaveXMM(DirectiveID.
getLoc());
4664 else if (IDVal ==
".seh_pushframe" ||
4666 return parseDirectiveSEHPushFrame(DirectiveID.
getLoc());
// Handles the .arch directive; consumes the rest of the statement.
// NOTE(review): body is truncated in this extraction; tokens verbatim.
4671 bool X86AsmParser::parseDirectiveArch() {
4673 getParser().parseStringToEndOfStatement();
// Parses `.nops <NumBytes>[, <Control>]`: reads the byte count and optional
// control operand, validates them, and emits the NOP fill via the streamer.
// NOTE(review): error-return statements and braces are missing in this
// extraction; code tokens are preserved verbatim.
4679 bool X86AsmParser::parseDirectiveNops(
SMLoc L) {
4680 int64_t NumBytes = 0,
Control = 0;
4681 SMLoc NumBytesLoc, ControlLoc;
4683 NumBytesLoc = getTok().getLoc();
4684 if (getParser().checkForValidSection() ||
4685 getParser().parseAbsoluteExpression(NumBytes))
4689 ControlLoc = getTok().getLoc();
4690 if (getParser().parseAbsoluteExpression(
Control))
4693 if (getParser().parseEOL())
// Semantic checks: the fill size must be positive ...
4696 if (NumBytes <= 0) {
4697 Error(NumBytesLoc,
"'.nops' directive with non-positive size");
// ... and the per-NOP size control must be non-negative.
4702 Error(ControlLoc,
"'.nops' directive with negative NOP size");
// Emit the requested NOP sequence.
4707 getParser().getStreamer().emitNops(NumBytes,
Control, L, STI);
// Handles `.even`: aligns the current section to 2 bytes, using code
// alignment for code sections and value padding otherwise.
// NOTE(review): extraction is missing lines; tokens verbatim.
4714 bool X86AsmParser::parseDirectiveEven(
SMLoc L) {
4720 getStreamer().initSections(
false, getSTI());
4721 Section = getStreamer().getCurrentSectionOnly();
4724 getStreamer().emitCodeAlignment(2, &getSTI(), 0);
4726 getStreamer().emitValueToAlignment(2, 0, 1, 0);
// Handles .code16 / .code16gcc / .code32 / .code64: switches the parser's
// mode if needed and notifies the streamer via an assembler flag.
// NOTE(review): extraction is missing lines; tokens verbatim.
4732 bool X86AsmParser::ParseDirectiveCode(
StringRef IDVal,
SMLoc L) {
4735 if (IDVal ==
".code16") {
4737 if (!is16BitMode()) {
4738 SwitchMode(X86::Is16Bit);
4739 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
4741 }
else if (IDVal ==
".code16gcc") {
// .code16gcc also switches to 16-bit mode (gcc-style variant).
4745 if (!is16BitMode()) {
4746 SwitchMode(X86::Is16Bit);
4747 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
4749 }
else if (IDVal ==
".code32") {
4751 if (!is32BitMode()) {
4752 SwitchMode(X86::Is32Bit);
4753 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
4755 }
else if (IDVal ==
".code64") {
4757 if (!is64BitMode()) {
4758 SwitchMode(X86::Is64Bit);
4759 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code64);
// Any other spelling is rejected.
4762 Error(L,
"unknown directive " + IDVal);
// Handles `.cv_fpo_proc <symbol> <paramsSize>`: parses the procedure symbol
// name and its parameter byte count, then forwards to the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4770 bool X86AsmParser::parseDirectiveFPOProc(
SMLoc L) {
4775 return Parser.
TokError(
"expected symbol name");
4776 if (Parser.
parseIntToken(ParamsSize,
"expected parameter byte count"))
// Parameter size must fit the CodeView encoding range.
4779 return Parser.
TokError(
"parameters size out of range");
4782 MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
4783 return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
// Handles `.cv_fpo_setframe <reg>`: parses a register and end-of-line,
// then records the FPO frame register via the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4787 bool X86AsmParser::parseDirectiveFPOSetFrame(
SMLoc L) {
4790 if (ParseRegister(
Reg, DummyLoc, DummyLoc) || parseEOL())
4792 return getTargetStreamer().emitFPOSetFrame(
Reg, L);
// Handles `.cv_fpo_pushreg <reg>`: parses a register and end-of-line,
// then records the pushed register via the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4796 bool X86AsmParser::parseDirectiveFPOPushReg(
SMLoc L) {
4799 if (ParseRegister(
Reg, DummyLoc, DummyLoc) || parseEOL())
4801 return getTargetStreamer().emitFPOPushReg(
Reg, L);
// Handles `.cv_fpo_stackalloc <offset>`: parses an integer offset and
// end-of-line, then forwards to the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4805 bool X86AsmParser::parseDirectiveFPOStackAlloc(
SMLoc L) {
4808 if (Parser.
parseIntToken(Offset,
"expected offset") || parseEOL())
4810 return getTargetStreamer().emitFPOStackAlloc(Offset, L);
// Handles `.cv_fpo_stackalign <offset>`: parses an integer offset and
// end-of-line, then forwards to the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4814 bool X86AsmParser::parseDirectiveFPOStackAlign(
SMLoc L) {
4817 if (Parser.
parseIntToken(Offset,
"expected offset") || parseEOL())
4819 return getTargetStreamer().emitFPOStackAlign(Offset, L);
// Handles `.cv_fpo_endprologue`: forwards directly to the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4823 bool X86AsmParser::parseDirectiveFPOEndPrologue(
SMLoc L) {
4827 return getTargetStreamer().emitFPOEndPrologue(L);
// Handles `.cv_fpo_endproc`: forwards directly to the target streamer.
// NOTE(review): extraction is missing lines; tokens verbatim.
4831 bool X86AsmParser::parseDirectiveFPOEndProc(
SMLoc L) {
4835 return getTargetStreamer().emitFPOEndProc(L);
// Parses a register operand for an SEH directive. Accepts either a register
// name (validated against RegClassID) or a raw encoded register number,
// which is mapped back to a register by scanning the class's encodings.
// NOTE(review): extraction is missing lines; tokens verbatim.
4838 bool X86AsmParser::parseSEHRegisterNumber(
unsigned RegClassID,
4840 SMLoc startLoc = getLexer().getLoc();
// Path 1: a symbolic register name.
4846 if (ParseRegister(RegNo, startLoc, endLoc))
4849 if (!X86MCRegisterClasses[RegClassID].
contains(RegNo)) {
4850 return Error(startLoc,
4851 "register is not supported for use with this directive");
// Path 2: a numeric (encoded) register value.
4857 if (getParser().parseAbsoluteExpression(EncodedReg))
// Reverse-map the encoding to a physical register in the class.
4863 for (
MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
4864 if (
MRI->getEncodingValue(
Reg) == EncodedReg) {
4870 return Error(startLoc,
4871 "incorrect register number for use with this directive");
// Handles `.seh_pushreg <GR64>`: parses the register, requires end of
// statement, and emits the Win64 unwind push-reg record.
// NOTE(review): extraction is missing lines; tokens verbatim.
4878 bool X86AsmParser::parseDirectiveSEHPushReg(
SMLoc Loc) {
4880 if (parseSEHRegisterNumber(X86::GR64RegClassID,
Reg))
4884 return TokError(
"unexpected token in directive");
4887 getStreamer().emitWinCFIPushReg(
Reg, Loc);
// Handles `.seh_setframe <GR64>, <offset>`: parses the frame register and
// required stack-pointer offset, then emits the Win64 set-frame record.
// NOTE(review): extraction is missing lines; tokens verbatim.
4891 bool X86AsmParser::parseDirectiveSEHSetFrame(
SMLoc Loc) {
4894 if (parseSEHRegisterNumber(X86::GR64RegClassID,
Reg))
4897 return TokError(
"you must specify a stack pointer offset");
4900 if (getParser().parseAbsoluteExpression(Off))
4904 return TokError(
"unexpected token in directive");
4907 getStreamer().emitWinCFISetFrame(
Reg, Off, Loc);
// Handles `.seh_savereg <GR64>, <offset>`: parses the register and required
// stack offset, then emits the Win64 save-reg record.
// NOTE(review): extraction is missing lines; tokens verbatim.
4911 bool X86AsmParser::parseDirectiveSEHSaveReg(
SMLoc Loc) {
4914 if (parseSEHRegisterNumber(X86::GR64RegClassID,
Reg))
4917 return TokError(
"you must specify an offset on the stack");
4920 if (getParser().parseAbsoluteExpression(Off))
4924 return TokError(
"unexpected token in directive");
4927 getStreamer().emitWinCFISaveReg(
Reg, Off, Loc);
// Handles `.seh_savexmm <XMM>, <offset>`: parses a VR128X register and
// required stack offset, then emits the Win64 save-xmm record.
// NOTE(review): extraction is missing lines; tokens verbatim.
4931 bool X86AsmParser::parseDirectiveSEHSaveXMM(
SMLoc Loc) {
4934 if (parseSEHRegisterNumber(X86::VR128XRegClassID,
Reg))
4937 return TokError(
"you must specify an offset on the stack");
4940 if (getParser().parseAbsoluteExpression(Off))
4944 return TokError(
"unexpected token in directive");
4947 getStreamer().emitWinCFISaveXMM(
Reg, Off, Loc);
// Handles `.seh_pushframe [@code]`: optionally parses an `@code` annotation,
// requires end of statement, and emits the Win64 push-frame record.
// NOTE(review): extraction is missing lines; tokens verbatim.
4951 bool X86AsmParser::parseDirectiveSEHPushFrame(
SMLoc Loc) {
4955 SMLoc startLoc = getLexer().getLoc();
4957 if (!getParser().parseIdentifier(CodeID)) {
// Only the identifier "code" is accepted after '@'.
4958 if (CodeID !=
"code")
4959 return Error(startLoc,
"expected @code");
4965 return TokError(
"unexpected token in directive");
4968 getStreamer().emitWinCFIPushFrame(Code, Loc);
4978 #define GET_REGISTER_MATCHER
4979 #define GET_MATCHER_IMPLEMENTATION
4980 #define GET_SUBTARGET_FEATURE_NAME
4981 #include "X86GenAsmMatcher.inc"