LLVM 23.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
/// The kinds of register an assembler operand can be classified as.
enum class RegKind {
  Scalar,                // General-purpose or scalar FP/SIMD register.
  NeonVector,            // NEON vector register.
  SVEDataVector,         // SVE data vector register.
  SVEPredicateAsCounter, // SVE predicate register used as a counter.
  SVEPredicateVector,    // SVE predicate vector register.
  Matrix,                // SME matrix register.
  LookupTable            // SME lookup-table register.
};
77
/// The addressable forms of an SME matrix operand.
enum class MatrixKind { Array, Tile, Row, Col };
79
// How a parsed register may relate to the register class an instruction
// expects: exactly, or via its super- or sub-register.
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E);
186 bool parseDataExpr(const MCExpr *&Res) override;
187 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
188
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
193
194 bool parseDirectiveTLSDescCall(SMLoc L);
195
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
198
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
205
206 bool parseDirectiveVariantPCS(SMLoc L);
207
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
240
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
245 OperandVector &Operands, MCStreamer &Out,
246 uint64_t &ErrorInfo,
247 bool MatchingInlineAsm) override;
248 /// @name Auto-generated Match Functions
249 /// {
250
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
253
254 /// }
255
256 ParseStatus tryParseScalarRegister(MCRegister &Reg);
257 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
258 RegKind MatchKind);
259 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
260 ParseStatus tryParseSVCR(OperandVector &Operands);
261 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
262 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
263 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
264 ParseStatus tryParseSysReg(OperandVector &Operands);
265 ParseStatus tryParseSysCROperand(OperandVector &Operands);
266 template <bool IsSVEPrefetch = false>
267 ParseStatus tryParsePrefetch(OperandVector &Operands);
268 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
269 ParseStatus tryParsePSBHint(OperandVector &Operands);
270 ParseStatus tryParseBTIHint(OperandVector &Operands);
271 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
272 ParseStatus tryParseTIndexHint(OperandVector &Operands);
273 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
274 ParseStatus tryParseAdrLabel(OperandVector &Operands);
275 template <bool AddFPZeroAsLiteral>
276 ParseStatus tryParseFPImm(OperandVector &Operands);
277 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
278 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
279 bool tryParseNeonVectorRegister(OperandVector &Operands);
280 ParseStatus tryParseVectorIndex(OperandVector &Operands);
281 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
282 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
283 template <bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
285 ParseStatus tryParseGPROperand(OperandVector &Operands);
286 ParseStatus tryParseZTOperand(OperandVector &Operands);
287 template <bool ParseShiftExtend, bool ParseSuffix>
288 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
289 template <RegKind RK>
290 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
292 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
293 template <RegKind VectorKind>
294 ParseStatus tryParseVectorList(OperandVector &Operands,
295 bool ExpectMatch = false);
296 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
297 ParseStatus tryParseSVEPattern(OperandVector &Operands);
298 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
299 ParseStatus tryParseGPR64x8(OperandVector &Operands);
300 ParseStatus tryParseImmRange(OperandVector &Operands);
301 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
302 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
303
304public:
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
309 };
310 bool IsILP32;
311 bool IsWindowsArm64EC;
312
313 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII, const MCTargetOptions &Options)
315 : MCTargetAsmParser(Options, STI, MII) {
316 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
317 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
319 MCStreamer &S = getParser().getStreamer();
320 if (S.getTargetStreamer() == nullptr)
321 new AArch64TargetStreamer(S);
322
323 // Alias .hword/.word/.[dx]word to the target-independent
324 // .2byte/.4byte/.8byte directives as they have the same form and
325 // semantics:
326 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
327 Parser.addAliasForDirective(".hword", ".2byte");
328 Parser.addAliasForDirective(".word", ".4byte");
329 Parser.addAliasForDirective(".dword", ".8byte");
330 Parser.addAliasForDirective(".xword", ".8byte");
331
332 // Initialize the set of available features.
333 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
334 }
335
336 bool areEqualRegs(const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2) const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
339 SMLoc NameLoc, OperandVector &Operands) override;
340 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
341 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc) override;
343 bool ParseDirective(AsmToken DirectiveID) override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
345 unsigned Kind) override;
346
347 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
348 AArch64::Specifier &DarwinSpec,
349 int64_t &Addend);
350};
351
352/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
353/// instruction.
354class AArch64Operand : public MCParsedAsmOperand {
355private:
  /// Discriminator for the union of per-operand payloads below.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
    k_CMHPriorityHint,
    k_TIndexHint,
  } Kind;

  // Source range of the operand's tokens.
  SMLoc StartLoc, EndLoc;
382
  // Raw token operand (pointer/length into the source buffer).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };
388
389 // Separate shift/extend operand.
390 struct ShiftExtendOp {
392 unsigned Amount;
393 bool HasExplicitAmount;
394 };
395
  // Register operand, possibly carrying an attached shift/extend.
  struct RegOp {
    MCRegister Reg;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
421
  // SME matrix register operand.
  struct MatrixRegOp {
    MCRegister Reg;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Bitmask of ZA tiles named in a matrix tile list.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // Vector register list (e.g. {v0.8b - v3.8b}).
  struct VectorListOp {
    MCRegister Reg;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  // Lane index into a vector register.
  struct VectorIndexOp {
    int Val;
  };

  // Plain immediate expression.
  struct ImmOp {
    const MCExpr *Val;
  };

  // Immediate with an explicit left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range (First:Last).
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };
458
459 struct CondCodeOp {
461 };
462
  // Floating-point immediate.
  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  // Barrier (dsb/dmb/isb) operand.
  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // System register operand with its MRS/MSR/PState encodings.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // System instruction CRn/CRm immediate.
  struct SysCRImmOp {
    unsigned Val;
  };

  // Prefetch-operation operand.
  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // The hint operands below all share the same layout: the spelled name
  // (Data/Length) plus its numeric encoding (Val).
  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct PHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct CMHPriorityHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct TIndexHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // SME SVCR (streaming-mode control) operand.
  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
524
  // Payload storage; the member that is active is selected by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct CMHPriorityHintOp CMHPriorityHint;
    struct TIndexHintOp TIndexHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
553
554public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor: copies only the union member that is active for the
  // source operand's Kind, so inactive union members are never read.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      break;
    case k_TIndexHint:
      TIndexHint = o.TIndexHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
630
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Typed accessors for the active union member. Each asserts that the
  // operand's Kind matches before reading the payload.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }
670
672 assert(Kind == k_CondCode && "Invalid access!");
673 return CondCode.Code;
674 }
675
  // Reconstitute the FP immediate from its stored 64-bit bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.Reg;
  }

  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;
  }

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
  }

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;
  }

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors below accept either a standalone
  // k_ShiftExtend operand or a k_Register operand with an attached
  // shift/extend (see RegOp::ShiftExtend).

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
849
  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands never use the generic memory-operand kind.
  bool isMem() const override { return false; }
852
853 bool isUImm6() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859 int64_t Val = MCE->getValue();
860 return (Val >= 0 && Val < 64);
861 }
862
  /// Signed immediate of the given bit width (unscaled).
  template <int Width> bool isSImm() const {
    return bool(isSImmScaled<Width, 1>());
  }

  /// Signed immediate of Bits width that must be a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }
870
871 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
872 DiagnosticPredicate isUImmScaled() const {
873 if (IsRange && isImmRange() &&
874 (getLastImmVal() != getFirstImmVal() + Offset))
876
877 return isImmScaled<Bits, Scale, IsRange>(false);
878 }
879
880 template <int Bits, int Scale, bool IsRange = false>
881 DiagnosticPredicate isImmScaled(bool Signed) const {
882 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
883 (isImmRange() && !IsRange))
885
886 int64_t Val;
887 if (isImmRange())
888 Val = getFirstImmVal();
889 else {
890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
891 if (!MCE)
893 Val = MCE->getValue();
894 }
895
896 int64_t MinVal, MaxVal;
897 if (Signed) {
898 int64_t Shift = Bits - 1;
899 MinVal = (int64_t(1) << Shift) * -Scale;
900 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
901 } else {
902 MinVal = 0;
903 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
904 }
905
906 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
908
910 }
911
912 DiagnosticPredicate isSVEPattern() const {
913 if (!isImm())
915 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
916 if (!MCE)
918 int64_t Val = MCE->getValue();
919 if (Val >= 0 && Val < 32)
922 }
923
924 DiagnosticPredicate isSVEVecLenSpecifier() const {
925 if (!isImm())
927 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
928 if (!MCE)
930 int64_t Val = MCE->getValue();
931 if (Val >= 0 && Val <= 1)
934 }
935
  // Whether a symbolic expression is acceptable as a uimm12 load/store
  // offset (e.g. @pageoff / :lo12:-style references).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
        // NOTE(review): the scraped listing elides several lines here — an
        // llvm::is_contained({...}, ELFSpec) test over the ELF :lo12:-style
        // specifiers. Restore the list from the upstream source.
        ELFSpec)) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
               DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
968
969 template <int Scale> bool isUImm12Offset() const {
970 if (!isImm())
971 return false;
972
973 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
974 if (!MCE)
975 return isSymbolicUImm12Offset(getImm());
976
977 int64_t Val = MCE->getValue();
978 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
979 }
980
  /// True iff the operand is a constant immediate in the inclusive
  /// range [N, M].
  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }
991
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask of the bits above T's width. Built with two half-width shifts to
    // avoid left shift by 64 directly (UB) when T is 64 bits wide.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
1011
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // Nonzero value that loses no bits when shifted right by Width:
        // report it in pre-shifted form.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
1036
  // Whether the operand is valid as an ADD/SUB immediate: a (possibly
  // 'lsl #12'-shifted) 12-bit constant, or a recognised symbolic reference.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                            Addend)) {
      return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
             DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
             (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
             // NOTE(review): the scraped listing elides several lines here —
             // an llvm::is_contained({...}, ELFSpec) test over the ELF
             // :lo12:-style specifiers. Restore from the upstream source.
             ELFSpec);
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1079
1080 bool isAddSubImmNeg() const {
1081 if (!isShiftedImm() && !isImm())
1082 return false;
1083
1084 // Otherwise it should be a real negative immediate in range.
1085 if (auto ShiftedVal = getShiftedVal<12>())
1086 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1087
1088 return false;
1089 }
1090
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  // Byte elements reject any shifted form (a shift would leave the byte
  // range); wider elements fold the shift into the value before validating.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))

  }
1111
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  // Byte elements reject any shifted form; wider elements fold the 8-bit
  // shift into the value before validating it.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))

  }
1129
  // Prefer the logical-immediate form only when the constant is not already
  // representable as an SVE CPY immediate.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
  }
1135
1136 bool isCondCode() const { return Kind == k_CondCode; }
1137
  /// Check for a constant usable as a type-10 AdvSIMD modified immediate.
  /// (The final validation of the constant's value is elided in this view;
  /// presumably it defers to AArch64_AM — TODO confirm against upstream.)
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
  }
1146
1147 template<int N>
1148 bool isBranchTarget() const {
1149 if (!isImm())
1150 return false;
1151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1152 if (!MCE)
1153 return true;
1154 int64_t Val = MCE->getValue();
1155 if (Val & 0x3)
1156 return false;
1157 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1158 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1159 }
1160
1161 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1162 if (!isImm())
1163 return false;
1164
1165 AArch64::Specifier ELFSpec;
1166 AArch64::Specifier DarwinSpec;
1167 int64_t Addend;
1168 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1169 Addend)) {
1170 return false;
1171 }
1172 if (DarwinSpec != AArch64::S_None)
1173 return false;
1174
1175 return llvm::is_contained(AllowedModifiers, ELFSpec);
1176 }
1177
1178 bool isMovWSymbolG3() const {
1179 return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1180 }
1181
1182 bool isMovWSymbolG2() const {
1183 return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1187 }
1188
1189 bool isMovWSymbolG1() const {
1190 return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1195 }
1196
1197 bool isMovWSymbolG0() const {
1198 return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1203 }
1204
1205 template<int RegWidth, int Shift>
1206 bool isMOVZMovAlias() const {
1207 if (!isImm()) return false;
1208
1209 const MCExpr *E = getImm();
1210 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1211 uint64_t Value = CE->getValue();
1212
1213 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1214 }
1215 // Only supports the case of Shift being 0 if an expression is used as an
1216 // operand
1217 return !Shift && E;
1218 }
1219
1220 template<int RegWidth, int Shift>
1221 bool isMOVNMovAlias() const {
1222 if (!isImm()) return false;
1223
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1225 if (!CE) return false;
1226 uint64_t Value = CE->getValue();
1227
1228 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1229 }
1230
1231 bool isFPImm() const {
1232 return Kind == k_FPImm &&
1233 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1234 }
1235
1236 bool isBarrier() const {
1237 return Kind == k_Barrier && !getBarriernXSModifier();
1238 }
1239 bool isBarriernXS() const {
1240 return Kind == k_Barrier && getBarriernXSModifier();
1241 }
1242 bool isSysReg() const { return Kind == k_SysReg; }
1243
1244 bool isMRSSystemRegister() const {
1245 if (!isSysReg()) return false;
1246
1247 return SysReg.MRSReg != -1U;
1248 }
1249
1250 bool isMSRSystemRegister() const {
1251 if (!isSysReg()) return false;
1252 return SysReg.MSRReg != -1U;
1253 }
1254
1255 bool isSystemPStateFieldWithImm0_1() const {
1256 if (!isSysReg()) return false;
1257 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1258 }
1259
1260 bool isSystemPStateFieldWithImm0_15() const {
1261 if (!isSysReg())
1262 return false;
1263 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1264 }
1265
1266 bool isSVCR() const {
1267 if (Kind != k_SVCR)
1268 return false;
1269 return SVCR.PStateField != -1U;
1270 }
1271
1272 bool isReg() const override {
1273 return Kind == k_Register;
1274 }
1275
1276 bool isVectorList() const { return Kind == k_VectorList; }
1277
1278 bool isScalarReg() const {
1279 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1280 }
1281
1282 bool isNeonVectorReg() const {
1283 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1284 }
1285
1286 bool isNeonVectorRegLo() const {
1287 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1288 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1289 Reg.Reg) ||
1290 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1291 Reg.Reg));
1292 }
1293
1294 bool isNeonVectorReg0to7() const {
1295 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1296 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1297 Reg.Reg));
1298 }
1299
1300 bool isMatrix() const { return Kind == k_MatrixRegister; }
1301 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1302
1303 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1304 RegKind RK;
1305 switch (Class) {
1306 case AArch64::PPRRegClassID:
1307 case AArch64::PPR_3bRegClassID:
1308 case AArch64::PPR_p8to15RegClassID:
1309 case AArch64::PNRRegClassID:
1310 case AArch64::PNR_p8to15RegClassID:
1311 case AArch64::PPRorPNRRegClassID:
1312 RK = RegKind::SVEPredicateAsCounter;
1313 break;
1314 default:
1315 llvm_unreachable("Unsupported register class");
1316 }
1317
1318 return (Kind == k_Register && Reg.Kind == RK) &&
1319 AArch64MCRegisterClasses[Class].contains(getReg());
1320 }
1321
  /// True when this operand is an SVE vector register belonging to register
  /// class \p Class. Data vectors (Z) and predicate vectors (P/PN) are
  /// tracked with different RegKinds, selected here from the class ID.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
    case AArch64::ZPRMul2_LoRegClassID:
    case AArch64::ZPRMul2_HiRegClassID:
    case AArch64::ZPR_KRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    // Both the operand's kind and its class membership must agree.
    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1348
1349 template <unsigned Class> bool isFPRasZPR() const {
1350 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1351 AArch64MCRegisterClasses[Class].contains(getReg());
1352 }
1353
1354 template <int ElementWidth, unsigned Class>
1355 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1356 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1358
1359 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1361
1363 }
1364
1365 template <int ElementWidth, unsigned Class>
1366 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1367 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1368 Reg.Kind != RegKind::SVEPredicateVector))
1370
1371 if ((isSVEPredicateAsCounterReg<Class>() ||
1372 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1373 Reg.ElementWidth == ElementWidth)
1375
1377 }
1378
1379 template <int ElementWidth, unsigned Class>
1380 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1381 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1383
1384 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1386
1388 }
1389
1390 template <int ElementWidth, unsigned Class>
1391 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1392 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1394
1395 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1397
1399 }
1400
  /// Match an SVE data vector register of the requested element width whose
  /// shift/extend agrees with the addressing mode described by the template
  /// parameters.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)

    if (MatchShift && ShiftExtendTy == getShiftExtendType())

  }
1423
1424 bool isGPR32as64() const {
1425 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1426 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
1427 }
1428
1429 bool isGPR64as32() const {
1430 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1431 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
1432 }
1433
1434 bool isGPR64x8() const {
1435 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1436 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1437 Reg.Reg);
1438 }
1439
1440 bool isWSeqPair() const {
1441 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1442 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1443 Reg.Reg);
1444 }
1445
1446 bool isXSeqPair() const {
1447 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1448 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1449 Reg.Reg);
1450 }
1451
1452 bool isSyspXzrPair() const {
1453 return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
1454 }
1455
1456 template<int64_t Angle, int64_t Remainder>
1457 DiagnosticPredicate isComplexRotation() const {
1458 if (!isImm())
1460
1461 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1462 if (!CE)
1464 uint64_t Value = CE->getValue();
1465
1466 if (Value % Angle == Remainder && Value <= 270)
1469 }
1470
1471 template <unsigned RegClassID> bool isGPR64() const {
1472 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1473 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1474 }
1475
1476 template <unsigned RegClassID, int ExtWidth>
1477 DiagnosticPredicate isGPR64WithShiftExtend() const {
1478 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1480
1481 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1482 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1485 }
1486
1487 /// Is this a vector list with the type implicit (presumably attached to the
1488 /// instruction itself)?
1489 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1490 bool isImplicitlyTypedVectorList() const {
1491 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1492 VectorList.NumElements == 0 &&
1493 VectorList.RegisterKind == VectorKind &&
1494 (!IsConsecutive || (VectorList.Stride == 1));
1495 }
1496
1497 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1498 unsigned ElementWidth, unsigned Stride = 1>
1499 bool isTypedVectorList() const {
1500 if (Kind != k_VectorList)
1501 return false;
1502 if (VectorList.Count != NumRegs)
1503 return false;
1504 if (VectorList.RegisterKind != VectorKind)
1505 return false;
1506 if (VectorList.ElementWidth != ElementWidth)
1507 return false;
1508 if (VectorList.Stride != Stride)
1509 return false;
1510 return VectorList.NumElements == NumElements;
1511 }
1512
1513 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1514 unsigned ElementWidth, unsigned RegClass>
1515 DiagnosticPredicate isTypedVectorListMultiple() const {
1516 bool Res =
1517 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1518 if (!Res)
1520 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
1523 }
1524
1525 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1526 unsigned ElementWidth>
1527 DiagnosticPredicate isTypedVectorListStrided() const {
1528 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1529 ElementWidth, Stride>();
1530 if (!Res)
1532 if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
1533 ((VectorList.Reg >= AArch64::Z16) &&
1534 (VectorList.Reg < (AArch64::Z16 + Stride))))
1537 }
1538
1539 template <int Min, int Max>
1540 DiagnosticPredicate isVectorIndex() const {
1541 if (Kind != k_VectorIndex)
1543 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1546 }
1547
1548 bool isToken() const override { return Kind == k_Token; }
1549
1550 bool isTokenEqual(StringRef Str) const {
1551 return Kind == k_Token && getToken() == Str;
1552 }
1553 bool isSysCR() const { return Kind == k_SysCR; }
1554 bool isPrefetch() const { return Kind == k_Prefetch; }
1555 bool isPSBHint() const { return Kind == k_PSBHint; }
1556 bool isPHint() const { return Kind == k_PHint; }
1557 bool isBTIHint() const { return Kind == k_BTIHint; }
1558 bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
1559 bool isTIndexHint() const { return Kind == k_TIndexHint; }
1560 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1561 bool isShifter() const {
1562 if (!isShiftExtend())
1563 return false;
1564
1565 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1566 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1567 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1568 ST == AArch64_AM::MSL);
1569 }
1570
1571 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1572 if (Kind != k_FPImm)
1574
1575 if (getFPImmIsExact()) {
1576 // Lookup the immediate from table of supported immediates.
1577 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1578 assert(Desc && "Unknown enum value");
1579
1580 // Calculate its FP value.
1581 APFloat RealVal(APFloat::IEEEdouble());
1582 auto StatusOrErr =
1583 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1584 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1585 llvm_unreachable("FP immediate is not exact");
1586
1587 if (getFPImm().bitwiseIsEqual(RealVal))
1589 }
1590
1592 }
1593
1594 template <unsigned ImmA, unsigned ImmB>
1595 DiagnosticPredicate isExactFPImm() const {
1596 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1597 if ((Res = isExactFPImm<ImmA>()))
1599 if ((Res = isExactFPImm<ImmB>()))
1601 return Res;
1602 }
1603
1604 bool isExtend() const {
1605 if (!isShiftExtend())
1606 return false;
1607
1608 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1609 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1610 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1611 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1612 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1613 ET == AArch64_AM::LSL) &&
1614 getShiftExtendAmount() <= 4;
1615 }
1616
1617 bool isExtend64() const {
1618 if (!isExtend())
1619 return false;
1620 // Make sure the extend expects a 32-bit source register.
1621 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1622 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1623 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1624 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1625 }
1626
1627 bool isExtendLSL64() const {
1628 if (!isExtend())
1629 return false;
1630 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1631 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1632 ET == AArch64_AM::LSL) &&
1633 getShiftExtendAmount() <= 4;
1634 }
1635
1636 bool isLSLImm3Shift() const {
1637 if (!isShiftExtend())
1638 return false;
1639 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1640 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1641 }
1642
1643 template<int Width> bool isMemXExtend() const {
1644 if (!isExtend())
1645 return false;
1646 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1647 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1648 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1649 getShiftExtendAmount() == 0);
1650 }
1651
1652 template<int Width> bool isMemWExtend() const {
1653 if (!isExtend())
1654 return false;
1655 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1656 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1657 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1658 getShiftExtendAmount() == 0);
1659 }
1660
1661 template <unsigned width>
1662 bool isArithmeticShifter() const {
1663 if (!isShifter())
1664 return false;
1665
1666 // An arithmetic shifter is LSL, LSR, or ASR.
1667 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1668 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1669 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1670 }
1671
1672 template <unsigned width>
1673 bool isLogicalShifter() const {
1674 if (!isShifter())
1675 return false;
1676
1677 // A logical shifter is LSL, LSR, ASR or ROR.
1678 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1679 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1680 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1681 getShiftExtendAmount() < width;
1682 }
1683
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16. (The previous comment listed
    // the 64-bit amounts; it was swapped with isMovImm64Shifter's.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1695
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. (The previous
    // comment listed the 32-bit amounts; it was swapped with
    // isMovImm32Shifter's.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1707
1708 bool isLogicalVecShifter() const {
1709 if (!isShifter())
1710 return false;
1711
1712 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1713 unsigned Shift = getShiftExtendAmount();
1714 return getShiftExtendType() == AArch64_AM::LSL &&
1715 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1716 }
1717
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // The halfword form only permits a left shift by 0 or 8 (the previous
    // comment was copied from the full-word variant).
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1727
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL by 8 or 16 (the previous comment
    // mislabelled this as a "logical vector shifter").
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1737
1738 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1739 // to LDUR/STUR when the offset is not legal for the former but is for
1740 // the latter. As such, in addition to checking for being a legal unscaled
1741 // address, also check that it is not a legal scaled address. This avoids
1742 // ambiguity in the matcher.
1743 template<int Width>
1744 bool isSImm9OffsetFB() const {
1745 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1746 }
1747
1748 bool isAdrpLabel() const {
1749 // Validation was handled during parsing, so we just verify that
1750 // something didn't go haywire.
1751 if (!isImm())
1752 return false;
1753
1754 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1755 int64_t Val = CE->getValue();
1756 int64_t Min = - (4096 * (1LL << (21 - 1)));
1757 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1758 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1759 }
1760
1761 return true;
1762 }
1763
1764 bool isAdrLabel() const {
1765 // Validation was handled during parsing, so we just verify that
1766 // something didn't go haywire.
1767 if (!isImm())
1768 return false;
1769
1770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1771 int64_t Val = CE->getValue();
1772 int64_t Min = - (1LL << (21 - 1));
1773 int64_t Max = ((1LL << (21 - 1)) - 1);
1774 return Val >= Min && Val <= Max;
1775 }
1776
1777 return true;
1778 }
1779
1780 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1781 DiagnosticPredicate isMatrixRegOperand() const {
1782 if (!isMatrix())
1784 if (getMatrixKind() != Kind ||
1785 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1786 EltSize != getMatrixElementWidth())
1789 }
1790
1791 bool isPAuthPCRelLabel16Operand() const {
1792 // PAuth PCRel16 operands are similar to regular branch targets, but only
1793 // negative values are allowed for concrete immediates as signing instr
1794 // should be in a lower address.
1795 if (!isImm())
1796 return false;
1797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1798 if (!MCE)
1799 return true;
1800 int64_t Val = MCE->getValue();
1801 if (Val & 0b11)
1802 return false;
1803 return (Val <= 0) && (Val > -(1 << 18));
1804 }
1805
  /// Append \p Expr to \p Inst, folding constant expressions to plain
  /// immediate operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
  }
1815
1816 void addRegOperands(MCInst &Inst, unsigned N) const {
1817 assert(N == 1 && "Invalid number of operands!");
1819 }
1820
1821 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1822 assert(N == 1 && "Invalid number of operands!");
1823 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1824 }
1825
1826 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 assert(
1829 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1830
1831 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1832 MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
1834
1836 }
1837
1838 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!");
1840 assert(
1841 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1842
1843 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1844 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
1846
1848 }
1849
1850 template <int Width>
1851 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1852 unsigned Base;
1853 switch (Width) {
1854 case 8: Base = AArch64::B0; break;
1855 case 16: Base = AArch64::H0; break;
1856 case 32: Base = AArch64::S0; break;
1857 case 64: Base = AArch64::D0; break;
1858 case 128: Base = AArch64::Q0; break;
1859 default:
1860 llvm_unreachable("Unsupported width");
1861 }
1862 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1863 }
1864
1865 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!");
1867 MCRegister Reg = getReg();
1868 // Normalise to PPR
1869 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1870 Reg = Reg - AArch64::PN0 + AArch64::P0;
1872 }
1873
1874 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 Inst.addOperand(
1877 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1878 }
1879
1880 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1881 assert(N == 1 && "Invalid number of operands!");
1882 assert(
1883 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1884 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1885 }
1886
1887 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1888 assert(N == 1 && "Invalid number of operands!");
1889 assert(
1890 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1892 }
1893
1894 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1895 assert(N == 1 && "Invalid number of operands!");
1897 }
1898
1899 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1900 assert(N == 1 && "Invalid number of operands!");
1902 }
1903
1904 enum VecListIndexType {
1905 VecListIdx_DReg = 0,
1906 VecListIdx_QReg = 1,
1907 VecListIdx_ZReg = 2,
1908 VecListIdx_PReg = 3,
1909 };
1910
  /// Append the register-tuple operand for a parsed vector list.
  template <VecListIndexType RegTy, unsigned NumRegs,
            bool IsConsecutive = false>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((!IsConsecutive || (getVectorListStride() == 1)) &&
           "Expected consecutive registers");
    // Row = list register type; column = number of registers in the list.
    // Column 0 holds the base register that getVectorListStart() is rebased
    // from (note the DReg row uses Q0 as its base).
    static const unsigned FirstRegs[][5] = {
        /* DReg */ { AArch64::Q0,
                     AArch64::D0,       AArch64::D0_D1,
                     AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
        /* QReg */ { AArch64::Q0,
                     AArch64::Q0,       AArch64::Q0_Q1,
                     AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
        /* ZReg */ { AArch64::Z0,
                     AArch64::Z0,       AArch64::Z0_Z1,
                     AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
        /* PReg */ { AArch64::P0,
                     AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    // Translate the list's first register into the tuple register with the
    // same starting index.
    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1941
1942 template <unsigned NumRegs>
1943 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1944 assert(N == 1 && "Invalid number of operands!");
1945 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1946
1947 switch (NumRegs) {
1948 case 2:
1949 if (getVectorListStart() < AArch64::Z16) {
1950 assert((getVectorListStart() < AArch64::Z8) &&
1951 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1953 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1954 } else {
1955 assert((getVectorListStart() < AArch64::Z24) &&
1956 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1958 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1959 }
1960 break;
1961 case 4:
1962 if (getVectorListStart() < AArch64::Z16) {
1963 assert((getVectorListStart() < AArch64::Z4) &&
1964 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1966 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1967 } else {
1968 assert((getVectorListStart() < AArch64::Z20) &&
1969 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1971 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1972 }
1973 break;
1974 default:
1975 llvm_unreachable("Unsupported number of registers for strided vec list");
1976 }
1977 }
1978
1979 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1980 assert(N == 1 && "Invalid number of operands!");
1981 unsigned RegMask = getMatrixTileListRegMask();
1982 assert(RegMask <= 0xFF && "Invalid mask!");
1983 Inst.addOperand(MCOperand::createImm(RegMask));
1984 }
1985
1986 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1989 }
1990
1991 template <unsigned ImmIs0, unsigned ImmIs1>
1992 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!");
1994 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1995 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1996 }
1997
1998 void addImmOperands(MCInst &Inst, unsigned N) const {
1999 assert(N == 1 && "Invalid number of operands!");
2000 // If this is a pageoff symrefexpr with an addend, adjust the addend
2001 // to be only the page-offset portion. Otherwise, just add the expr
2002 // as-is.
2003 addExpr(Inst, getImm());
2004 }
2005
  /// Emit an immediate that may carry an optional shift: concrete constants
  /// become a (value, shift) pair; symbolic shifted immediates keep their
  /// expression plus the parsed shift amount.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
    }
  }
2020
2021 template <int Shift>
2022 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 2 && "Invalid number of operands!");
2024 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2025 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2026 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2027 } else
2028 llvm_unreachable("Not a shifted negative immediate");
2029 }
2030
2031 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2032 assert(N == 1 && "Invalid number of operands!");
2034 }
2035
2036 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2039 if (!MCE)
2040 addExpr(Inst, getImm());
2041 else
2042 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2043 }
2044
2045 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2046 addImmOperands(Inst, N);
2047 }
2048
2049 template<int Scale>
2050 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2051 assert(N == 1 && "Invalid number of operands!");
2052 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2053
2054 if (!MCE) {
2056 return;
2057 }
2058 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2059 }
2060
2061 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2062 assert(N == 1 && "Invalid number of operands!");
2063 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2065 }
2066
2067 template <int Scale>
2068 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!");
2070 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2071 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2072 }
2073
2074 template <int Scale>
2075 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2076 assert(N == 1 && "Invalid number of operands!");
2077 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2078 }
2079
2080 template <typename T>
2081 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2084 std::make_unsigned_t<T> Val = MCE->getValue();
2085 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2086 Inst.addOperand(MCOperand::createImm(encoding));
2087 }
2088
2089 template <typename T>
2090 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2091 assert(N == 1 && "Invalid number of operands!");
2092 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2093 std::make_unsigned_t<T> Val = ~MCE->getValue();
2094 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2095 Inst.addOperand(MCOperand::createImm(encoding));
2096 }
2097
2098 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2099 assert(N == 1 && "Invalid number of operands!");
2100 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2101 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2102 Inst.addOperand(MCOperand::createImm(encoding));
2103 }
2104
2105 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2120 // PC-relative operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2130 }
2131
2132 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2133 // Branch operands don't encode the low bits, so shift them off
2134 // here. If it's a label, however, just put it on directly as there's
2135 // not enough information now to do anything.
2136 assert(N == 1 && "Invalid number of operands!");
2137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2138 if (!MCE) {
2139 addExpr(Inst, getImm());
2140 return;
2141 }
2142 assert(MCE && "Invalid constant immediate operand!");
2143 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2144 }
2145
2146 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2147 // Branch operands don't encode the low bits, so shift them off
2148 // here. If it's a label, however, just put it on directly as there's
2149 // not enough information now to do anything.
2150 assert(N == 1 && "Invalid number of operands!");
2151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2152 if (!MCE) {
2153 addExpr(Inst, getImm());
2154 return;
2155 }
2156 assert(MCE && "Invalid constant immediate operand!");
2157 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2158 }
2159
2160 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2161 // Branch operands don't encode the low bits, so shift them off
2162 // here. If it's a label, however, just put it on directly as there's
2163 // not enough information now to do anything.
2164 assert(N == 1 && "Invalid number of operands!");
2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2166 if (!MCE) {
2167 addExpr(Inst, getImm());
2168 return;
2169 }
2170 assert(MCE && "Invalid constant immediate operand!");
2171 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2172 }
2173
2174 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2175 assert(N == 1 && "Invalid number of operands!");
2177 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2178 }
2179
2180 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2181 assert(N == 1 && "Invalid number of operands!");
2182 Inst.addOperand(MCOperand::createImm(getBarrier()));
2183 }
2184
2185 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 Inst.addOperand(MCOperand::createImm(getBarrier()));
2188 }
2189
2190 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2191 assert(N == 1 && "Invalid number of operands!");
2192
2193 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2194 }
2195
2196 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 1 && "Invalid number of operands!");
2198
2199 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2200 }
2201
2202 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2203 assert(N == 1 && "Invalid number of operands!");
2204
2205 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2206 }
2207
2208 void addSVCROperands(MCInst &Inst, unsigned N) const {
2209 assert(N == 1 && "Invalid number of operands!");
2210
2211 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2212 }
2213
2214 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216
2217 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2218 }
2219
2220 void addSysCROperands(MCInst &Inst, unsigned N) const {
2221 assert(N == 1 && "Invalid number of operands!");
2222 Inst.addOperand(MCOperand::createImm(getSysCR()));
2223 }
2224
2225 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2226 assert(N == 1 && "Invalid number of operands!");
2227 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2228 }
2229
2230 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2231 assert(N == 1 && "Invalid number of operands!");
2232 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2233 }
2234
2235 void addPHintOperands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 Inst.addOperand(MCOperand::createImm(getPHint()));
2238 }
2239
2240 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2243 }
2244
2245 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2246 assert(N == 1 && "Invalid number of operands!");
2247 Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
2248 }
2249
2250 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2251 assert(N == 1 && "Invalid number of operands!");
2252 Inst.addOperand(MCOperand::createImm(getTIndexHint()));
2253 }
2254
2255 void addShifterOperands(MCInst &Inst, unsigned N) const {
2256 assert(N == 1 && "Invalid number of operands!");
2257 unsigned Imm =
2258 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2260 }
2261
2262 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2263 assert(N == 1 && "Invalid number of operands!");
2264 unsigned Imm = getShiftExtendAmount();
2266 }
2267
2268 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2269 assert(N == 1 && "Invalid number of operands!");
2270
2271 if (!isScalarReg())
2272 return;
2273
2274 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2275 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2277 if (Reg != AArch64::XZR)
2278 llvm_unreachable("wrong register");
2279
2280 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2281 }
2282
2283 void addExtendOperands(MCInst &Inst, unsigned N) const {
2284 assert(N == 1 && "Invalid number of operands!");
2285 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2286 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2287 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2289 }
2290
2291 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2294 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2295 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2297 }
2298
2299 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2300 assert(N == 2 && "Invalid number of operands!");
2301 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2302 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2303 Inst.addOperand(MCOperand::createImm(IsSigned));
2304 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2305 }
2306
2307 // For 8-bit load/store instructions with a register offset, both the
2308 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2309 // they're disambiguated by whether the shift was explicit or implicit rather
2310 // than its size.
2311 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2312 assert(N == 2 && "Invalid number of operands!");
2313 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2314 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2315 Inst.addOperand(MCOperand::createImm(IsSigned));
2316 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2317 }
2318
2319 template<int Shift>
2320 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322
2323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2324 if (CE) {
2325 uint64_t Value = CE->getValue();
2326 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2327 } else {
2328 addExpr(Inst, getImm());
2329 }
2330 }
2331
2332 template<int Shift>
2333 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2334 assert(N == 1 && "Invalid number of operands!");
2335
2336 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2337 uint64_t Value = CE->getValue();
2338 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2339 }
2340
2341 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2342 assert(N == 1 && "Invalid number of operands!");
2343 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2344 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2345 }
2346
2347 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2348 assert(N == 1 && "Invalid number of operands!");
2349 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2350 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2351 }
2352
2353 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2354
2355 static std::unique_ptr<AArch64Operand>
2356 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2357 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2358 Op->Tok.Data = Str.data();
2359 Op->Tok.Length = Str.size();
2360 Op->Tok.IsSuffix = IsSuffix;
2361 Op->StartLoc = S;
2362 Op->EndLoc = S;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2368 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2370 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2371 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2372 Op->Reg.Reg = Reg;
2373 Op->Reg.Kind = Kind;
2374 Op->Reg.ElementWidth = 0;
2375 Op->Reg.EqualityTy = EqTy;
2376 Op->Reg.ShiftExtend.Type = ExtTy;
2377 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2378 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
2384 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2385 MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
2386 MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2387 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2388 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2389 Kind == RegKind::SVEPredicateVector ||
2390 Kind == RegKind::SVEPredicateAsCounter) &&
2391 "Invalid vector kind");
2392 auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2393 HasExplicitAmount);
2394 Op->Reg.ElementWidth = ElementWidth;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2400 unsigned NumElements, unsigned ElementWidth,
2401 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2403 Op->VectorList.Reg = Reg;
2404 Op->VectorList.Count = Count;
2405 Op->VectorList.Stride = Stride;
2406 Op->VectorList.NumElements = NumElements;
2407 Op->VectorList.ElementWidth = ElementWidth;
2408 Op->VectorList.RegisterKind = RegisterKind;
2409 Op->StartLoc = S;
2410 Op->EndLoc = E;
2411 return Op;
2412 }
2413
2414 static std::unique_ptr<AArch64Operand>
2415 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2417 Op->VectorIndex.Val = Idx;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand>
2424 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2426 Op->MatrixTileList.RegMask = RegMask;
2427 Op->StartLoc = S;
2428 Op->EndLoc = E;
2429 return Op;
2430 }
2431
2432 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2433 const unsigned ElementWidth) {
2434 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2435 RegMap = {
2436 {{0, AArch64::ZAB0},
2437 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2438 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2439 {{8, AArch64::ZAB0},
2440 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2441 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2442 {{16, AArch64::ZAH0},
2443 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2444 {{16, AArch64::ZAH1},
2445 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2446 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2447 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2448 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2449 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2450 };
2451
2452 if (ElementWidth == 64)
2453 OutRegs.insert(Reg);
2454 else {
2455 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2456 assert(!Regs.empty() && "Invalid tile or element width!");
2457 OutRegs.insert_range(Regs);
2458 }
2459 }
2460
2461 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2462 SMLoc E, MCContext &Ctx) {
2463 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2464 Op->Imm.Val = Val;
2465 Op->StartLoc = S;
2466 Op->EndLoc = E;
2467 return Op;
2468 }
2469
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2471 unsigned ShiftAmount,
2472 SMLoc S, SMLoc E,
2473 MCContext &Ctx) {
2474 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2477 Op->StartLoc = S;
2478 Op->EndLoc = E;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2483 unsigned Last, SMLoc S,
2484 SMLoc E,
2485 MCContext &Ctx) {
2486 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2487 Op->ImmRange.First = First;
2488 Op->ImmRange.Last = Last;
2489 Op->EndLoc = E;
2490 return Op;
2491 }
2492
2493 static std::unique_ptr<AArch64Operand>
2494 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2495 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2496 Op->CondCode.Code = Code;
2497 Op->StartLoc = S;
2498 Op->EndLoc = E;
2499 return Op;
2500 }
2501
2502 static std::unique_ptr<AArch64Operand>
2503 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2504 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2505 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2506 Op->FPImm.IsExact = IsExact;
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2513 StringRef Str,
2514 SMLoc S,
2515 MCContext &Ctx,
2516 bool HasnXSModifier) {
2517 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2518 Op->Barrier.Val = Val;
2519 Op->Barrier.Data = Str.data();
2520 Op->Barrier.Length = Str.size();
2521 Op->Barrier.HasnXSModifier = HasnXSModifier;
2522 Op->StartLoc = S;
2523 Op->EndLoc = S;
2524 return Op;
2525 }
2526
2527 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2528 uint32_t MRSReg,
2529 uint32_t MSRReg,
2530 uint32_t PStateField,
2531 MCContext &Ctx) {
2532 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2533 Op->SysReg.Data = Str.data();
2534 Op->SysReg.Length = Str.size();
2535 Op->SysReg.MRSReg = MRSReg;
2536 Op->SysReg.MSRReg = MSRReg;
2537 Op->SysReg.PStateField = PStateField;
2538 Op->StartLoc = S;
2539 Op->EndLoc = S;
2540 return Op;
2541 }
2542
2543 static std::unique_ptr<AArch64Operand>
2544 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2545 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2546 Op->PHint.Val = Val;
2547 Op->PHint.Data = Str.data();
2548 Op->PHint.Length = Str.size();
2549 Op->StartLoc = S;
2550 Op->EndLoc = S;
2551 return Op;
2552 }
2553
2554 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2555 SMLoc E, MCContext &Ctx) {
2556 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2557 Op->SysCRImm.Val = Val;
2558 Op->StartLoc = S;
2559 Op->EndLoc = E;
2560 return Op;
2561 }
2562
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2564 StringRef Str,
2565 SMLoc S,
2566 MCContext &Ctx) {
2567 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2571 Op->StartLoc = S;
2572 Op->EndLoc = S;
2573 return Op;
2574 }
2575
2576 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2577 StringRef Str,
2578 SMLoc S,
2579 MCContext &Ctx) {
2580 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2581 Op->PSBHint.Val = Val;
2582 Op->PSBHint.Data = Str.data();
2583 Op->PSBHint.Length = Str.size();
2584 Op->StartLoc = S;
2585 Op->EndLoc = S;
2586 return Op;
2587 }
2588
2589 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2590 StringRef Str,
2591 SMLoc S,
2592 MCContext &Ctx) {
2593 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2594 Op->BTIHint.Val = Val | 32;
2595 Op->BTIHint.Data = Str.data();
2596 Op->BTIHint.Length = Str.size();
2597 Op->StartLoc = S;
2598 Op->EndLoc = S;
2599 return Op;
2600 }
2601
2602 static std::unique_ptr<AArch64Operand>
2603 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2604 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2605 Op->CMHPriorityHint.Val = Val;
2606 Op->CMHPriorityHint.Data = Str.data();
2607 Op->CMHPriorityHint.Length = Str.size();
2608 Op->StartLoc = S;
2609 Op->EndLoc = S;
2610 return Op;
2611 }
2612
2613 static std::unique_ptr<AArch64Operand>
2614 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2615 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2616 Op->TIndexHint.Val = Val;
2617 Op->TIndexHint.Data = Str.data();
2618 Op->TIndexHint.Length = Str.size();
2619 Op->StartLoc = S;
2620 Op->EndLoc = S;
2621 return Op;
2622 }
2623
2624 static std::unique_ptr<AArch64Operand>
2625 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2626 SMLoc S, SMLoc E, MCContext &Ctx) {
2627 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2628 Op->MatrixReg.Reg = Reg;
2629 Op->MatrixReg.ElementWidth = ElementWidth;
2630 Op->MatrixReg.Kind = Kind;
2631 Op->StartLoc = S;
2632 Op->EndLoc = E;
2633 return Op;
2634 }
2635
2636 static std::unique_ptr<AArch64Operand>
2637 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2638 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2639 Op->SVCR.PStateField = PStateField;
2640 Op->SVCR.Data = Str.data();
2641 Op->SVCR.Length = Str.size();
2642 Op->StartLoc = S;
2643 Op->EndLoc = S;
2644 return Op;
2645 }
2646
2647 static std::unique_ptr<AArch64Operand>
2648 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2649 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2650 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2651 Op->ShiftExtend.Type = ShOp;
2652 Op->ShiftExtend.Amount = Val;
2653 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2654 Op->StartLoc = S;
2655 Op->EndLoc = E;
2656 return Op;
2657 }
2658};
2659
2660} // end anonymous namespace.
2661
// Debug/diagnostic dump of the operand. Each kind prints a short bracketed
// description; this is not assembly syntax.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Prints the raw register numbers of each list element, spaced.
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    // Print the 8-bit tile mask MSB-first as individual 0/1 digits.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    // Deliberate fallthrough: also print an attached shift/extend, if any.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2770
2771/// @name Auto-generated Match Functions
2772/// {
2773
2775
2776/// }
2777
// Maps a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding 128-bit Q register, or 0 if the name does not match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2814
2815/// Returns an optional pair of (#elements, element-width) if Suffix
2816/// is a valid vector kind. Where the number of elements in a vector
2817/// or the vector width is implicit or explicitly unknown (but still a
2818/// valid suffix kind), 0 is used.
2819static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2820 RegKind VectorKind) {
2821 std::pair<int, int> Res = {-1, -1};
2822
2823 switch (VectorKind) {
2824 case RegKind::NeonVector:
2826 .Case("", {0, 0})
2827 .Case(".1d", {1, 64})
2828 .Case(".1q", {1, 128})
2829 // '.2h' needed for fp16 scalar pairwise reductions
2830 .Case(".2h", {2, 16})
2831 .Case(".2b", {2, 8})
2832 .Case(".2s", {2, 32})
2833 .Case(".2d", {2, 64})
2834 // '.4b' is another special case for the ARMv8.2a dot product
2835 // operand
2836 .Case(".4b", {4, 8})
2837 .Case(".4h", {4, 16})
2838 .Case(".4s", {4, 32})
2839 .Case(".8b", {8, 8})
2840 .Case(".8h", {8, 16})
2841 .Case(".16b", {16, 8})
2842 // Accept the width neutral ones, too, for verbose syntax. If
2843 // those aren't used in the right places, the token operand won't
2844 // match so all will work out.
2845 .Case(".b", {0, 8})
2846 .Case(".h", {0, 16})
2847 .Case(".s", {0, 32})
2848 .Case(".d", {0, 64})
2849 .Default({-1, -1});
2850 break;
2851 case RegKind::SVEPredicateAsCounter:
2852 case RegKind::SVEPredicateVector:
2853 case RegKind::SVEDataVector:
2854 case RegKind::Matrix:
2856 .Case("", {0, 0})
2857 .Case(".b", {0, 8})
2858 .Case(".h", {0, 16})
2859 .Case(".s", {0, 32})
2860 .Case(".d", {0, 64})
2861 .Case(".q", {0, 128})
2862 .Default({-1, -1});
2863 break;
2864 default:
2865 llvm_unreachable("Unsupported RegKind");
2866 }
2867
2868 if (Res == std::make_pair(-1, -1))
2869 return std::nullopt;
2870
2871 return std::optional<std::pair<int, int>>(Res);
2872}
2873
2874static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2875 return parseVectorKind(Suffix, VectorKind).has_value();
2876}
2877
2879 return StringSwitch<unsigned>(Name.lower())
2880 .Case("z0", AArch64::Z0)
2881 .Case("z1", AArch64::Z1)
2882 .Case("z2", AArch64::Z2)
2883 .Case("z3", AArch64::Z3)
2884 .Case("z4", AArch64::Z4)
2885 .Case("z5", AArch64::Z5)
2886 .Case("z6", AArch64::Z6)
2887 .Case("z7", AArch64::Z7)
2888 .Case("z8", AArch64::Z8)
2889 .Case("z9", AArch64::Z9)
2890 .Case("z10", AArch64::Z10)
2891 .Case("z11", AArch64::Z11)
2892 .Case("z12", AArch64::Z12)
2893 .Case("z13", AArch64::Z13)
2894 .Case("z14", AArch64::Z14)
2895 .Case("z15", AArch64::Z15)
2896 .Case("z16", AArch64::Z16)
2897 .Case("z17", AArch64::Z17)
2898 .Case("z18", AArch64::Z18)
2899 .Case("z19", AArch64::Z19)
2900 .Case("z20", AArch64::Z20)
2901 .Case("z21", AArch64::Z21)
2902 .Case("z22", AArch64::Z22)
2903 .Case("z23", AArch64::Z23)
2904 .Case("z24", AArch64::Z24)
2905 .Case("z25", AArch64::Z25)
2906 .Case("z26", AArch64::Z26)
2907 .Case("z27", AArch64::Z27)
2908 .Case("z28", AArch64::Z28)
2909 .Case("z29", AArch64::Z29)
2910 .Case("z30", AArch64::Z30)
2911 .Case("z31", AArch64::Z31)
2912 .Default(0);
2913}
2914
2916 return StringSwitch<unsigned>(Name.lower())
2917 .Case("p0", AArch64::P0)
2918 .Case("p1", AArch64::P1)
2919 .Case("p2", AArch64::P2)
2920 .Case("p3", AArch64::P3)
2921 .Case("p4", AArch64::P4)
2922 .Case("p5", AArch64::P5)
2923 .Case("p6", AArch64::P6)
2924 .Case("p7", AArch64::P7)
2925 .Case("p8", AArch64::P8)
2926 .Case("p9", AArch64::P9)
2927 .Case("p10", AArch64::P10)
2928 .Case("p11", AArch64::P11)
2929 .Case("p12", AArch64::P12)
2930 .Case("p13", AArch64::P13)
2931 .Case("p14", AArch64::P14)
2932 .Case("p15", AArch64::P15)
2933 .Default(0);
2934}
2935
2937 return StringSwitch<unsigned>(Name.lower())
2938 .Case("pn0", AArch64::PN0)
2939 .Case("pn1", AArch64::PN1)
2940 .Case("pn2", AArch64::PN2)
2941 .Case("pn3", AArch64::PN3)
2942 .Case("pn4", AArch64::PN4)
2943 .Case("pn5", AArch64::PN5)
2944 .Case("pn6", AArch64::PN6)
2945 .Case("pn7", AArch64::PN7)
2946 .Case("pn8", AArch64::PN8)
2947 .Case("pn9", AArch64::PN9)
2948 .Case("pn10", AArch64::PN10)
2949 .Case("pn11", AArch64::PN11)
2950 .Case("pn12", AArch64::PN12)
2951 .Case("pn13", AArch64::PN13)
2952 .Case("pn14", AArch64::PN14)
2953 .Case("pn15", AArch64::PN15)
2954 .Default(0);
2955}
2956
2958 return StringSwitch<unsigned>(Name.lower())
2959 .Case("za0.d", AArch64::ZAD0)
2960 .Case("za1.d", AArch64::ZAD1)
2961 .Case("za2.d", AArch64::ZAD2)
2962 .Case("za3.d", AArch64::ZAD3)
2963 .Case("za4.d", AArch64::ZAD4)
2964 .Case("za5.d", AArch64::ZAD5)
2965 .Case("za6.d", AArch64::ZAD6)
2966 .Case("za7.d", AArch64::ZAD7)
2967 .Case("za0.s", AArch64::ZAS0)
2968 .Case("za1.s", AArch64::ZAS1)
2969 .Case("za2.s", AArch64::ZAS2)
2970 .Case("za3.s", AArch64::ZAS3)
2971 .Case("za0.h", AArch64::ZAH0)
2972 .Case("za1.h", AArch64::ZAH1)
2973 .Case("za0.b", AArch64::ZAB0)
2974 .Default(0);
2975}
2976
// Maps a matrix (ZA) register or tile name to the corresponding register
// enum, or 0 if the name is not recognised. Accepts the full array ("za"),
// plain tiles ("zaN.<size>"), and the horizontal/vertical slice spellings
// ("zaNh.<size>" / "zaNv.<size>"), which map to the same tile registers.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
3075
3076bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3077 SMLoc &EndLoc) {
3078 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3079}
3080
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3082 SMLoc &EndLoc) {
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(Reg);
3085 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3086 return Res;
3087}
3088
// Matches a register name or register alias previously defined by '.req'.
// Each register namespace (SVE data vectors, SVE predicates, NEON vectors,
// ZA tiles, ZT0, scalar GPRs, then .req aliases) is tried in turn; a match
// only counts when it is of the requested Kind, otherwise the invalid
// MCRegister() is returned.
MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                    RegKind Kind) {
  MCRegister Reg = MCRegister();
  if ((Reg = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? Reg : MCRegister();

  if ((Reg = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();

  // NOTE(review): the guarding 'if' for this return (presumably a match
  // against SVE predicate-as-counter register names) appears to be missing
  // here; as written the return below is unconditional and everything after
  // it is unreachable — confirm against upstream.
  return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();

  if ((Reg = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? Reg : MCRegister();

  if ((Reg = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? Reg : MCRegister();

  // ZT0 is the single lookup-table register.
  if (Name.equals_insensitive("zt0"))
    return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;

  // The parsed register must be of RegKind Scalar
  if ((Reg = MatchRegisterName(Name)))
    return (Kind == RegKind::Scalar) ? Reg : MCRegister();

  if (!Reg) {
    // Handle a few common aliases of registers.
    if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
                             .Case("fp", AArch64::FP)
                             .Case("lr", AArch64::LR)
                             .Case("x31", AArch64::XZR)
                             .Case("w31", AArch64::WZR)
                             .Default(0))
      return Kind == RegKind::Scalar ? Reg : MCRegister();

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return MCRegister();

    // set Reg if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      Reg = Entry->getValue().second;
  }
  return Reg;
}
3138
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3140 switch (K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3144 return 32;
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3148 return 16;
3149 case RegKind::LookupTable:
3150 return 1;
3151 }
3152 llvm_unreachable("Unsupported RegKind");
3153}
3154
3155/// tryParseScalarRegister - Try to parse a register name. The token must be an
3156/// Identifier when called, and if it is a register name the token is eaten and
3157/// the register is added to the operand list.
3158ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3159 const AsmToken &Tok = getTok();
3160 if (Tok.isNot(AsmToken::Identifier))
3161 return ParseStatus::NoMatch;
3162
3163 std::string lowerCase = Tok.getString().lower();
3164 MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3165 if (!Reg)
3166 return ParseStatus::NoMatch;
3167
3168 RegNum = Reg;
3169 Lex(); // Eat identifier token.
3170 return ParseStatus::Success;
3171}
3172
3173/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3174ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3175 SMLoc S = getLoc();
3176
3177 if (getTok().isNot(AsmToken::Identifier))
3178 return Error(S, "Expected cN operand where 0 <= N <= 15");
3179
3180 StringRef Tok = getTok().getIdentifier();
3181 if (Tok[0] != 'c' && Tok[0] != 'C')
3182 return Error(S, "Expected cN operand where 0 <= N <= 15");
3183
3184 uint32_t CRNum;
3185 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3186 if (BadNum || CRNum > 15)
3187 return Error(S, "Expected cN operand where 0 <= N <= 15");
3188
3189 Lex(); // Eat identifier token.
3190 Operands.push_back(
3191 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3192 return ParseStatus::Success;
3193}
3194
3195// Either an identifier for named values or a 6-bit immediate.
3196ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3197 SMLoc S = getLoc();
3198 const AsmToken &Tok = getTok();
3199
3200 unsigned MaxVal = 63;
3201
3202 // Immediate case, with optional leading hash:
3203 if (parseOptionalToken(AsmToken::Hash) ||
3204 Tok.is(AsmToken::Integer)) {
3205 const MCExpr *ImmVal;
3206 if (getParser().parseExpression(ImmVal))
3207 return ParseStatus::Failure;
3208
3209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3210 if (!MCE)
3211 return TokError("immediate value expected for prefetch operand");
3212 unsigned prfop = MCE->getValue();
3213 if (prfop > MaxVal)
3214 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3215 "] expected");
3216
3217 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3218 Operands.push_back(AArch64Operand::CreatePrefetch(
3219 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3220 return ParseStatus::Success;
3221 }
3222
3223 if (Tok.isNot(AsmToken::Identifier))
3224 return TokError("prefetch hint expected");
3225
3226 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3227 if (!RPRFM)
3228 return TokError("prefetch hint expected");
3229
3230 Operands.push_back(AArch64Operand::CreatePrefetch(
3231 RPRFM->Encoding, Tok.getString(), S, getContext()));
3232 Lex(); // Eat identifier token.
3233 return ParseStatus::Success;
3234}
3235
3236/// tryParsePrefetch - Try to parse a prefetch operand.
3237template <bool IsSVEPrefetch>
3238ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3239 SMLoc S = getLoc();
3240 const AsmToken &Tok = getTok();
3241
3242 auto LookupByName = [](StringRef N) {
3243 if (IsSVEPrefetch) {
3244 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3245 return std::optional<unsigned>(Res->Encoding);
3246 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3247 return std::optional<unsigned>(Res->Encoding);
3248 return std::optional<unsigned>();
3249 };
3250
3251 auto LookupByEncoding = [](unsigned E) {
3252 if (IsSVEPrefetch) {
3253 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3254 return std::optional<StringRef>(Res->Name);
3255 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3256 return std::optional<StringRef>(Res->Name);
3257 return std::optional<StringRef>();
3258 };
3259 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3260
3261 // Either an identifier for named values or a 5-bit immediate.
3262 // Eat optional hash.
3263 if (parseOptionalToken(AsmToken::Hash) ||
3264 Tok.is(AsmToken::Integer)) {
3265 const MCExpr *ImmVal;
3266 if (getParser().parseExpression(ImmVal))
3267 return ParseStatus::Failure;
3268
3269 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3270 if (!MCE)
3271 return TokError("immediate value expected for prefetch operand");
3272 unsigned prfop = MCE->getValue();
3273 if (prfop > MaxVal)
3274 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3275 "] expected");
3276
3277 auto PRFM = LookupByEncoding(MCE->getValue());
3278 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3279 S, getContext()));
3280 return ParseStatus::Success;
3281 }
3282
3283 if (Tok.isNot(AsmToken::Identifier))
3284 return TokError("prefetch hint expected");
3285
3286 auto PRFM = LookupByName(Tok.getString());
3287 if (!PRFM)
3288 return TokError("prefetch hint expected");
3289
3290 Operands.push_back(AArch64Operand::CreatePrefetch(
3291 *PRFM, Tok.getString(), S, getContext()));
3292 Lex(); // Eat identifier token.
3293 return ParseStatus::Success;
3294}
3295
3296/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3297ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3298 SMLoc S = getLoc();
3299 const AsmToken &Tok = getTok();
3300 if (Tok.isNot(AsmToken::Identifier))
3301 return TokError("invalid operand for instruction");
3302
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3304 if (!PSB)
3305 return TokError("invalid operand for instruction");
3306
3307 Operands.push_back(AArch64Operand::CreatePSBHint(
3308 PSB->Encoding, Tok.getString(), S, getContext()));
3309 Lex(); // Eat identifier token.
3310 return ParseStatus::Success;
3311}
3312
3313ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3314 SMLoc StartLoc = getLoc();
3315
3316 MCRegister RegNum;
3317
3318 // The case where xzr, xzr is not present is handled by an InstAlias.
3319
3320 auto RegTok = getTok(); // in case we need to backtrack
3321 if (!tryParseScalarRegister(RegNum).isSuccess())
3322 return ParseStatus::NoMatch;
3323
3324 if (RegNum != AArch64::XZR) {
3325 getLexer().UnLex(RegTok);
3326 return ParseStatus::NoMatch;
3327 }
3328
3329 if (parseComma())
3330 return ParseStatus::Failure;
3331
3332 if (!tryParseScalarRegister(RegNum).isSuccess())
3333 return TokError("expected register operand");
3334
3335 if (RegNum != AArch64::XZR)
3336 return TokError("xzr must be followed by xzr");
3337
3338 // We need to push something, since we claim this is an operand in .td.
3339 // See also AArch64AsmParser::parseKeywordOperand.
3340 Operands.push_back(AArch64Operand::CreateReg(
3341 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3342
3343 return ParseStatus::Success;
3344}
3345
3346/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3347ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3348 SMLoc S = getLoc();
3349 const AsmToken &Tok = getTok();
3350 if (Tok.isNot(AsmToken::Identifier))
3351 return TokError("invalid operand for instruction");
3352
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3354 if (!BTI)
3355 return TokError("invalid operand for instruction");
3356
3357 Operands.push_back(AArch64Operand::CreateBTIHint(
3358 BTI->Encoding, Tok.getString(), S, getContext()));
3359 Lex(); // Eat identifier token.
3360 return ParseStatus::Success;
3361}
3362
3363/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3365 SMLoc S = getLoc();
3366 const AsmToken &Tok = getTok();
3367 if (Tok.isNot(AsmToken::Identifier))
3368 return TokError("invalid operand for instruction");
3369
3370 auto CMHPriority =
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3372 if (!CMHPriority)
3373 return TokError("invalid operand for instruction");
3374
3375 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3376 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3377 Lex(); // Eat identifier token.
3378 return ParseStatus::Success;
3379}
3380
3381/// tryParseTIndexHint - Try to parse a TIndex operand
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384 const AsmToken &Tok = getTok();
3385 if (Tok.isNot(AsmToken::Identifier))
3386 return TokError("invalid operand for instruction");
3387
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
3389 if (!TIndex)
3390 return TokError("invalid operand for instruction");
3391
3392 Operands.push_back(AArch64Operand::CreateTIndexHint(
3393 TIndex->Encoding, Tok.getString(), S, getContext()));
3394 Lex(); // Eat identifier token.
3395 return ParseStatus::Success;
3396}
3397
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An optional '#' may precede the expression.
  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  // Classify any symbol reference so the relocation specifier can be checked.
  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
      // NOTE(review): the right-hand side of this assignment (the expression
      // that rewrites Expr with the basic page-relative specifier) is missing
      // here — as written this does not parse; restore from upstream.
    } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
                DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
               Addend != 0) {
      // Darwin gotpage/tlvppage references may not carry an addend.
      return Error(S, "gotpage label reference not allowed an addend");
    } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
               DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
               DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
               ELFSpec != AArch64::S_ABS_PAGE_NC &&
               ELFSpec != AArch64::S_GOT_PAGE &&
               ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
               ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
               ELFSpec != AArch64::S_GOTTPREL_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_PAGE &&
               ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(S, "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return ParseStatus::Success;
}
3447
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(AsmToken::LBrac))
    return ParseStatus::NoMatch;

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  // Classify any symbol reference so the relocation specifier can be checked.
  AArch64::Specifier ELFSpec;
  AArch64::Specifier DarwinSpec;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
    if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      // NOTE(review): the statement that rewrites Expr for the basic ADR
      // relocation is missing from this branch — restore from upstream.
    } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
      // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
      // adr. It's not actually GOT entry page address but the GOT address
      // itself - we just share the same variant kind with :got_auth: operator
      // applied for adrp.
      // TODO: can we somehow get current TargetMachine object to call
      // getCodeModel() on it to ensure we are using tiny code model?
      return Error(S, "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return ParseStatus::Success;
}
3487
/// tryParseFPImm - A floating point immediate expression operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  // The immediate may be prefixed with '#'.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other operand kind.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError("invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
    // The hex form is the raw 8-bit FP immediate encoding; negation is not
    // meaningful for it.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError("encoded floating point value out of range");

    // NOTE(review): the declaration of 'F' (presumably an APFloat decoded
    // from the 8-bit immediate encoding) is missing here — as written 'F'
    // below is undeclared; restore from upstream.
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError()))
      return TokError("invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Positive zero is represented as the two literal tokens "#0" ".0".
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3536
3537/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3538/// a shift suffix, for example '#1, lsl #12'.
3539ParseStatus
3540AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3541 SMLoc S = getLoc();
3542
3543 if (getTok().is(AsmToken::Hash))
3544 Lex(); // Eat '#'
3545 else if (getTok().isNot(AsmToken::Integer))
3546 // Operand should start from # or should be integer, emit error otherwise.
3547 return ParseStatus::NoMatch;
3548
3549 if (getTok().is(AsmToken::Integer) &&
3550 getLexer().peekTok().is(AsmToken::Colon))
3551 return tryParseImmRange(Operands);
3552
3553 const MCExpr *Imm = nullptr;
3554 if (parseSymbolicImmVal(Imm))
3555 return ParseStatus::Failure;
3556 else if (getTok().isNot(AsmToken::Comma)) {
3557 Operands.push_back(
3558 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3559 return ParseStatus::Success;
3560 }
3561
3562 // Eat ','
3563 Lex();
3564 StringRef VecGroup;
3565 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3566 Operands.push_back(
3567 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3568 Operands.push_back(
3569 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3570 return ParseStatus::Success;
3571 }
3572
3573 // The optional operand must be "lsl #N" where N is non-negative.
3574 if (!getTok().is(AsmToken::Identifier) ||
3575 !getTok().getIdentifier().equals_insensitive("lsl"))
3576 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3577
3578 // Eat 'lsl'
3579 Lex();
3580
3581 parseOptionalToken(AsmToken::Hash);
3582
3583 if (getTok().isNot(AsmToken::Integer))
3584 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3585
3586 int64_t ShiftAmount = getTok().getIntVal();
3587
3588 if (ShiftAmount < 0)
3589 return Error(getLoc(), "positive shift amount required");
3590 Lex(); // Eat the number
3591
3592 // Just in case the optional lsl #0 is used for immediates other than zero.
3593 if (ShiftAmount == 0 && Imm != nullptr) {
3594 Operands.push_back(
3595 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3596 return ParseStatus::Success;
3597 }
3598
3599 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3600 getLoc(), getContext()));
3601 return ParseStatus::Success;
3602}
3603
3604/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3605/// suggestion to help common typos.
3607AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3608 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3609 .Case("eq", AArch64CC::EQ)
3610 .Case("ne", AArch64CC::NE)
3611 .Case("cs", AArch64CC::HS)
3612 .Case("hs", AArch64CC::HS)
3613 .Case("cc", AArch64CC::LO)
3614 .Case("lo", AArch64CC::LO)
3615 .Case("mi", AArch64CC::MI)
3616 .Case("pl", AArch64CC::PL)
3617 .Case("vs", AArch64CC::VS)
3618 .Case("vc", AArch64CC::VC)
3619 .Case("hi", AArch64CC::HI)
3620 .Case("ls", AArch64CC::LS)
3621 .Case("ge", AArch64CC::GE)
3622 .Case("lt", AArch64CC::LT)
3623 .Case("gt", AArch64CC::GT)
3624 .Case("le", AArch64CC::LE)
3625 .Case("al", AArch64CC::AL)
3626 .Case("nv", AArch64CC::NV)
3627 // SVE condition code aliases:
3628 .Case("none", AArch64CC::EQ)
3629 .Case("any", AArch64CC::NE)
3630 .Case("nlast", AArch64CC::HS)
3631 .Case("last", AArch64CC::LO)
3632 .Case("first", AArch64CC::MI)
3633 .Case("nfrst", AArch64CC::PL)
3634 .Case("pmore", AArch64CC::HI)
3635 .Case("plast", AArch64CC::LS)
3636 .Case("tcont", AArch64CC::GE)
3637 .Case("tstop", AArch64CC::LT)
3638 .Default(AArch64CC::Invalid);
3639
3640 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3641 Suggestion = "nfrst";
3642
3643 return CC;
3644}
3645
3646/// parseCondCode - Parse a Condition Code operand.
3647bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3648 bool invertCondCode) {
3649 SMLoc S = getLoc();
3650 const AsmToken &Tok = getTok();
3651 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3652
3653 StringRef Cond = Tok.getString();
3654 std::string Suggestion;
3655 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3656 if (CC == AArch64CC::Invalid) {
3657 std::string Msg = "invalid condition code";
3658 if (!Suggestion.empty())
3659 Msg += ", did you mean " + Suggestion + "?";
3660 return TokError(Msg);
3661 }
3662 Lex(); // Eat identifier token.
3663
3664 if (invertCondCode) {
3665 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3666 return TokError("condition codes AL and NV are invalid for this instruction");
3668 }
3669
3670 Operands.push_back(
3671 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3672 return false;
3673}
3674
3675ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3676 const AsmToken &Tok = getTok();
3677 SMLoc S = getLoc();
3678
3679 if (Tok.isNot(AsmToken::Identifier))
3680 return TokError("invalid operand for instruction");
3681
3682 unsigned PStateImm = -1;
3683 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3684 if (!SVCR)
3685 return ParseStatus::NoMatch;
3686 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3687 PStateImm = SVCR->Encoding;
3688
3689 Operands.push_back(
3690 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3691 Lex(); // Eat identifier token.
3692 return ParseStatus::Success;
3693}
3694
3695ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3696 const AsmToken &Tok = getTok();
3697 SMLoc S = getLoc();
3698
3699 StringRef Name = Tok.getString();
3700
3701 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3702 Lex(); // eat "za[.(b|h|s|d)]"
3703 unsigned ElementWidth = 0;
3704 auto DotPosition = Name.find('.');
3705 if (DotPosition != StringRef::npos) {
3706 const auto &KindRes =
3707 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3708 if (!KindRes)
3709 return TokError(
3710 "Expected the register to be followed by element width suffix");
3711 ElementWidth = KindRes->second;
3712 }
3713 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3714 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3715 getContext()));
3716 if (getLexer().is(AsmToken::LBrac)) {
3717 // There's no comma after matrix operand, so we can parse the next operand
3718 // immediately.
3719 if (parseOperand(Operands, false, false))
3720 return ParseStatus::NoMatch;
3721 }
3722 return ParseStatus::Success;
3723 }
3724
3725 // Try to parse matrix register.
3726 MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3727 if (!Reg)
3728 return ParseStatus::NoMatch;
3729
3730 size_t DotPosition = Name.find('.');
3731 assert(DotPosition != StringRef::npos && "Unexpected register");
3732
3733 StringRef Head = Name.take_front(DotPosition);
3734 StringRef Tail = Name.drop_front(DotPosition);
3735 StringRef RowOrColumn = Head.take_back();
3736
3737 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3738 .Case("h", MatrixKind::Row)
3739 .Case("v", MatrixKind::Col)
3740 .Default(MatrixKind::Tile);
3741
3742 // Next up, parsing the suffix
3743 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3744 if (!KindRes)
3745 return TokError(
3746 "Expected the register to be followed by element width suffix");
3747 unsigned ElementWidth = KindRes->second;
3748
3749 Lex();
3750
3751 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3752 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3753
3754 if (getLexer().is(AsmToken::LBrac)) {
3755 // There's no comma after matrix operand, so we can parse the next operand
3756 // immediately.
3757 if (parseOperand(Operands, false, false))
3758 return ParseStatus::NoMatch;
3759 }
3760 return ParseStatus::Success;
3761}
3762
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Shift/extend mnemonics are matched case-insensitively.
  std::string LowerID = Tok.getString().lower();
  // NOTE(review): the declaration of the AArch64_AM::ShiftExtendType result
  // variable (referenced below as ShOp) that receives this StringSwitch is
  // missing here — restore from upstream.
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
  // NOTE(review): the .Default(...) terminating the StringSwitch and the 'if'
  // guarding this NoMatch return (rejecting unrecognized specifiers) are
  // missing here — restore from upstream.

    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  // An optional '#' may precede the shift amount.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError("expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier))
    return Error(E, "expected integer shift amount");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return ParseStatus::Failure;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE)
    return Error(E, "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return ParseStatus::Success;
}
3829
// Table mapping architecture-extension names accepted by the ".arch_extension"
// / ".cpu" style directives to the subtarget features they enable.
static const struct Extension {
  const char *Name;
  // NOTE(review): the declaration of the features member that each entry's
  // second initializer populates is missing here — restore from upstream.
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve-aes", {AArch64::FeatureSVEAES}},
    {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
    {"sve-sm4", {AArch64::FeatureSVESM4}},
    {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
    {"sve-sha3", {AArch64::FeatureSVESHA3}},
    {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
    {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
    {"sve2-bitperm",
     {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
      AArch64::FeatureSVE2}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    {"gcs", {AArch64::FeatureGCS}},
    {"bf16", {AArch64::FeatureBF16}},
    {"compnum", {AArch64::FeatureComplxNum}},
    {"dotprod", {AArch64::FeatureDotProd}},
    {"f32mm", {AArch64::FeatureMatMulFP32}},
    {"f64mm", {AArch64::FeatureMatMulFP64}},
    {"fp16", {AArch64::FeatureFullFP16}},
    {"fp16fml", {AArch64::FeatureFP16FML}},
    {"i8mm", {AArch64::FeatureMatMulInt8}},
    {"lor", {AArch64::FeatureLOR}},
    {"profile", {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {"rdm", {AArch64::FeatureRDM}},
    {"rdma", {AArch64::FeatureRDM}},
    {"sb", {AArch64::FeatureSB}},
    {"ssbs", {AArch64::FeatureSSBS}},
    {"fp8", {AArch64::FeatureFP8}},
    {"faminmax", {AArch64::FeatureFAMINMAX}},
    {"fp8fma", {AArch64::FeatureFP8FMA}},
    {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
    {"fp8dot2", {AArch64::FeatureFP8DOT2}},
    {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
    {"fp8dot4", {AArch64::FeatureFP8DOT4}},
    {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
    {"lut", {AArch64::FeatureLUT}},
    {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
    {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
    {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
    {"sme-fa64", {AArch64::FeatureSMEFA64}},
    {"cpa", {AArch64::FeatureCPA}},
    {"tlbiw", {AArch64::FeatureTLBIW}},
    {"pops", {AArch64::FeaturePoPS}},
    {"cmpbr", {AArch64::FeatureCMPBR}},
    {"f8f32mm", {AArch64::FeatureF8F32MM}},
    {"f8f16mm", {AArch64::FeatureF8F16MM}},
    {"fprcvt", {AArch64::FeatureFPRCVT}},
    {"lsfe", {AArch64::FeatureLSFE}},
    {"sme2p2", {AArch64::FeatureSME2p2}},
    {"ssve-aes", {AArch64::FeatureSSVE_AES}},
    {"sve2p2", {AArch64::FeatureSVE2p2}},
    {"sve-aes2", {AArch64::FeatureSVEAES2}},
    {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
    {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
    {"lsui", {AArch64::FeatureLSUI}},
    {"occmo", {AArch64::FeatureOCCMO}},
    {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
    {"sme-mop4", {AArch64::FeatureSME_MOP4}},
    {"sme-tmop", {AArch64::FeatureSME_TMOP}},
    {"lscp", {AArch64::FeatureLSCP}},
    {"tlbid", {AArch64::FeatureTLBID}},
    {"mpamv2", {AArch64::FeatureMPAMv2}},
    {"mtetc", {AArch64::FeatureMTETC}},
    {"gcie", {AArch64::FeatureGCIE}},
    {"sme2p3", {AArch64::FeatureSME2p3}},
    {"sve2p3", {AArch64::FeatureSVE2p3}},
    {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
    {"f16mm", {AArch64::FeatureF16MM}},
    {"f16f32dot", {AArch64::FeatureF16F32DOT}},
    {"f16f32mm", {AArch64::FeatureF16F32MM}},
    {"mops-go", {AArch64::FeatureMOPS_GO}},
    {"poe2", {AArch64::FeatureS1POE2}},
    {"tev", {AArch64::FeatureTEV}},
    {"btie", {AArch64::FeatureBTIE}},
    {"dit", {AArch64::FeatureDIT}},
    {"brbe", {AArch64::FeatureBRBE}},
    {"bti", {AArch64::FeatureBranchTargetId}},
    {"fcma", {AArch64::FeatureComplxNum}},
    {"jscvt", {AArch64::FeatureJS}},
    {"pauth-lr", {AArch64::FeaturePAuthLR}},
    {"ssve-fexpa", {AArch64::FeatureSSVE_FEXPA}},
    {"wfxt", {AArch64::FeatureWFxT}},
    // NOTE(review): the closing "};" of this initializer list is missing
    // here — restore from upstream.
3965
3966static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3967 if (FBS[AArch64::HasV8_0aOps])
3968 Str += "ARMv8a";
3969 if (FBS[AArch64::HasV8_1aOps])
3970 Str += "ARMv8.1a";
3971 else if (FBS[AArch64::HasV8_2aOps])
3972 Str += "ARMv8.2a";
3973 else if (FBS[AArch64::HasV8_3aOps])
3974 Str += "ARMv8.3a";
3975 else if (FBS[AArch64::HasV8_4aOps])
3976 Str += "ARMv8.4a";
3977 else if (FBS[AArch64::HasV8_5aOps])
3978 Str += "ARMv8.5a";
3979 else if (FBS[AArch64::HasV8_6aOps])
3980 Str += "ARMv8.6a";
3981 else if (FBS[AArch64::HasV8_7aOps])
3982 Str += "ARMv8.7a";
3983 else if (FBS[AArch64::HasV8_8aOps])
3984 Str += "ARMv8.8a";
3985 else if (FBS[AArch64::HasV8_9aOps])
3986 Str += "ARMv8.9a";
3987 else if (FBS[AArch64::HasV9_0aOps])
3988 Str += "ARMv9-a";
3989 else if (FBS[AArch64::HasV9_1aOps])
3990 Str += "ARMv9.1a";
3991 else if (FBS[AArch64::HasV9_2aOps])
3992 Str += "ARMv9.2a";
3993 else if (FBS[AArch64::HasV9_3aOps])
3994 Str += "ARMv9.3a";
3995 else if (FBS[AArch64::HasV9_4aOps])
3996 Str += "ARMv9.4a";
3997 else if (FBS[AArch64::HasV9_5aOps])
3998 Str += "ARMv9.5a";
3999 else if (FBS[AArch64::HasV9_6aOps])
4000 Str += "ARMv9.6a";
4001 else if (FBS[AArch64::HasV9_7aOps])
4002 Str += "ARMv9.7a";
4003 else if (FBS[AArch64::HasV8_0rOps])
4004 Str += "ARMv8r";
4005 else {
4006 SmallVector<std::string, 2> ExtMatches;
4007 for (const auto& Ext : ExtensionMap) {
4008 // Use & in case multiple features are enabled
4009 if ((FBS & Ext.Features) != FeatureBitset())
4010 ExtMatches.push_back(Ext.Name);
4011 }
4012 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
4013 }
4014}
4015
4016void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4017 SMLoc S) {
4018 const uint16_t Op2 = Encoding & 7;
4019 const uint16_t Cm = (Encoding & 0x78) >> 3;
4020 const uint16_t Cn = (Encoding & 0x780) >> 7;
4021 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4022
4023 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
4024
4025 Operands.push_back(
4026 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4027 Operands.push_back(
4028 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
4029 Operands.push_back(
4030 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
4031 Expr = MCConstantExpr::create(Op2, getContext());
4032 Operands.push_back(
4033 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4034}
4035
/// parseSysAlias - The IC, DC, AT, TLBI, MLBI, PLBI, GIC and GSB
/// instructions are simple aliases for the SYS instruction. Parse them
/// specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias handled here lowers to the generic SYS mnemonic.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  // The named operation (e.g. the IC/DC/TLBI op name) follows the mnemonic.
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();
  // Whether the specific named operation requires / may take a trailing Xt.
  bool ExpectRegister = true;
  bool OptionalRegister = false;
  bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
  bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Report which features this IC op needs.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = IC->NeedsReg;
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = TLBI->NeedsReg;
    // NOTE(review): this shadows the hasTLBID computed at the top of the
    // function with the same value — looks redundant; confirm intended.
    bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
    if (hasAll || hasTLBID) {
      OptionalRegister = TLBI->OptionalReg;
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "mlbi") {
    const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
    if (!MLBI)
      return TokError("invalid operand for MLBI instruction");
    else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = MLBI->NeedsReg;
    createSysAlias(MLBI->Encoding, Operands, S);
  } else if (Mnemonic == "gic") {
    const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
    if (!GIC)
      return TokError("invalid operand for GIC instruction");
    else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = GIC->NeedsReg;
    createSysAlias(GIC->Encoding, Operands, S);
  } else if (Mnemonic == "gsb") {
    const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
    if (!GSB)
      return TokError("invalid operand for GSB instruction");
    else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
      return TokError(Str);
    }
    // GSB never takes a register operand.
    ExpectRegister = false;
    createSysAlias(GSB->Encoding, Operands, S);
  } else if (Mnemonic == "plbi") {
    const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
    if (!PLBI)
      return TokError("invalid operand for PLBI instruction");
    else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = PLBI->NeedsReg;
    if (hasAll || hasTLBID) {
      OptionalRegister = PLBI->OptionalReg;
    }
    createSysAlias(PLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
             Mnemonic == "cosp") {
    // Prediction restriction aliases (CFP/DVP/CPP/COSP RCTX).
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // op2 selects the specific prediction restriction operation.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
                         : Mnemonic == "dvp" ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp" ? 0b111
                         : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  // Unless the register is optional for this op, its presence must match
  // what the op requires.
  if (!OptionalRegister) {
    if (ExpectRegister && !HasRegister)
      return TokError("specified " + Mnemonic + " op requires a register");
    else if (!ExpectRegister && HasRegister)
      return TokError("specified " + Mnemonic + " op does not use a register");
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4198
/// parseSyslAlias - The GICR instructions are simple aliases for
/// the SYSL instruction. Parse them specially so that we create a
/// SYSL MCInst.
bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysl", NameLoc, getContext()));

  // Now expect two operands: the register comes first, then the named
  // operation identifier.
  SMLoc startLoc = getLoc();
  const AsmToken &regTok = getTok();
  StringRef reg = regTok.getString();
  MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
  if (!Reg)
    return TokError("expected register operand");

  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));

  Lex(); // Eat register token
  if (parseToken(AsmToken::Comma))
    return true;

  // Check for the operation identifier.
  const AsmToken &operandTok = getTok();
  StringRef Op = operandTok.getString();
  SMLoc S2 = operandTok.getLoc();
  Lex(); // Eat identifier token

  if (Mnemonic == "gicr") {
    const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
    if (!GICR)
      return Error(S2, "invalid operand for GICR instruction");
    else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
      // Report which features this GICR op needs.
      std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
      return Error(S2, Str);
    }
    createSysAlias(GICR->Encoding, Operands, S2);
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4247
4248/// parseSyspAlias - The TLBIP instructions are simple aliases for
4249/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  // Alias mnemonics never carry a '.'-suffixed qualifier.
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // TLBIP lowers to the generic SYSP mnemonic.
  Operands.push_back(
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Op);
    if (!TLBIP)
      return TokError("invalid operand for TLBIP instruction");
    // All TLBIP ops need d128 (or the catch-all "+all") before any
    // per-op feature check.
    if (!getSTI().hasFeature(AArch64::FeatureD128) &&
        !getSTI().hasFeature(AArch64::FeatureAll))
      return TokError("instruction requires: d128");
    if (!TLBIP->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("instruction requires: ");
      return TokError(Str);
    }
    createSysAlias(TLBIP->Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  // SYSP takes a pair of registers: either the XZR,XZR form or a
  // consecutive GPR sequence pair.
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4297
4298ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4299 MCAsmParser &Parser = getParser();
4300 const AsmToken &Tok = getTok();
4301
4302 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4303 return TokError("'csync' operand expected");
4304 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4305 // Immediate operand.
4306 const MCExpr *ImmVal;
4307 SMLoc ExprLoc = getLoc();
4308 AsmToken IntTok = Tok;
4309 if (getParser().parseExpression(ImmVal))
4310 return ParseStatus::Failure;
4311 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4312 if (!MCE)
4313 return Error(ExprLoc, "immediate value expected for barrier operand");
4314 int64_t Value = MCE->getValue();
4315 if (Mnemonic == "dsb" && Value > 15) {
4316 // This case is a no match here, but it might be matched by the nXS
4317 // variant. Deliberately not unlex the optional '#' as it is not necessary
4318 // to characterize an integer immediate.
4319 Parser.getLexer().UnLex(IntTok);
4320 return ParseStatus::NoMatch;
4321 }
4322 if (Value < 0 || Value > 15)
4323 return Error(ExprLoc, "barrier operand out of range");
4324 auto DB = AArch64DB::lookupDBByEncoding(Value);
4325 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4326 ExprLoc, getContext(),
4327 false /*hasnXSModifier*/));
4328 return ParseStatus::Success;
4329 }
4330
4331 if (Tok.isNot(AsmToken::Identifier))
4332 return TokError("invalid operand for instruction");
4333
4334 StringRef Operand = Tok.getString();
4335 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4336 auto DB = AArch64DB::lookupDBByName(Operand);
4337 // The only valid named option for ISB is 'sy'
4338 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4339 return TokError("'sy' or #imm operand expected");
4340 // The only valid named option for TSB is 'csync'
4341 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4342 return TokError("'csync' operand expected");
4343 if (!DB && !TSB) {
4344 if (Mnemonic == "dsb") {
4345 // This case is a no match here, but it might be matched by the nXS
4346 // variant.
4347 return ParseStatus::NoMatch;
4348 }
4349 return TokError("invalid barrier option name");
4350 }
4351
4352 Operands.push_back(AArch64Operand::CreateBarrier(
4353 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4354 getContext(), false /*hasnXSModifier*/));
4355 Lex(); // Consume the option
4356
4357 return ParseStatus::Success;
4358}
4359
4360ParseStatus
4361AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4362 const AsmToken &Tok = getTok();
4363
4364 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4365 if (Mnemonic != "dsb")
4366 return ParseStatus::Failure;
4367
4368 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4369 // Immediate operand.
4370 const MCExpr *ImmVal;
4371 SMLoc ExprLoc = getLoc();
4372 if (getParser().parseExpression(ImmVal))
4373 return ParseStatus::Failure;
4374 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4375 if (!MCE)
4376 return Error(ExprLoc, "immediate value expected for barrier operand");
4377 int64_t Value = MCE->getValue();
4378 // v8.7-A DSB in the nXS variant accepts only the following immediate
4379 // values: 16, 20, 24, 28.
4380 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4381 return Error(ExprLoc, "barrier operand out of range");
4382 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4383 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4384 ExprLoc, getContext(),
4385 true /*hasnXSModifier*/));
4386 return ParseStatus::Success;
4387 }
4388
4389 if (Tok.isNot(AsmToken::Identifier))
4390 return TokError("invalid operand for instruction");
4391
4392 StringRef Operand = Tok.getString();
4393 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4394
4395 if (!DB)
4396 return TokError("invalid barrier option name");
4397
4398 Operands.push_back(
4399 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4400 getContext(), true /*hasnXSModifier*/));
4401 Lex(); // Consume the option
4402
4403 return ParseStatus::Success;
4404}
4405
4406ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4407 const AsmToken &Tok = getTok();
4408
4409 if (Tok.isNot(AsmToken::Identifier))
4410 return ParseStatus::NoMatch;
4411
4412 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4413 return ParseStatus::NoMatch;
4414
4415 int MRSReg, MSRReg;
4416 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4417 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4418 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4419 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4420 } else
4421 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4422
4423 unsigned PStateImm = -1;
4424 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4425 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4426 PStateImm = PState15->Encoding;
4427 if (!PState15) {
4428 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4429 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4430 PStateImm = PState1->Encoding;
4431 }
4432
4433 Operands.push_back(
4434 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4435 PStateImm, getContext()));
4436 Lex(); // Eat identifier
4437
4438 return ParseStatus::Success;
4439}
4440
/// Parse the named hint operand of a PHint instruction; the identifier is
/// resolved to its encoding via a PHint lookup table.
ParseStatus
AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // NOTE(review): the declaration of 'PH' (the PHint table lookup for
  // Tok's string) is not visible in this listing — confirm against the
  // full source.
  if (!PH)
    return TokError("invalid operand for instruction");

  Operands.push_back(AArch64Operand::CreatePHintInst(
      PH->Encoding, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
4457
4458/// tryParseNeonVectorRegister - Parse a vector register operand.
4459bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4460 if (getTok().isNot(AsmToken::Identifier))
4461 return true;
4462
4463 SMLoc S = getLoc();
4464 // Check for a vector register specifier first.
4465 StringRef Kind;
4466 MCRegister Reg;
4467 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4468 if (!Res.isSuccess())
4469 return true;
4470
4471 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4472 if (!KindRes)
4473 return true;
4474
4475 unsigned ElementWidth = KindRes->second;
4476 Operands.push_back(
4477 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4478 S, getLoc(), getContext()));
4479
4480 // If there was an explicit qualifier, that goes on as a literal text
4481 // operand.
4482 if (!Kind.empty())
4483 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4484
4485 return tryParseVectorIndex(Operands).isFailure();
4486}
4487
4488ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4489 SMLoc SIdx = getLoc();
4490 if (parseOptionalToken(AsmToken::LBrac)) {
4491 const MCExpr *ImmVal;
4492 if (getParser().parseExpression(ImmVal))
4493 return ParseStatus::NoMatch;
4494 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4495 if (!MCE)
4496 return TokError("immediate value expected for vector index");
4497
4498 SMLoc E = getLoc();
4499
4500 if (parseToken(AsmToken::RBrac, "']' expected"))
4501 return ParseStatus::Failure;
4502
4503 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4504 E, getContext()));
4505 return ParseStatus::Success;
4506 }
4507
4508 return ParseStatus::NoMatch;
4509}
4510
4511// tryParseVectorRegister - Try to parse a vector register name with
4512// optional kind specifier. If it is a register specifier, eat the token
4513// and return it.
4514ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4515 StringRef &Kind,
4516 RegKind MatchKind) {
4517 const AsmToken &Tok = getTok();
4518
4519 if (Tok.isNot(AsmToken::Identifier))
4520 return ParseStatus::NoMatch;
4521
4522 StringRef Name = Tok.getString();
4523 // If there is a kind specifier, it's separated from the register name by
4524 // a '.'.
4525 size_t Start = 0, Next = Name.find('.');
4526 StringRef Head = Name.slice(Start, Next);
4527 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4528
4529 if (RegNum) {
4530 if (Next != StringRef::npos) {
4531 Kind = Name.substr(Next);
4532 if (!isValidVectorKind(Kind, MatchKind))
4533 return TokError("invalid vector kind qualifier");
4534 }
4535 Lex(); // Eat the register token.
4536
4537 Reg = RegNum;
4538 return ParseStatus::Success;
4539 }
4540
4541 return ParseStatus::NoMatch;
4542}
4543
4544ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4545 OperandVector &Operands) {
4546 ParseStatus Status =
4547 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4548 if (!Status.isSuccess())
4549 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4550 return Status;
4551}
4552
4553/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4554template <RegKind RK>
4555ParseStatus
4556AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4557 // Check for a SVE predicate register specifier first.
4558 const SMLoc S = getLoc();
4559 StringRef Kind;
4560 MCRegister RegNum;
4561 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4562 if (!Res.isSuccess())
4563 return Res;
4564
4565 const auto &KindRes = parseVectorKind(Kind, RK);
4566 if (!KindRes)
4567 return ParseStatus::NoMatch;
4568
4569 unsigned ElementWidth = KindRes->second;
4570 Operands.push_back(AArch64Operand::CreateVectorReg(
4571 RegNum, RK, ElementWidth, S,
4572 getLoc(), getContext()));
4573
4574 if (getLexer().is(AsmToken::LBrac)) {
4575 if (RK == RegKind::SVEPredicateAsCounter) {
4576 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4577 if (ResIndex.isSuccess())
4578 return ParseStatus::Success;
4579 } else {
4580 // Indexed predicate, there's no comma so try parse the next operand
4581 // immediately.
4582 if (parseOperand(Operands, false, false))
4583 return ParseStatus::NoMatch;
4584 }
4585 }
4586
4587 // Not all predicates are followed by a '/m' or '/z'.
4588 if (getTok().isNot(AsmToken::Slash))
4589 return ParseStatus::Success;
4590
4591 // But when they do they shouldn't have an element type suffix.
4592 if (!Kind.empty())
4593 return Error(S, "not expecting size suffix");
4594
4595 // Add a literal slash as operand
4596 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4597
4598 Lex(); // Eat the slash.
4599
4600 // Zeroing or merging?
4601 auto Pred = getTok().getString().lower();
4602 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4603 return Error(getLoc(), "expecting 'z' predication");
4604
4605 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4606 return Error(getLoc(), "expecting 'm' or 'z' predication");
4607
4608 // Add zero/merge token.
4609 const char *ZM = Pred == "z" ? "z" : "m";
4610 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4611
4612 Lex(); // Eat zero/merge token.
4613 return ParseStatus::Success;
4614}
4615
4616/// parseRegister - Parse a register operand.
4617bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4618 // Try for a Neon vector register.
4619 if (!tryParseNeonVectorRegister(Operands))
4620 return false;
4621
4622 if (tryParseZTOperand(Operands).isSuccess())
4623 return false;
4624
4625 // Otherwise try for a scalar register.
4626 if (tryParseGPROperand<false>(Operands).isSuccess())
4627 return false;
4628
4629 return true;
4630}
4631
/// Parse an immediate expression with an optional leading ':specifier:'
/// relocation modifier (e.g. ":lo12:sym"); when a modifier is present the
/// resulting expression is wrapped in an MCSpecifierExpr. Returns true on
/// error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case("lo12", AArch64::S_LO12)
                  .Case("abs_g3", AArch64::S_ABS_G3)
                  .Case("abs_g2", AArch64::S_ABS_G2)
                  .Case("abs_g2_s", AArch64::S_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
                  .Case("abs_g1", AArch64::S_ABS_G1)
                  .Case("abs_g1_s", AArch64::S_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
                  .Case("abs_g0", AArch64::S_ABS_G0)
                  .Case("abs_g0_s", AArch64::S_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
                  .Case("prel_g3", AArch64::S_PREL_G3)
                  .Case("prel_g2", AArch64::S_PREL_G2)
                  .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
                  .Case("prel_g1", AArch64::S_PREL_G1)
                  .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
                  .Case("prel_g0", AArch64::S_PREL_G0)
                  .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64::S_DTPREL_G2)
                  .Case("dtprel_g1", AArch64::S_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64::S_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64::S_TPREL_G2)
                  .Case("tprel_g1", AArch64::S_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64::S_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64::S_TPREL_HI12)
                  .Case("tprel_lo12", AArch64::S_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
                  .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
                  .Case("got", AArch64::S_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64::S_GOT_LO12)
                  .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
                  .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
                  .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
                  .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case("secrel_lo12", AArch64::S_SECREL_LO12)
                  .Case("secrel_hi12", AArch64::S_SECREL_HI12)
                  .Default(AArch64::S_INVALID);

    if (RefKind == AArch64::S_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);

  // Targets using subsections-via-symbols (Mach-O style) additionally allow
  // an '@specifier' and an optional trailing '+'/'-' term.
  SMLoc EndLoc;
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(ImmVal, EndLoc))
      return true;
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
  }

  return false;
}
4730
/// Parse an SME matrix tile list operand of the form '{}' (empty), '{za}',
/// or '{ZAn.<kind>, ...}', producing a MatrixTileList operand whose mask is
/// expressed relative to ZAD0.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse a single 'ZAn.<kind>' tile, returning its register number and
  // element width. Eats the register token on success.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes)
      return TokError(
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return ParseStatus::Failure;

    // '{za}' selects every tile.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Not a tile list after all: put the '{' back for other parsers.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every listed tile; the
  // final mask below is computed relative to ZAD0's encoding.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");

    // Out-of-order and duplicate tiles only warn; the list is still
    // accepted.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return ParseStatus::Success;
}
4836
/// Parse a curly-brace-delimited vector register list, e.g.
/// "{ v0.8b, v1.8b }", an inclusive range "{ z0.d - z3.d }", or an SME
/// strided list "{ z0.d, z8.d }". On success, appends a vector-list operand
/// (and, for NEON-style syntax, an optional trailing "[index]") to
/// \p Operands.
///
/// \param Operands    destination operand vector.
/// \param ExpectMatch when true, a non-vector register inside the braces is
///                    diagnosed as an error instead of returning NoMatch.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  // A vector list always starts with '{'; anything else is not ours to parse.
  if (!getTok().is(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function: parses one vector register (with its
  // ".<kind>" suffix) and normalizes the result to Success / NoMatch / Error
  // according to NoMatchIsError and a couple of special-cased identifiers.
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      // tryParseVectorRegister only succeeds with a suffix parseVectorKind
      // accepts, so this path is impossible.
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" belongs to the lookup-table operand parser; report NoMatch so it
    // gets a chance to run.
    if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive("zt0"))
      return ParseStatus::NoMatch;

    // "za..." identifiers are SME matrix operands; don't diagnose them here
    // even when a match was expected.
    if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");

    return ParseStatus::NoMatch;
  };

  // Number of registers in the architectural bank; used for wraparound
  // arithmetic on ranges and strides below.
  unsigned NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  // Range form: "{ vA.<k> - vB.<k> }" denotes vA..vB inclusive (wrapping at
  // the end of the register bank).
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from PrevReg to Reg, modulo the bank size.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");

    Count += Space;
  }
  else {
    // Enumerated form: "{ vA, vB, ... }". The gap between the first two
    // registers fixes the stride; all later gaps must match it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  // Derive element geometry from the ".<kind>" suffix, if one was present
  // (suffix-less lists, e.g. bare Z registers, leave both at 0).
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  // An optional lane index, e.g. "{ v0.8b, v1.8b }[2]".
  if (getTok().is(AsmToken::LBrac)) {
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4967
4968/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4969bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4970 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4971 if (!ParseRes.isSuccess())
4972 return true;
4973
4974 return tryParseVectorIndex(Operands).isFailure();
4975}
4976
4977ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4978 SMLoc StartLoc = getLoc();
4979
4980 MCRegister RegNum;
4981 ParseStatus Res = tryParseScalarRegister(RegNum);
4982 if (!Res.isSuccess())
4983 return Res;
4984
4985 if (!parseOptionalToken(AsmToken::Comma)) {
4986 Operands.push_back(AArch64Operand::CreateReg(
4987 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4988 return ParseStatus::Success;
4989 }
4990
4991 parseOptionalToken(AsmToken::Hash);
4992
4993 if (getTok().isNot(AsmToken::Integer))
4994 return Error(getLoc(), "index must be absent or #0");
4995
4996 const MCExpr *ImmVal;
4997 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4998 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4999 return Error(getLoc(), "index must be absent or #0");
5000
5001 Operands.push_back(AArch64Operand::CreateReg(
5002 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
5003 return ParseStatus::Success;
5004}
5005
5006ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
5007 SMLoc StartLoc = getLoc();
5008 const AsmToken &Tok = getTok();
5009 std::string Name = Tok.getString().lower();
5010
5011 MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);
5012
5013 if (!Reg)
5014 return ParseStatus::NoMatch;
5015
5016 Operands.push_back(AArch64Operand::CreateReg(
5017 Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
5018 Lex(); // Eat register.
5019
5020 // Check if register is followed by an index
5021 if (parseOptionalToken(AsmToken::LBrac)) {
5022 Operands.push_back(
5023 AArch64Operand::CreateToken("[", getLoc(), getContext()));
5024 const MCExpr *ImmVal;
5025 if (getParser().parseExpression(ImmVal))
5026 return ParseStatus::NoMatch;
5027 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5028 if (!MCE)
5029 return TokError("immediate value expected for vector index");
5030 Operands.push_back(AArch64Operand::CreateImm(
5031 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
5032 getLoc(), getContext()));
5033 if (parseOptionalToken(AsmToken::Comma))
5034 if (parseOptionalMulOperand(Operands))
5035 return ParseStatus::Failure;
5036 if (parseToken(AsmToken::RBrac, "']' expected"))
5037 return ParseStatus::Failure;
5038 Operands.push_back(
5039 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5040 }
5041 return ParseStatus::Success;
5042}
5043
5044template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5045ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5046 SMLoc StartLoc = getLoc();
5047
5048 MCRegister RegNum;
5049 ParseStatus Res = tryParseScalarRegister(RegNum);
5050 if (!Res.isSuccess())
5051 return Res;
5052
5053 // No shift/extend is the default.
5054 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
5055 Operands.push_back(AArch64Operand::CreateReg(
5056 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
5057 return ParseStatus::Success;
5058 }
5059
5060 // Eat the comma
5061 Lex();
5062
5063 // Match the shift
5065 Res = tryParseOptionalShiftExtend(ExtOpnd);
5066 if (!Res.isSuccess())
5067 return Res;
5068
5069 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5070 Operands.push_back(AArch64Operand::CreateReg(
5071 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5072 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5073 Ext->hasShiftExtendAmount()));
5074
5075 return ParseStatus::Success;
5076}
5077
5078bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5079 MCAsmParser &Parser = getParser();
5080
5081 // Some SVE instructions have a decoration after the immediate, i.e.
5082 // "mul vl". We parse them here and add tokens, which must be present in the
5083 // asm string in the tablegen instruction.
5084 bool NextIsVL =
5085 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
5086 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
5087 if (!getTok().getString().equals_insensitive("mul") ||
5088 !(NextIsVL || NextIsHash))
5089 return true;
5090
5091 Operands.push_back(
5092 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
5093 Lex(); // Eat the "mul"
5094
5095 if (NextIsVL) {
5096 Operands.push_back(
5097 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
5098 Lex(); // Eat the "vl"
5099 return false;
5100 }
5101
5102 if (NextIsHash) {
5103 Lex(); // Eat the #
5104 SMLoc S = getLoc();
5105
5106 // Parse immediate operand.
5107 const MCExpr *ImmVal;
5108 if (!Parser.parseExpression(ImmVal))
5109 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
5110 Operands.push_back(AArch64Operand::CreateImm(
5111 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
5112 getContext()));
5113 return false;
5114 }
5115 }
5116
5117 return Error(getLoc(), "expected 'vl' or '#<imm>'");
5118}
5119
5120bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5121 StringRef &VecGroup) {
5122 MCAsmParser &Parser = getParser();
5123 auto Tok = Parser.getTok();
5124 if (Tok.isNot(AsmToken::Identifier))
5125 return true;
5126
5127 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5128 .Case("vgx2", "vgx2")
5129 .Case("vgx4", "vgx4")
5130 .Default("");
5131
5132 if (VG.empty())
5133 return true;
5134
5135 VecGroup = VG;
5136 Parser.Lex(); // Eat vgx[2|4]
5137 return false;
5138}
5139
5140bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5141 auto Tok = getTok();
5142 if (Tok.isNot(AsmToken::Identifier))
5143 return true;
5144
5145 auto Keyword = Tok.getString();
5146 Keyword = StringSwitch<StringRef>(Keyword.lower())
5147 .Case("sm", "sm")
5148 .Case("za", "za")
5149 .Default(Keyword);
5150 Operands.push_back(
5151 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5152
5153 Lex();
5154 return false;
5155}
5156
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param isCondCode      true when this operand position is a condition code
///                        (e.g. the final operand of ccmp/csel).
/// \param invertCondCode  forwarded to parseCondCode for aliases like cset /
///                        cinc whose condition is encoded inverted.
/// \returns true on error (bool-error convention).
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Helper: after an operand, try ", <shift|extend>"; if the text after the
  // comma is not a shift/extend, push the comma back so it separates the next
  // operand as usual.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(SavedTok);
    }
    return false;
  };
  // Dispatch on the first token of the operand.
  switch (getLexer().getKind()) {
  default: {
    // Anything unrecognised: treat it as a (possibly relocated) immediate
    // expression. Note: this S/E shadow the outer pair deliberately.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // A NEON vector list; if that fails (NoMatch), fall through and treat the
    // '{' as a bare token (SME tile-list syntax).
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        // Neither matched: restore the comma for the next operand.
        getLexer().UnLex(SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    if (getParser().parseAtSpecifier(IdVal, E))
      return true;
    // Allow "label + const" / "label - const" addends after the specifier.
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Term, E))
        return true;
      IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
    }
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the tablegen asm
      // string for these comparisons.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    // "ldr rN, =<expr>" pseudo: materialise via movz when the value fits,
    // otherwise place the value in the constant pool.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Strip trailing 16-bit-aligned zero chunks into an LSL amount.
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5382
5383bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5384 const MCExpr *Expr = nullptr;
5385 SMLoc L = getLoc();
5386 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5387 return true;
5388 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5389 if (check(!Value, L, "expected constant expression"))
5390 return true;
5391 Out = Value->getValue();
5392 return false;
5393}
5394
5395bool AArch64AsmParser::parseComma() {
5396 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5397 return true;
5398 // Eat the comma
5399 Lex();
5400 return false;
5401}
5402
5403bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5404 unsigned First, unsigned Last) {
5405 MCRegister Reg;
5406 SMLoc Start, End;
5407 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5408 return true;
5409
5410 // Special handling for FP and LR; they aren't linearly after x28 in
5411 // the registers enum.
5412 unsigned RangeEnd = Last;
5413 if (Base == AArch64::X0) {
5414 if (Last == AArch64::FP) {
5415 RangeEnd = AArch64::X28;
5416 if (Reg == AArch64::FP) {
5417 Out = 29;
5418 return false;
5419 }
5420 }
5421 if (Last == AArch64::LR) {
5422 RangeEnd = AArch64::X28;
5423 if (Reg == AArch64::FP) {
5424 Out = 29;
5425 return false;
5426 } else if (Reg == AArch64::LR) {
5427 Out = 30;
5428 return false;
5429 }
5430 }
5431 }
5432
5433 if (check(Reg < First || Reg > RangeEnd, Start,
5434 Twine("expected register in range ") +
5437 return true;
5438 Out = Reg - Base;
5439 return false;
5440}
5441
5442bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5443 const MCParsedAsmOperand &Op2) const {
5444 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5445 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5446
5447 if (AOp1.isVectorList() && AOp2.isVectorList())
5448 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5449 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5450 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5451
5452 if (!AOp1.isReg() || !AOp2.isReg())
5453 return false;
5454
5455 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5456 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5457 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5458
5459 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5460 "Testing equality of non-scalar registers not supported");
5461
5462 // Check if a registers match their sub/super register classes.
5463 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5464 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5465 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5466 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5467 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5468 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5469 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5470 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5471
5472 return false;
5473}
5474
/// Parse an AArch64 instruction mnemonic followed by its operands.
///
/// Canonicalises fused branch mnemonics ("beq" -> "b.eq"), handles the
/// ".req" directive and the SYS/SYSL/SYSP alias families, splits the
/// mnemonic on '.' into a head token plus suffix tokens, then parses the
/// comma-separated operand list. Returns true on error.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Rewrite legacy dotless conditional branches to the canonical "b.<cc>"
  // spelling so the suffix-splitting below sees a uniform form.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the diagnostic at the suffix within the original spelling.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The operand position N decides
      // whether it must be treated as a condition code (see flags above).
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      // + An RBrac will end an address for load/store/prefetch
      // + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5627
5628static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5629 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5630 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5631 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5632 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5633 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5634 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5635 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5636}
5637
5638// FIXME: This entire function is a giant hack to provide us with decent
5639// operand range validation/diagnostics until TableGen/MC can be extended
5640// to support autogeneration of this kind of validation.
5641bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5642 SmallVectorImpl<SMLoc> &Loc) {
5643 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5644 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5645
5646 // A prefix only applies to the instruction following it. Here we extract
5647 // prefix information for the next instruction before validating the current
5648 // one so that in the case of failure we don't erroneously continue using the
5649 // current prefix.
5650 PrefixInfo Prefix = NextPrefix;
5651 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5652
5653 // Before validating the instruction in isolation we run through the rules
5654 // applicable when it follows a prefix instruction.
5655 // NOTE: brk & hlt can be prefixed but require no additional validation.
5656 if (Prefix.isActive() &&
5657 (Inst.getOpcode() != AArch64::BRK) &&
5658 (Inst.getOpcode() != AArch64::HLT)) {
5659
5660 // Prefixed instructions must have a destructive operand.
5663 return Error(IDLoc, "instruction is unpredictable when following a"
5664 " movprfx, suggest replacing movprfx with mov");
5665
5666 // Destination operands must match.
5667 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5668 return Error(Loc[0], "instruction is unpredictable when following a"
5669 " movprfx writing to a different destination");
5670
5671 // Destination operand must not be used in any other location.
5672 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5673 if (Inst.getOperand(i).isReg() &&
5674 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5675 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5676 return Error(Loc[0], "instruction is unpredictable when following a"
5677 " movprfx and destination also used as non-destructive"
5678 " source");
5679 }
5680
5681 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5682 if (Prefix.isPredicated()) {
5683 int PgIdx = -1;
5684
5685 // Find the instructions general predicate.
5686 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5687 if (Inst.getOperand(i).isReg() &&
5688 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5689 PgIdx = i;
5690 break;
5691 }
5692
5693 // Instruction must be predicated if the movprfx is predicated.
5694 if (PgIdx == -1 ||
5696 return Error(IDLoc, "instruction is unpredictable when following a"
5697 " predicated movprfx, suggest using unpredicated movprfx");
5698
5699 // Instruction must use same general predicate as the movprfx.
5700 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5701 return Error(IDLoc, "instruction is unpredictable when following a"
5702 " predicated movprfx using a different general predicate");
5703
5704 // Instruction element type must match the movprfx.
5705 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5706 return Error(IDLoc, "instruction is unpredictable when following a"
5707 " predicated movprfx with a different element size");
5708 }
5709 }
5710
5711 // On ARM64EC, only valid registers may be used. Warn against using
5712 // explicitly disallowed registers.
5713 if (IsWindowsArm64EC) {
5714 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5715 if (Inst.getOperand(i).isReg()) {
5716 MCRegister Reg = Inst.getOperand(i).getReg();
5717 // At this point, vector registers are matched to their
5718 // appropriately sized alias.
5719 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5720 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5721 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5722 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5723 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5724 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5725 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5726 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5727 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5728 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5729 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5730 " is disallowed on ARM64EC.");
5731 }
5732 }
5733 }
5734 }
5735
5736 // Check for indexed addressing modes w/ the base register being the
5737 // same as a destination/source register or pair load where
5738 // the Rt == Rt2. All of those are undefined behaviour.
5739 switch (Inst.getOpcode()) {
5740 case AArch64::LDPSWpre:
5741 case AArch64::LDPWpost:
5742 case AArch64::LDPWpre:
5743 case AArch64::LDPXpost:
5744 case AArch64::LDPXpre: {
5745 MCRegister Rt = Inst.getOperand(1).getReg();
5746 MCRegister Rt2 = Inst.getOperand(2).getReg();
5747 MCRegister Rn = Inst.getOperand(3).getReg();
5748 if (RI->isSubRegisterEq(Rn, Rt))
5749 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5750 "is also a destination");
5751 if (RI->isSubRegisterEq(Rn, Rt2))
5752 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5753 "is also a destination");
5754 [[fallthrough]];
5755 }
5756 case AArch64::LDR_ZA:
5757 case AArch64::STR_ZA: {
5758 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5759 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5760 return Error(Loc[1],
5761 "unpredictable instruction, immediate and offset mismatch.");
5762 break;
5763 }
5764 case AArch64::LDPDi:
5765 case AArch64::LDPQi:
5766 case AArch64::LDPSi:
5767 case AArch64::LDPSWi:
5768 case AArch64::LDPWi:
5769 case AArch64::LDPXi: {
5770 MCRegister Rt = Inst.getOperand(0).getReg();
5771 MCRegister Rt2 = Inst.getOperand(1).getReg();
5772 if (Rt == Rt2)
5773 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5774 break;
5775 }
5776 case AArch64::LDPDpost:
5777 case AArch64::LDPDpre:
5778 case AArch64::LDPQpost:
5779 case AArch64::LDPQpre:
5780 case AArch64::LDPSpost:
5781 case AArch64::LDPSpre:
5782 case AArch64::LDPSWpost: {
5783 MCRegister Rt = Inst.getOperand(1).getReg();
5784 MCRegister Rt2 = Inst.getOperand(2).getReg();
5785 if (Rt == Rt2)
5786 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5787 break;
5788 }
5789 case AArch64::STPDpost:
5790 case AArch64::STPDpre:
5791 case AArch64::STPQpost:
5792 case AArch64::STPQpre:
5793 case AArch64::STPSpost:
5794 case AArch64::STPSpre:
5795 case AArch64::STPWpost:
5796 case AArch64::STPWpre:
5797 case AArch64::STPXpost:
5798 case AArch64::STPXpre: {
5799 MCRegister Rt = Inst.getOperand(1).getReg();
5800 MCRegister Rt2 = Inst.getOperand(2).getReg();
5801 MCRegister Rn = Inst.getOperand(3).getReg();
5802 if (RI->isSubRegisterEq(Rn, Rt))
5803 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5804 "is also a source");
5805 if (RI->isSubRegisterEq(Rn, Rt2))
5806 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5807 "is also a source");
5808 break;
5809 }
5810 case AArch64::LDRBBpre:
5811 case AArch64::LDRBpre:
5812 case AArch64::LDRHHpre:
5813 case AArch64::LDRHpre:
5814 case AArch64::LDRSBWpre:
5815 case AArch64::LDRSBXpre:
5816 case AArch64::LDRSHWpre:
5817 case AArch64::LDRSHXpre:
5818 case AArch64::LDRSWpre:
5819 case AArch64::LDRWpre:
5820 case AArch64::LDRXpre:
5821 case AArch64::LDRBBpost:
5822 case AArch64::LDRBpost:
5823 case AArch64::LDRHHpost:
5824 case AArch64::LDRHpost:
5825 case AArch64::LDRSBWpost:
5826 case AArch64::LDRSBXpost:
5827 case AArch64::LDRSHWpost:
5828 case AArch64::LDRSHXpost:
5829 case AArch64::LDRSWpost:
5830 case AArch64::LDRWpost:
5831 case AArch64::LDRXpost: {
5832 MCRegister Rt = Inst.getOperand(1).getReg();
5833 MCRegister Rn = Inst.getOperand(2).getReg();
5834 if (RI->isSubRegisterEq(Rn, Rt))
5835 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5836 "is also a source");
5837 break;
5838 }
5839 case AArch64::STRBBpost:
5840 case AArch64::STRBpost:
5841 case AArch64::STRHHpost:
5842 case AArch64::STRHpost:
5843 case AArch64::STRWpost:
5844 case AArch64::STRXpost:
5845 case AArch64::STRBBpre:
5846 case AArch64::STRBpre:
5847 case AArch64::STRHHpre:
5848 case AArch64::STRHpre:
5849 case AArch64::STRWpre:
5850 case AArch64::STRXpre: {
5851 MCRegister Rt = Inst.getOperand(1).getReg();
5852 MCRegister Rn = Inst.getOperand(2).getReg();
5853 if (RI->isSubRegisterEq(Rn, Rt))
5854 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5855 "is also a source");
5856 break;
5857 }
5858 case AArch64::STXRB:
5859 case AArch64::STXRH:
5860 case AArch64::STXRW:
5861 case AArch64::STXRX:
5862 case AArch64::STLXRB:
5863 case AArch64::STLXRH:
5864 case AArch64::STLXRW:
5865 case AArch64::STLXRX: {
5866 MCRegister Rs = Inst.getOperand(0).getReg();
5867 MCRegister Rt = Inst.getOperand(1).getReg();
5868 MCRegister Rn = Inst.getOperand(2).getReg();
5869 if (RI->isSubRegisterEq(Rt, Rs) ||
5870 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5871 return Error(Loc[0],
5872 "unpredictable STXR instruction, status is also a source");
5873 break;
5874 }
5875 case AArch64::STXPW:
5876 case AArch64::STXPX:
5877 case AArch64::STLXPW:
5878 case AArch64::STLXPX: {
5879 MCRegister Rs = Inst.getOperand(0).getReg();
5880 MCRegister Rt1 = Inst.getOperand(1).getReg();
5881 MCRegister Rt2 = Inst.getOperand(2).getReg();
5882 MCRegister Rn = Inst.getOperand(3).getReg();
5883 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5884 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5885 return Error(Loc[0],
5886 "unpredictable STXP instruction, status is also a source");
5887 break;
5888 }
5889 case AArch64::LDRABwriteback:
5890 case AArch64::LDRAAwriteback: {
5891 MCRegister Xt = Inst.getOperand(0).getReg();
5892 MCRegister Xn = Inst.getOperand(1).getReg();
5893 if (Xt == Xn)
5894 return Error(Loc[0],
5895 "unpredictable LDRA instruction, writeback base"
5896 " is also a destination");
5897 break;
5898 }
5899 }
5900
5901 // Check v8.8-A memops instructions.
5902 switch (Inst.getOpcode()) {
5903 case AArch64::CPYFP:
5904 case AArch64::CPYFPWN:
5905 case AArch64::CPYFPRN:
5906 case AArch64::CPYFPN:
5907 case AArch64::CPYFPWT:
5908 case AArch64::CPYFPWTWN:
5909 case AArch64::CPYFPWTRN:
5910 case AArch64::CPYFPWTN:
5911 case AArch64::CPYFPRT:
5912 case AArch64::CPYFPRTWN:
5913 case AArch64::CPYFPRTRN:
5914 case AArch64::CPYFPRTN:
5915 case AArch64::CPYFPT:
5916 case AArch64::CPYFPTWN:
5917 case AArch64::CPYFPTRN:
5918 case AArch64::CPYFPTN:
5919 case AArch64::CPYFM:
5920 case AArch64::CPYFMWN:
5921 case AArch64::CPYFMRN:
5922 case AArch64::CPYFMN:
5923 case AArch64::CPYFMWT:
5924 case AArch64::CPYFMWTWN:
5925 case AArch64::CPYFMWTRN:
5926 case AArch64::CPYFMWTN:
5927 case AArch64::CPYFMRT:
5928 case AArch64::CPYFMRTWN:
5929 case AArch64::CPYFMRTRN:
5930 case AArch64::CPYFMRTN:
5931 case AArch64::CPYFMT:
5932 case AArch64::CPYFMTWN:
5933 case AArch64::CPYFMTRN:
5934 case AArch64::CPYFMTN:
5935 case AArch64::CPYFE:
5936 case AArch64::CPYFEWN:
5937 case AArch64::CPYFERN:
5938 case AArch64::CPYFEN:
5939 case AArch64::CPYFEWT:
5940 case AArch64::CPYFEWTWN:
5941 case AArch64::CPYFEWTRN:
5942 case AArch64::CPYFEWTN:
5943 case AArch64::CPYFERT:
5944 case AArch64::CPYFERTWN:
5945 case AArch64::CPYFERTRN:
5946 case AArch64::CPYFERTN:
5947 case AArch64::CPYFET:
5948 case AArch64::CPYFETWN:
5949 case AArch64::CPYFETRN:
5950 case AArch64::CPYFETN:
5951 case AArch64::CPYP:
5952 case AArch64::CPYPWN:
5953 case AArch64::CPYPRN:
5954 case AArch64::CPYPN:
5955 case AArch64::CPYPWT:
5956 case AArch64::CPYPWTWN:
5957 case AArch64::CPYPWTRN:
5958 case AArch64::CPYPWTN:
5959 case AArch64::CPYPRT:
5960 case AArch64::CPYPRTWN:
5961 case AArch64::CPYPRTRN:
5962 case AArch64::CPYPRTN:
5963 case AArch64::CPYPT:
5964 case AArch64::CPYPTWN:
5965 case AArch64::CPYPTRN:
5966 case AArch64::CPYPTN:
5967 case AArch64::CPYM:
5968 case AArch64::CPYMWN:
5969 case AArch64::CPYMRN:
5970 case AArch64::CPYMN:
5971 case AArch64::CPYMWT:
5972 case AArch64::CPYMWTWN:
5973 case AArch64::CPYMWTRN:
5974 case AArch64::CPYMWTN:
5975 case AArch64::CPYMRT:
5976 case AArch64::CPYMRTWN:
5977 case AArch64::CPYMRTRN:
5978 case AArch64::CPYMRTN:
5979 case AArch64::CPYMT:
5980 case AArch64::CPYMTWN:
5981 case AArch64::CPYMTRN:
5982 case AArch64::CPYMTN:
5983 case AArch64::CPYE:
5984 case AArch64::CPYEWN:
5985 case AArch64::CPYERN:
5986 case AArch64::CPYEN:
5987 case AArch64::CPYEWT:
5988 case AArch64::CPYEWTWN:
5989 case AArch64::CPYEWTRN:
5990 case AArch64::CPYEWTN:
5991 case AArch64::CPYERT:
5992 case AArch64::CPYERTWN:
5993 case AArch64::CPYERTRN:
5994 case AArch64::CPYERTN:
5995 case AArch64::CPYET:
5996 case AArch64::CPYETWN:
5997 case AArch64::CPYETRN:
5998 case AArch64::CPYETN: {
5999 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
6000 MCRegister Xd = Inst.getOperand(3).getReg();
6001 MCRegister Xs = Inst.getOperand(4).getReg();
6002 MCRegister Xn = Inst.getOperand(5).getReg();
6003
6004 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6005 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6006 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6007
6008 if (Xd == Xs)
6009 return Error(Loc[0], "invalid CPY instruction, destination and source"
6010 " registers are the same");
6011 if (Xd == Xn)
6012 return Error(Loc[0], "invalid CPY instruction, destination and size"
6013 " registers are the same");
6014 if (Xs == Xn)
6015 return Error(Loc[0], "invalid CPY instruction, source and size"
6016 " registers are the same");
6017 break;
6018 }
6019 case AArch64::SETP:
6020 case AArch64::SETPT:
6021 case AArch64::SETPN:
6022 case AArch64::SETPTN:
6023 case AArch64::SETM:
6024 case AArch64::SETMT:
6025 case AArch64::SETMN:
6026 case AArch64::SETMTN:
6027 case AArch64::SETE:
6028 case AArch64::SETET:
6029 case AArch64::SETEN:
6030 case AArch64::SETETN:
6031 case AArch64::SETGP:
6032 case AArch64::SETGPT:
6033 case AArch64::SETGPN:
6034 case AArch64::SETGPTN:
6035 case AArch64::SETGM:
6036 case AArch64::SETGMT:
6037 case AArch64::SETGMN:
6038 case AArch64::SETGMTN:
6039 case AArch64::MOPSSETGE:
6040 case AArch64::MOPSSETGET:
6041 case AArch64::MOPSSETGEN:
6042 case AArch64::MOPSSETGETN: {
6043 // Xd_wb == op0, Xn_wb == op1
6044 MCRegister Xd = Inst.getOperand(2).getReg();
6045 MCRegister Xn = Inst.getOperand(3).getReg();
6046 MCRegister Xm = Inst.getOperand(4).getReg();
6047
6048 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6049 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6050
6051 if (Xd == Xn)
6052 return Error(Loc[0], "invalid SET instruction, destination and size"
6053 " registers are the same");
6054 if (Xd == Xm)
6055 return Error(Loc[0], "invalid SET instruction, destination and source"
6056 " registers are the same");
6057 if (Xn == Xm)
6058 return Error(Loc[0], "invalid SET instruction, source and size"
6059 " registers are the same");
6060 break;
6061 }
6062 case AArch64::SETGOP:
6063 case AArch64::SETGOPT:
6064 case AArch64::SETGOPN:
6065 case AArch64::SETGOPTN:
6066 case AArch64::SETGOM:
6067 case AArch64::SETGOMT:
6068 case AArch64::SETGOMN:
6069 case AArch64::SETGOMTN:
6070 case AArch64::SETGOE:
6071 case AArch64::SETGOET:
6072 case AArch64::SETGOEN:
6073 case AArch64::SETGOETN: {
6074 // Xd_wb == op0, Xn_wb == op1
6075 MCRegister Xd = Inst.getOperand(2).getReg();
6076 MCRegister Xn = Inst.getOperand(3).getReg();
6077
6078 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6079 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6080
6081 if (Xd == Xn)
6082 return Error(Loc[0], "invalid SET instruction, destination and size"
6083 " registers are the same");
6084 break;
6085 }
6086 }
6087
6088 // Now check immediate ranges. Separate from the above as there is overlap
6089 // in the instructions being checked and this keeps the nested conditionals
6090 // to a minimum.
6091 switch (Inst.getOpcode()) {
6092 case AArch64::ADDSWri:
6093 case AArch64::ADDSXri:
6094 case AArch64::ADDWri:
6095 case AArch64::ADDXri:
6096 case AArch64::SUBSWri:
6097 case AArch64::SUBSXri:
6098 case AArch64::SUBWri:
6099 case AArch64::SUBXri: {
6100 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6101 // some slight duplication here.
6102 if (Inst.getOperand(2).isExpr()) {
6103 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6104 AArch64::Specifier ELFSpec;
6105 AArch64::Specifier DarwinSpec;
6106 int64_t Addend;
6107 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6108
6109 // Only allow these with ADDXri.
6110 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6111 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6112 Inst.getOpcode() == AArch64::ADDXri)
6113 return false;
6114
6115 // Only allow these with ADDXri/ADDWri
6123 ELFSpec) &&
6124 (Inst.getOpcode() == AArch64::ADDXri ||
6125 Inst.getOpcode() == AArch64::ADDWri))
6126 return false;
6127
6128 // Don't allow symbol refs in the immediate field otherwise
6129 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6130 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6131 // 'cmp w0, 'borked')
6132 return Error(Loc.back(), "invalid immediate expression");
6133 }
6134 // We don't validate more complex expressions here
6135 }
6136 return false;
6137 }
6138 default:
6139 return false;
6140 }
6141}
6142
6144 const FeatureBitset &FBS,
6145 unsigned VariantID = 0);
6146
6147bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6149 OperandVector &Operands) {
6150 switch (ErrCode) {
6151 case Match_InvalidTiedOperand: {
6152 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6153 if (Op.isVectorList())
6154 return Error(Loc, "operand must match destination register list");
6155
6156 assert(Op.isReg() && "Unexpected operand type");
6157 switch (Op.getRegEqualityTy()) {
6158 case RegConstraintEqualityTy::EqualsSubReg:
6159 return Error(Loc, "operand must be 64-bit form of destination register");
6160 case RegConstraintEqualityTy::EqualsSuperReg:
6161 return Error(Loc, "operand must be 32-bit form of destination register");
6162 case RegConstraintEqualityTy::EqualsReg:
6163 return Error(Loc, "operand must match destination register");
6164 }
6165 llvm_unreachable("Unknown RegConstraintEqualityTy");
6166 }
6167 case Match_MissingFeature:
6168 return Error(Loc,
6169 "instruction requires a CPU feature not currently enabled");
6170 case Match_InvalidOperand:
6171 return Error(Loc, "invalid operand for instruction");
6172 case Match_InvalidSuffix:
6173 return Error(Loc, "invalid type suffix for instruction");
6174 case Match_InvalidCondCode:
6175 return Error(Loc, "expected AArch64 condition code");
6176 case Match_AddSubRegExtendSmall:
6177 return Error(Loc,
6178 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6179 case Match_AddSubRegExtendLarge:
6180 return Error(Loc,
6181 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6182 case Match_AddSubSecondSource:
6183 return Error(Loc,
6184 "expected compatible register, symbol or integer in range [0, 4095]");
6185 case Match_LogicalSecondSource:
6186 return Error(Loc, "expected compatible register or logical immediate");
6187 case Match_InvalidMovImm32Shift:
6188 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6189 case Match_InvalidMovImm64Shift:
6190 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6191 case Match_AddSubRegShift32:
6192 return Error(Loc,
6193 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6194 case Match_AddSubRegShift64:
6195 return Error(Loc,
6196 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6197 case Match_InvalidFPImm:
6198 return Error(Loc,
6199 "expected compatible register or floating-point constant");
6200 case Match_InvalidMemoryIndexedSImm6:
6201 return Error(Loc, "index must be an integer in range [-32, 31].");
6202 case Match_InvalidMemoryIndexedSImm5:
6203 return Error(Loc, "index must be an integer in range [-16, 15].");
6204 case Match_InvalidMemoryIndexed1SImm4:
6205 return Error(Loc, "index must be an integer in range [-8, 7].");
6206 case Match_InvalidMemoryIndexed2SImm4:
6207 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6208 case Match_InvalidMemoryIndexed3SImm4:
6209 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6210 case Match_InvalidMemoryIndexed4SImm4:
6211 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6212 case Match_InvalidMemoryIndexed16SImm4:
6213 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6214 case Match_InvalidMemoryIndexed32SImm4:
6215 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6216 case Match_InvalidMemoryIndexed1SImm6:
6217 return Error(Loc, "index must be an integer in range [-32, 31].");
6218 case Match_InvalidMemoryIndexedSImm8:
6219 return Error(Loc, "index must be an integer in range [-128, 127].");
6220 case Match_InvalidMemoryIndexedSImm9:
6221 return Error(Loc, "index must be an integer in range [-256, 255].");
6222 case Match_InvalidMemoryIndexed16SImm9:
6223 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6224 case Match_InvalidMemoryIndexed8SImm10:
6225 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6226 case Match_InvalidMemoryIndexed4SImm7:
6227 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6228 case Match_InvalidMemoryIndexed8SImm7:
6229 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6230 case Match_InvalidMemoryIndexed16SImm7:
6231 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6232 case Match_InvalidMemoryIndexed8UImm5:
6233 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6234 case Match_InvalidMemoryIndexed8UImm3:
6235 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6236 case Match_InvalidMemoryIndexed4UImm5:
6237 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6238 case Match_InvalidMemoryIndexed2UImm5:
6239 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6240 case Match_InvalidMemoryIndexed8UImm6:
6241 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6242 case Match_InvalidMemoryIndexed16UImm6:
6243 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6244 case Match_InvalidMemoryIndexed4UImm6:
6245 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6246 case Match_InvalidMemoryIndexed2UImm6:
6247 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6248 case Match_InvalidMemoryIndexed1UImm6:
6249 return Error(Loc, "index must be in range [0, 63].");
6250 case Match_InvalidMemoryWExtend8:
6251 return Error(Loc,
6252 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6253 case Match_InvalidMemoryWExtend16:
6254 return Error(Loc,
6255 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6256 case Match_InvalidMemoryWExtend32:
6257 return Error(Loc,
6258 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6259 case Match_InvalidMemoryWExtend64:
6260 return Error(Loc,
6261 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6262 case Match_InvalidMemoryWExtend128:
6263 return Error(Loc,
6264 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6265 case Match_InvalidMemoryXExtend8:
6266 return Error(Loc,
6267 "expected 'lsl' or 'sxtx' with optional shift of #0");
6268 case Match_InvalidMemoryXExtend16:
6269 return Error(Loc,
6270 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6271 case Match_InvalidMemoryXExtend32:
6272 return Error(Loc,
6273 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6274 case Match_InvalidMemoryXExtend64:
6275 return Error(Loc,
6276 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6277 case Match_InvalidMemoryXExtend128:
6278 return Error(Loc,
6279 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6280 case Match_InvalidMemoryIndexed1:
6281 return Error(Loc, "index must be an integer in range [0, 4095].");
6282 case Match_InvalidMemoryIndexed2:
6283 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6284 case Match_InvalidMemoryIndexed4:
6285 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6286 case Match_InvalidMemoryIndexed8:
6287 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6288 case Match_InvalidMemoryIndexed16:
6289 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6290 case Match_InvalidImm0_0:
6291 return Error(Loc, "immediate must be 0.");
6292 case Match_InvalidImm0_1:
6293 return Error(Loc, "immediate must be an integer in range [0, 1].");
6294 case Match_InvalidImm0_3:
6295 return Error(Loc, "immediate must be an integer in range [0, 3].");
6296 case Match_InvalidImm0_7:
6297 return Error(Loc, "immediate must be an integer in range [0, 7].");
6298 case Match_InvalidImm0_15:
6299 return Error(Loc, "immediate must be an integer in range [0, 15].");
6300 case Match_InvalidImm0_31:
6301 return Error(Loc, "immediate must be an integer in range [0, 31].");
6302 case Match_InvalidImm0_63:
6303 return Error(Loc, "immediate must be an integer in range [0, 63].");
6304 case Match_InvalidImm0_127:
6305 return Error(Loc, "immediate must be an integer in range [0, 127].");
6306 case Match_InvalidImm0_255:
6307 return Error(Loc, "immediate must be an integer in range [0, 255].");
6308 case Match_InvalidImm0_65535:
6309 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6310 case Match_InvalidImm1_8:
6311 return Error(Loc, "immediate must be an integer in range [1, 8].");
6312 case Match_InvalidImm1_16:
6313 return Error(Loc, "immediate must be an integer in range [1, 16].");
6314 case Match_InvalidImm1_32:
6315 return Error(Loc, "immediate must be an integer in range [1, 32].");
6316 case Match_InvalidImm1_64:
6317 return Error(Loc, "immediate must be an integer in range [1, 64].");
6318 case Match_InvalidImmM1_62:
6319 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6320 case Match_InvalidMemoryIndexedRange2UImm0:
6321 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6322 case Match_InvalidMemoryIndexedRange2UImm1:
6323 return Error(Loc, "vector select offset must be an immediate range of the "
6324 "form <immf>:<imml>, where the first "
6325 "immediate is a multiple of 2 in the range [0, 2], and "
6326 "the second immediate is immf + 1.");
6327 case Match_InvalidMemoryIndexedRange2UImm2:
6328 case Match_InvalidMemoryIndexedRange2UImm3:
6329 return Error(
6330 Loc,
6331 "vector select offset must be an immediate range of the form "
6332 "<immf>:<imml>, "
6333 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6334 "[0, 14] "
6335 "depending on the instruction, and the second immediate is immf + 1.");
6336 case Match_InvalidMemoryIndexedRange4UImm0:
6337 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6338 case Match_InvalidMemoryIndexedRange4UImm1:
6339 case Match_InvalidMemoryIndexedRange4UImm2:
6340 return Error(
6341 Loc,
6342 "vector select offset must be an immediate range of the form "
6343 "<immf>:<imml>, "
6344 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6345 "[0, 12] "
6346 "depending on the instruction, and the second immediate is immf + 3.");
6347 case Match_InvalidSVEAddSubImm8:
6348 return Error(Loc, "immediate must be an integer in range [0, 255]"
6349 " with a shift amount of 0");
6350 case Match_InvalidSVEAddSubImm16:
6351 case Match_InvalidSVEAddSubImm32:
6352 case Match_InvalidSVEAddSubImm64:
6353 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6354 "multiple of 256 in range [256, 65280]");
6355 case Match_InvalidSVECpyImm8:
6356 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6357 " with a shift amount of 0");
6358 case Match_InvalidSVECpyImm16:
6359 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6360 "multiple of 256 in range [-32768, 65280]");
6361 case Match_InvalidSVECpyImm32:
6362 case Match_InvalidSVECpyImm64:
6363 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6364 "multiple of 256 in range [-32768, 32512]");
6365 case Match_InvalidIndexRange0_0:
6366 return Error(Loc, "expected lane specifier '[0]'");
6367 case Match_InvalidIndexRange1_1:
6368 return Error(Loc, "expected lane specifier '[1]'");
6369 case Match_InvalidIndexRange0_15:
6370 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6371 case Match_InvalidIndexRange0_7:
6372 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6373 case Match_InvalidIndexRange0_3:
6374 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6375 case Match_InvalidIndexRange0_1:
6376 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6377 case Match_InvalidSVEIndexRange0_63:
6378 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6379 case Match_InvalidSVEIndexRange0_31:
6380 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6381 case Match_InvalidSVEIndexRange0_15:
6382 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6383 case Match_InvalidSVEIndexRange0_7:
6384 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6385 case Match_InvalidSVEIndexRange0_3:
6386 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6387 case Match_InvalidLabel:
6388 return Error(Loc, "expected label or encodable integer pc offset");
6389 case Match_MRS:
6390 return Error(Loc, "expected readable system register");
6391 case Match_MSR:
6392 case Match_InvalidSVCR:
6393 return Error(Loc, "expected writable system register or pstate");
6394 case Match_InvalidComplexRotationEven:
6395 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6396 case Match_InvalidComplexRotationOdd:
6397 return Error(Loc, "complex rotation must be 90 or 270.");
6398 case Match_MnemonicFail: {
6399 std::string Suggestion = AArch64MnemonicSpellCheck(
6400 ((AArch64Operand &)*Operands[0]).getToken(),
6401 ComputeAvailableFeatures(STI->getFeatureBits()));
6402 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6403 }
6404 case Match_InvalidGPR64shifted8:
6405 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6406 case Match_InvalidGPR64shifted16:
6407 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6408 case Match_InvalidGPR64shifted32:
6409 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6410 case Match_InvalidGPR64shifted64:
6411 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6412 case Match_InvalidGPR64shifted128:
6413 return Error(
6414 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6415 case Match_InvalidGPR64NoXZRshifted8:
6416 return Error(Loc, "register must be x0..x30 without shift");
6417 case Match_InvalidGPR64NoXZRshifted16:
6418 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6419 case Match_InvalidGPR64NoXZRshifted32:
6420 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6421 case Match_InvalidGPR64NoXZRshifted64:
6422 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6423 case Match_InvalidGPR64NoXZRshifted128:
6424 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6425 case Match_InvalidZPR32UXTW8:
6426 case Match_InvalidZPR32SXTW8:
6427 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6428 case Match_InvalidZPR32UXTW16:
6429 case Match_InvalidZPR32SXTW16:
6430 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6431 case Match_InvalidZPR32UXTW32:
6432 case Match_InvalidZPR32SXTW32:
6433 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6434 case Match_InvalidZPR32UXTW64:
6435 case Match_InvalidZPR32SXTW64:
6436 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6437 case Match_InvalidZPR64UXTW8:
6438 case Match_InvalidZPR64SXTW8:
6439 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6440 case Match_InvalidZPR64UXTW16:
6441 case Match_InvalidZPR64SXTW16:
6442 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6443 case Match_InvalidZPR64UXTW32:
6444 case Match_InvalidZPR64SXTW32:
6445 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6446 case Match_InvalidZPR64UXTW64:
6447 case Match_InvalidZPR64SXTW64:
6448 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6449 case Match_InvalidZPR32LSL8:
6450 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6451 case Match_InvalidZPR32LSL16:
6452 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6453 case Match_InvalidZPR32LSL32:
6454 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6455 case Match_InvalidZPR32LSL64:
6456 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6457 case Match_InvalidZPR64LSL8:
6458 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6459 case Match_InvalidZPR64LSL16:
6460 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6461 case Match_InvalidZPR64LSL32:
6462 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6463 case Match_InvalidZPR64LSL64:
6464 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6465 case Match_InvalidZPR0:
6466 return Error(Loc, "expected register without element width suffix");
6467 case Match_InvalidZPR8:
6468 case Match_InvalidZPR16:
6469 case Match_InvalidZPR32:
6470 case Match_InvalidZPR64:
6471 case Match_InvalidZPR128:
6472 return Error(Loc, "invalid element width");
6473 case Match_InvalidZPR_3b8:
6474 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6475 case Match_InvalidZPR_3b16:
6476 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6477 case Match_InvalidZPR_3b32:
6478 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6479 case Match_InvalidZPR_4b8:
6480 return Error(Loc,
6481 "Invalid restricted vector register, expected z0.b..z15.b");
6482 case Match_InvalidZPR_4b16:
6483 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6484 case Match_InvalidZPR_4b32:
6485 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6486 case Match_InvalidZPR_4b64:
6487 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6488 case Match_InvalidZPRMul2_Lo8:
6489 return Error(Loc, "Invalid restricted vector register, expected even "
6490 "register in z0.b..z14.b");
6491 case Match_InvalidZPRMul2_Hi8:
6492 return Error(Loc, "Invalid restricted vector register, expected even "
6493 "register in z16.b..z30.b");
6494 case Match_InvalidZPRMul2_Lo16:
6495 return Error(Loc, "Invalid restricted vector register, expected even "
6496 "register in z0.h..z14.h");
6497 case Match_InvalidZPRMul2_Hi16:
6498 return Error(Loc, "Invalid restricted vector register, expected even "
6499 "register in z16.h..z30.h");
6500 case Match_InvalidZPRMul2_Lo32:
6501 return Error(Loc, "Invalid restricted vector register, expected even "
6502 "register in z0.s..z14.s");
6503 case Match_InvalidZPRMul2_Hi32:
6504 return Error(Loc, "Invalid restricted vector register, expected even "
6505 "register in z16.s..z30.s");
6506 case Match_InvalidZPRMul2_Lo64:
6507 return Error(Loc, "Invalid restricted vector register, expected even "
6508 "register in z0.d..z14.d");
6509 case Match_InvalidZPRMul2_Hi64:
6510 return Error(Loc, "Invalid restricted vector register, expected even "
6511 "register in z16.d..z30.d");
6512 case Match_InvalidZPR_K0:
6513 return Error(Loc, "invalid restricted vector register, expected register "
6514 "in z20..z23 or z28..z31");
6515 case Match_InvalidSVEPattern:
6516 return Error(Loc, "invalid predicate pattern");
6517 case Match_InvalidSVEPPRorPNRAnyReg:
6518 case Match_InvalidSVEPPRorPNRBReg:
6519 case Match_InvalidSVEPredicateAnyReg:
6520 case Match_InvalidSVEPredicateBReg:
6521 case Match_InvalidSVEPredicateHReg:
6522 case Match_InvalidSVEPredicateSReg:
6523 case Match_InvalidSVEPredicateDReg:
6524 return Error(Loc, "invalid predicate register.");
6525 case Match_InvalidSVEPredicate3bAnyReg:
6526 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6527 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6528 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6529 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6530 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6531 return Error(Loc, "Invalid predicate register, expected PN in range "
6532 "pn8..pn15 with element suffix.");
6533 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6534 return Error(Loc, "invalid restricted predicate-as-counter register "
6535 "expected pn8..pn15");
6536 case Match_InvalidSVEPNPredicateBReg:
6537 case Match_InvalidSVEPNPredicateHReg:
6538 case Match_InvalidSVEPNPredicateSReg:
6539 case Match_InvalidSVEPNPredicateDReg:
6540 return Error(Loc, "Invalid predicate register, expected PN in range "
6541 "pn0..pn15 with element suffix.");
6542 case Match_InvalidSVEVecLenSpecifier:
6543 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6544 case Match_InvalidSVEPredicateListMul2x8:
6545 case Match_InvalidSVEPredicateListMul2x16:
6546 case Match_InvalidSVEPredicateListMul2x32:
6547 case Match_InvalidSVEPredicateListMul2x64:
6548 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6549 "predicate registers, where the first vector is a multiple of 2 "
6550 "and with correct element type");
6551 case Match_InvalidSVEExactFPImmOperandHalfOne:
6552 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6553 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6554 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6555 case Match_InvalidSVEExactFPImmOperandZeroOne:
6556 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6557 case Match_InvalidMatrixTileVectorH8:
6558 case Match_InvalidMatrixTileVectorV8:
6559 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6560 case Match_InvalidMatrixTileVectorH16:
6561 case Match_InvalidMatrixTileVectorV16:
6562 return Error(Loc,
6563 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6564 case Match_InvalidMatrixTileVectorH32:
6565 case Match_InvalidMatrixTileVectorV32:
6566 return Error(Loc,
6567 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6568 case Match_InvalidMatrixTileVectorH64:
6569 case Match_InvalidMatrixTileVectorV64:
6570 return Error(Loc,
6571 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6572 case Match_InvalidMatrixTileVectorH128:
6573 case Match_InvalidMatrixTileVectorV128:
6574 return Error(Loc,
6575 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6576 case Match_InvalidMatrixTile16:
6577 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6578 case Match_InvalidMatrixTile32:
6579 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6580 case Match_InvalidMatrixTile64:
6581 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6582 case Match_InvalidMatrix:
6583 return Error(Loc, "invalid matrix operand, expected za");
6584 case Match_InvalidMatrix8:
6585 return Error(Loc, "invalid matrix operand, expected suffix .b");
6586 case Match_InvalidMatrix16:
6587 return Error(Loc, "invalid matrix operand, expected suffix .h");
6588 case Match_InvalidMatrix32:
6589 return Error(Loc, "invalid matrix operand, expected suffix .s");
6590 case Match_InvalidMatrix64:
6591 return Error(Loc, "invalid matrix operand, expected suffix .d");
6592 case Match_InvalidMatrixIndexGPR32_12_15:
6593 return Error(Loc, "operand must be a register in range [w12, w15]");
6594 case Match_InvalidMatrixIndexGPR32_8_11:
6595 return Error(Loc, "operand must be a register in range [w8, w11]");
6596 case Match_InvalidSVEVectorList2x8Mul2:
6597 case Match_InvalidSVEVectorList2x16Mul2:
6598 case Match_InvalidSVEVectorList2x32Mul2:
6599 case Match_InvalidSVEVectorList2x64Mul2:
6600 case Match_InvalidSVEVectorList2x128Mul2:
6601 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6602 "SVE vectors, where the first vector is a multiple of 2 "
6603 "and with matching element types");
6604 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6605 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6606 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6607 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6608 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6609 "SVE vectors in the range z0-z14, where the first vector "
6610 "is a multiple of 2 "
6611 "and with matching element types");
6612 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6613 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6614 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6615 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6616 return Error(Loc,
6617 "Invalid vector list, expected list with 2 consecutive "
6618 "SVE vectors in the range z16-z30, where the first vector "
6619 "is a multiple of 2 "
6620 "and with matching element types");
6621 case Match_InvalidSVEVectorList4x8Mul4:
6622 case Match_InvalidSVEVectorList4x16Mul4:
6623 case Match_InvalidSVEVectorList4x32Mul4:
6624 case Match_InvalidSVEVectorList4x64Mul4:
6625 case Match_InvalidSVEVectorList4x128Mul4:
6626 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6627 "SVE vectors, where the first vector is a multiple of 4 "
6628 "and with matching element types");
6629 case Match_InvalidLookupTable:
6630 return Error(Loc, "Invalid lookup table, expected zt0");
6631 case Match_InvalidSVEVectorListStrided2x8:
6632 case Match_InvalidSVEVectorListStrided2x16:
6633 case Match_InvalidSVEVectorListStrided2x32:
6634 case Match_InvalidSVEVectorListStrided2x64:
6635 return Error(
6636 Loc,
6637 "Invalid vector list, expected list with each SVE vector in the list "
6638 "8 registers apart, and the first register in the range [z0, z7] or "
6639 "[z16, z23] and with correct element type");
6640 case Match_InvalidSVEVectorListStrided4x8:
6641 case Match_InvalidSVEVectorListStrided4x16:
6642 case Match_InvalidSVEVectorListStrided4x32:
6643 case Match_InvalidSVEVectorListStrided4x64:
6644 return Error(
6645 Loc,
6646 "Invalid vector list, expected list with each SVE vector in the list "
6647 "4 registers apart, and the first register in the range [z0, z3] or "
6648 "[z16, z19] and with correct element type");
6649 case Match_AddSubLSLImm3ShiftLarge:
6650 return Error(Loc,
6651 "expected 'lsl' with optional integer in range [0, 7]");
6652 default:
6653 llvm_unreachable("unexpected error code!");
6654 }
6655}
6656
6657static const char *getSubtargetFeatureName(uint64_t Val);
6658
6659bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6660 OperandVector &Operands,
6661 MCStreamer &Out,
6663 bool MatchingInlineAsm) {
6664 assert(!Operands.empty() && "Unexpected empty operand list!");
6665 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6666 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6667
6668 StringRef Tok = Op.getToken();
6669 unsigned NumOperands = Operands.size();
6670
6671 if (NumOperands == 4 && Tok == "lsl") {
6672 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6673 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6674 if (Op2.isScalarReg() && Op3.isImm()) {
6675 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6676 if (Op3CE) {
6677 uint64_t Op3Val = Op3CE->getValue();
6678 uint64_t NewOp3Val = 0;
6679 uint64_t NewOp4Val = 0;
6680 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6681 Op2.getReg())) {
6682 NewOp3Val = (32 - Op3Val) & 0x1f;
6683 NewOp4Val = 31 - Op3Val;
6684 } else {
6685 NewOp3Val = (64 - Op3Val) & 0x3f;
6686 NewOp4Val = 63 - Op3Val;
6687 }
6688
6689 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6690 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6691
6692 Operands[0] =
6693 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6694 Operands.push_back(AArch64Operand::CreateImm(
6695 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6696 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6697 Op3.getEndLoc(), getContext());
6698 }
6699 }
6700 } else if (NumOperands == 4 && Tok == "bfc") {
6701 // FIXME: Horrible hack to handle BFC->BFM alias.
6702 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6703 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6704 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6705
6706 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6707 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6708 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6709
6710 if (LSBCE && WidthCE) {
6711 uint64_t LSB = LSBCE->getValue();
6712 uint64_t Width = WidthCE->getValue();
6713
6714 uint64_t RegWidth = 0;
6715 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6716 Op1.getReg()))
6717 RegWidth = 64;
6718 else
6719 RegWidth = 32;
6720
6721 if (LSB >= RegWidth)
6722 return Error(LSBOp.getStartLoc(),
6723 "expected integer in range [0, 31]");
6724 if (Width < 1 || Width > RegWidth)
6725 return Error(WidthOp.getStartLoc(),
6726 "expected integer in range [1, 32]");
6727
6728 uint64_t ImmR = 0;
6729 if (RegWidth == 32)
6730 ImmR = (32 - LSB) & 0x1f;
6731 else
6732 ImmR = (64 - LSB) & 0x3f;
6733
6734 uint64_t ImmS = Width - 1;
6735
6736 if (ImmR != 0 && ImmS >= ImmR)
6737 return Error(WidthOp.getStartLoc(),
6738 "requested insert overflows register");
6739
6740 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6741 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6742 Operands[0] =
6743 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6744 Operands[2] = AArch64Operand::CreateReg(
6745 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6746 SMLoc(), SMLoc(), getContext());
6747 Operands[3] = AArch64Operand::CreateImm(
6748 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6749 Operands.emplace_back(
6750 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6751 WidthOp.getEndLoc(), getContext()));
6752 }
6753 }
6754 } else if (NumOperands == 5) {
6755 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6756 // UBFIZ -> UBFM aliases.
6757 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6758 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6759 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6760 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6761
6762 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6763 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6764 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6765
6766 if (Op3CE && Op4CE) {
6767 uint64_t Op3Val = Op3CE->getValue();
6768 uint64_t Op4Val = Op4CE->getValue();
6769
6770 uint64_t RegWidth = 0;
6771 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6772 Op1.getReg()))
6773 RegWidth = 64;
6774 else
6775 RegWidth = 32;
6776
6777 if (Op3Val >= RegWidth)
6778 return Error(Op3.getStartLoc(),
6779 "expected integer in range [0, 31]");
6780 if (Op4Val < 1 || Op4Val > RegWidth)
6781 return Error(Op4.getStartLoc(),
6782 "expected integer in range [1, 32]");
6783
6784 uint64_t NewOp3Val = 0;
6785 if (RegWidth == 32)
6786 NewOp3Val = (32 - Op3Val) & 0x1f;
6787 else
6788 NewOp3Val = (64 - Op3Val) & 0x3f;
6789
6790 uint64_t NewOp4Val = Op4Val - 1;
6791
6792 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6793 return Error(Op4.getStartLoc(),
6794 "requested insert overflows register");
6795
6796 const MCExpr *NewOp3 =
6797 MCConstantExpr::create(NewOp3Val, getContext());
6798 const MCExpr *NewOp4 =
6799 MCConstantExpr::create(NewOp4Val, getContext());
6800 Operands[3] = AArch64Operand::CreateImm(
6801 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6802 Operands[4] = AArch64Operand::CreateImm(
6803 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6804 if (Tok == "bfi")
6805 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6806 getContext());
6807 else if (Tok == "sbfiz")
6808 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6809 getContext());
6810 else if (Tok == "ubfiz")
6811 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6812 getContext());
6813 else
6814 llvm_unreachable("No valid mnemonic for alias?");
6815 }
6816 }
6817
6818 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6819 // UBFX -> UBFM aliases.
6820 } else if (NumOperands == 5 &&
6821 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6822 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6823 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6824 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6825
6826 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6827 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6828 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6829
6830 if (Op3CE && Op4CE) {
6831 uint64_t Op3Val = Op3CE->getValue();
6832 uint64_t Op4Val = Op4CE->getValue();
6833
6834 uint64_t RegWidth = 0;
6835 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6836 Op1.getReg()))
6837 RegWidth = 64;
6838 else
6839 RegWidth = 32;
6840
6841 if (Op3Val >= RegWidth)
6842 return Error(Op3.getStartLoc(),
6843 "expected integer in range [0, 31]");
6844 if (Op4Val < 1 || Op4Val > RegWidth)
6845 return Error(Op4.getStartLoc(),
6846 "expected integer in range [1, 32]");
6847
6848 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6849
6850 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6851 return Error(Op4.getStartLoc(),
6852 "requested extract overflows register");
6853
6854 const MCExpr *NewOp4 =
6855 MCConstantExpr::create(NewOp4Val, getContext());
6856 Operands[4] = AArch64Operand::CreateImm(
6857 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6858 if (Tok == "bfxil")
6859 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6860 getContext());
6861 else if (Tok == "sbfx")
6862 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6863 getContext());
6864 else if (Tok == "ubfx")
6865 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6866 getContext());
6867 else
6868 llvm_unreachable("No valid mnemonic for alias?");
6869 }
6870 }
6871 }
6872 }
6873
6874 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6875 // instruction for FP registers correctly in some rare circumstances. Convert
6876 // it to a safe instruction and warn (because silently changing someone's
6877 // assembly is rude).
6878 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6879 NumOperands == 4 && Tok == "movi") {
6880 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6881 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6882 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6883 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6884 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6885 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6886 if (Suffix.lower() == ".2d" &&
6887 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6888 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6889 " correctly on this CPU, converting to equivalent movi.16b");
6890 // Switch the suffix to .16b.
6891 unsigned Idx = Op1.isToken() ? 1 : 2;
6892 Operands[Idx] =
6893 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6894 }
6895 }
6896 }
6897
6898 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6899 // InstAlias can't quite handle this since the reg classes aren't
6900 // subclasses.
6901 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6902 // The source register can be Wn here, but the matcher expects a
6903 // GPR64. Twiddle it here if necessary.
6904 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6905 if (Op.isScalarReg()) {
6906 MCRegister Reg = getXRegFromWReg(Op.getReg());
6907 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6908 Op.getStartLoc(), Op.getEndLoc(),
6909 getContext());
6910 }
6911 }
6912 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6913 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6914 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6915 if (Op.isScalarReg() &&
6916 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6917 Op.getReg())) {
6918 // The source register can be Wn here, but the matcher expects a
6919 // GPR64. Twiddle it here if necessary.
6920 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6921 if (Op.isScalarReg()) {
6922 MCRegister Reg = getXRegFromWReg(Op.getReg());
6923 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6924 Op.getStartLoc(),
6925 Op.getEndLoc(), getContext());
6926 }
6927 }
6928 }
6929 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6930 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6931 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6932 if (Op.isScalarReg() &&
6933 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6934 Op.getReg())) {
6935 // The source register can be Wn here, but the matcher expects a
6936 // GPR32. Twiddle it here if necessary.
6937 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6938 if (Op.isScalarReg()) {
6939 MCRegister Reg = getWRegFromXReg(Op.getReg());
6940 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6941 Op.getStartLoc(),
6942 Op.getEndLoc(), getContext());
6943 }
6944 }
6945 }
6946
6947 MCInst Inst;
6948 FeatureBitset MissingFeatures;
6949 // First try to match against the secondary set of tables containing the
6950 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6951 unsigned MatchResult =
6952 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6953 MatchingInlineAsm, 1);
6954
6955 // If that fails, try against the alternate table containing long-form NEON:
6956 // "fadd v0.2s, v1.2s, v2.2s"
6957 if (MatchResult != Match_Success) {
6958 // But first, save the short-form match result: we can use it in case the
6959 // long-form match also fails.
6960 auto ShortFormNEONErrorInfo = ErrorInfo;
6961 auto ShortFormNEONMatchResult = MatchResult;
6962 auto ShortFormNEONMissingFeatures = MissingFeatures;
6963
6964 MatchResult =
6965 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6966 MatchingInlineAsm, 0);
6967
6968 // Now, both matches failed, and the long-form match failed on the mnemonic
6969 // suffix token operand. The short-form match failure is probably more
6970 // relevant: use it instead.
6971 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6972 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6973 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6974 MatchResult = ShortFormNEONMatchResult;
6975 ErrorInfo = ShortFormNEONErrorInfo;
6976 MissingFeatures = ShortFormNEONMissingFeatures;
6977 }
6978 }
6979
6980 switch (MatchResult) {
6981 case Match_Success: {
6982 // Perform range checking and other semantic validations
6983 SmallVector<SMLoc, 8> OperandLocs;
6984 NumOperands = Operands.size();
6985 for (unsigned i = 1; i < NumOperands; ++i)
6986 OperandLocs.push_back(Operands[i]->getStartLoc());
6987 if (validateInstruction(Inst, IDLoc, OperandLocs))
6988 return true;
6989
6990 Inst.setLoc(IDLoc);
6991 Out.emitInstruction(Inst, getSTI());
6992 return false;
6993 }
6994 case Match_MissingFeature: {
6995 assert(MissingFeatures.any() && "Unknown missing feature!");
6996 // Special case the error message for the very common case where only
6997 // a single subtarget feature is missing (neon, e.g.).
6998 std::string Msg = "instruction requires:";
6999 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7000 if (MissingFeatures[i]) {
7001 Msg += " ";
7002 Msg += getSubtargetFeatureName(i);
7003 }
7004 }
7005 return Error(IDLoc, Msg);
7006 }
7007 case Match_MnemonicFail:
7008 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
7009 case Match_InvalidOperand: {
7010 SMLoc ErrorLoc = IDLoc;
7011
7012 if (ErrorInfo != ~0ULL) {
7013 if (ErrorInfo >= Operands.size())
7014 return Error(IDLoc, "too few operands for instruction",
7015 SMRange(IDLoc, getTok().getLoc()));
7016
7017 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7018 if (ErrorLoc == SMLoc())
7019 ErrorLoc = IDLoc;
7020 }
7021 // If the match failed on a suffix token operand, tweak the diagnostic
7022 // accordingly.
7023 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7024 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7025 MatchResult = Match_InvalidSuffix;
7026
7027 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7028 }
7029 case Match_InvalidTiedOperand:
7030 case Match_InvalidMemoryIndexed1:
7031 case Match_InvalidMemoryIndexed2:
7032 case Match_InvalidMemoryIndexed4:
7033 case Match_InvalidMemoryIndexed8:
7034 case Match_InvalidMemoryIndexed16:
7035 case Match_InvalidCondCode:
7036 case Match_AddSubLSLImm3ShiftLarge:
7037 case Match_AddSubRegExtendSmall:
7038 case Match_AddSubRegExtendLarge:
7039 case Match_AddSubSecondSource:
7040 case Match_LogicalSecondSource:
7041 case Match_AddSubRegShift32:
7042 case Match_AddSubRegShift64:
7043 case Match_InvalidMovImm32Shift:
7044 case Match_InvalidMovImm64Shift:
7045 case Match_InvalidFPImm:
7046 case Match_InvalidMemoryWExtend8:
7047 case Match_InvalidMemoryWExtend16:
7048 case Match_InvalidMemoryWExtend32:
7049 case Match_InvalidMemoryWExtend64:
7050 case Match_InvalidMemoryWExtend128:
7051 case Match_InvalidMemoryXExtend8:
7052 case Match_InvalidMemoryXExtend16:
7053 case Match_InvalidMemoryXExtend32:
7054 case Match_InvalidMemoryXExtend64:
7055 case Match_InvalidMemoryXExtend128:
7056 case Match_InvalidMemoryIndexed1SImm4:
7057 case Match_InvalidMemoryIndexed2SImm4:
7058 case Match_InvalidMemoryIndexed3SImm4:
7059 case Match_InvalidMemoryIndexed4SImm4:
7060 case Match_InvalidMemoryIndexed1SImm6:
7061 case Match_InvalidMemoryIndexed16SImm4:
7062 case Match_InvalidMemoryIndexed32SImm4:
7063 case Match_InvalidMemoryIndexed4SImm7:
7064 case Match_InvalidMemoryIndexed8SImm7:
7065 case Match_InvalidMemoryIndexed16SImm7:
7066 case Match_InvalidMemoryIndexed8UImm5:
7067 case Match_InvalidMemoryIndexed8UImm3:
7068 case Match_InvalidMemoryIndexed4UImm5:
7069 case Match_InvalidMemoryIndexed2UImm5:
7070 case Match_InvalidMemoryIndexed1UImm6:
7071 case Match_InvalidMemoryIndexed2UImm6:
7072 case Match_InvalidMemoryIndexed4UImm6:
7073 case Match_InvalidMemoryIndexed8UImm6:
7074 case Match_InvalidMemoryIndexed16UImm6:
7075 case Match_InvalidMemoryIndexedSImm6:
7076 case Match_InvalidMemoryIndexedSImm5:
7077 case Match_InvalidMemoryIndexedSImm8:
7078 case Match_InvalidMemoryIndexedSImm9:
7079 case Match_InvalidMemoryIndexed16SImm9:
7080 case Match_InvalidMemoryIndexed8SImm10:
7081 case Match_InvalidImm0_0:
7082 case Match_InvalidImm0_1:
7083 case Match_InvalidImm0_3:
7084 case Match_InvalidImm0_7:
7085 case Match_InvalidImm0_15:
7086 case Match_InvalidImm0_31:
7087 case Match_InvalidImm0_63:
7088 case Match_InvalidImm0_127:
7089 case Match_InvalidImm0_255:
7090 case Match_InvalidImm0_65535:
7091 case Match_InvalidImm1_8:
7092 case Match_InvalidImm1_16:
7093 case Match_InvalidImm1_32:
7094 case Match_InvalidImm1_64:
7095 case Match_InvalidImmM1_62:
7096 case Match_InvalidMemoryIndexedRange2UImm0:
7097 case Match_InvalidMemoryIndexedRange2UImm1:
7098 case Match_InvalidMemoryIndexedRange2UImm2:
7099 case Match_InvalidMemoryIndexedRange2UImm3:
7100 case Match_InvalidMemoryIndexedRange4UImm0:
7101 case Match_InvalidMemoryIndexedRange4UImm1:
7102 case Match_InvalidMemoryIndexedRange4UImm2:
7103 case Match_InvalidSVEAddSubImm8:
7104 case Match_InvalidSVEAddSubImm16:
7105 case Match_InvalidSVEAddSubImm32:
7106 case Match_InvalidSVEAddSubImm64:
7107 case Match_InvalidSVECpyImm8:
7108 case Match_InvalidSVECpyImm16:
7109 case Match_InvalidSVECpyImm32:
7110 case Match_InvalidSVECpyImm64:
7111 case Match_InvalidIndexRange0_0:
7112 case Match_InvalidIndexRange1_1:
7113 case Match_InvalidIndexRange0_15:
7114 case Match_InvalidIndexRange0_7:
7115 case Match_InvalidIndexRange0_3:
7116 case Match_InvalidIndexRange0_1:
7117 case Match_InvalidSVEIndexRange0_63:
7118 case Match_InvalidSVEIndexRange0_31:
7119 case Match_InvalidSVEIndexRange0_15:
7120 case Match_InvalidSVEIndexRange0_7:
7121 case Match_InvalidSVEIndexRange0_3:
7122 case Match_InvalidLabel:
7123 case Match_InvalidComplexRotationEven:
7124 case Match_InvalidComplexRotationOdd:
7125 case Match_InvalidGPR64shifted8:
7126 case Match_InvalidGPR64shifted16:
7127 case Match_InvalidGPR64shifted32:
7128 case Match_InvalidGPR64shifted64:
7129 case Match_InvalidGPR64shifted128:
7130 case Match_InvalidGPR64NoXZRshifted8:
7131 case Match_InvalidGPR64NoXZRshifted16:
7132 case Match_InvalidGPR64NoXZRshifted32:
7133 case Match_InvalidGPR64NoXZRshifted64:
7134 case Match_InvalidGPR64NoXZRshifted128:
7135 case Match_InvalidZPR32UXTW8:
7136 case Match_InvalidZPR32UXTW16:
7137 case Match_InvalidZPR32UXTW32:
7138 case Match_InvalidZPR32UXTW64:
7139 case Match_InvalidZPR32SXTW8:
7140 case Match_InvalidZPR32SXTW16:
7141 case Match_InvalidZPR32SXTW32:
7142 case Match_InvalidZPR32SXTW64:
7143 case Match_InvalidZPR64UXTW8:
7144 case Match_InvalidZPR64SXTW8:
7145 case Match_InvalidZPR64UXTW16:
7146 case Match_InvalidZPR64SXTW16:
7147 case Match_InvalidZPR64UXTW32:
7148 case Match_InvalidZPR64SXTW32:
7149 case Match_InvalidZPR64UXTW64:
7150 case Match_InvalidZPR64SXTW64:
7151 case Match_InvalidZPR32LSL8:
7152 case Match_InvalidZPR32LSL16:
7153 case Match_InvalidZPR32LSL32:
7154 case Match_InvalidZPR32LSL64:
7155 case Match_InvalidZPR64LSL8:
7156 case Match_InvalidZPR64LSL16:
7157 case Match_InvalidZPR64LSL32:
7158 case Match_InvalidZPR64LSL64:
7159 case Match_InvalidZPR0:
7160 case Match_InvalidZPR8:
7161 case Match_InvalidZPR16:
7162 case Match_InvalidZPR32:
7163 case Match_InvalidZPR64:
7164 case Match_InvalidZPR128:
7165 case Match_InvalidZPR_3b8:
7166 case Match_InvalidZPR_3b16:
7167 case Match_InvalidZPR_3b32:
7168 case Match_InvalidZPR_4b8:
7169 case Match_InvalidZPR_4b16:
7170 case Match_InvalidZPR_4b32:
7171 case Match_InvalidZPR_4b64:
7172 case Match_InvalidSVEPPRorPNRAnyReg:
7173 case Match_InvalidSVEPPRorPNRBReg:
7174 case Match_InvalidSVEPredicateAnyReg:
7175 case Match_InvalidSVEPattern:
7176 case Match_InvalidSVEVecLenSpecifier:
7177 case Match_InvalidSVEPredicateBReg:
7178 case Match_InvalidSVEPredicateHReg:
7179 case Match_InvalidSVEPredicateSReg:
7180 case Match_InvalidSVEPredicateDReg:
7181 case Match_InvalidSVEPredicate3bAnyReg:
7182 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7183 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7184 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7185 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7186 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7187 case Match_InvalidSVEPNPredicateBReg:
7188 case Match_InvalidSVEPNPredicateHReg:
7189 case Match_InvalidSVEPNPredicateSReg:
7190 case Match_InvalidSVEPNPredicateDReg:
7191 case Match_InvalidSVEPredicateListMul2x8:
7192 case Match_InvalidSVEPredicateListMul2x16:
7193 case Match_InvalidSVEPredicateListMul2x32:
7194 case Match_InvalidSVEPredicateListMul2x64:
7195 case Match_InvalidSVEExactFPImmOperandHalfOne:
7196 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7197 case Match_InvalidSVEExactFPImmOperandZeroOne:
7198 case Match_InvalidMatrixTile16:
7199 case Match_InvalidMatrixTile32:
7200 case Match_InvalidMatrixTile64:
7201 case Match_InvalidMatrix:
7202 case Match_InvalidMatrix8:
7203 case Match_InvalidMatrix16:
7204 case Match_InvalidMatrix32:
7205 case Match_InvalidMatrix64:
7206 case Match_InvalidMatrixTileVectorH8:
7207 case Match_InvalidMatrixTileVectorH16:
7208 case Match_InvalidMatrixTileVectorH32:
7209 case Match_InvalidMatrixTileVectorH64:
7210 case Match_InvalidMatrixTileVectorH128:
7211 case Match_InvalidMatrixTileVectorV8:
7212 case Match_InvalidMatrixTileVectorV16:
7213 case Match_InvalidMatrixTileVectorV32:
7214 case Match_InvalidMatrixTileVectorV64:
7215 case Match_InvalidMatrixTileVectorV128:
7216 case Match_InvalidSVCR:
7217 case Match_InvalidMatrixIndexGPR32_12_15:
7218 case Match_InvalidMatrixIndexGPR32_8_11:
7219 case Match_InvalidLookupTable:
7220 case Match_InvalidZPRMul2_Lo8:
7221 case Match_InvalidZPRMul2_Hi8:
7222 case Match_InvalidZPRMul2_Lo16:
7223 case Match_InvalidZPRMul2_Hi16:
7224 case Match_InvalidZPRMul2_Lo32:
7225 case Match_InvalidZPRMul2_Hi32:
7226 case Match_InvalidZPRMul2_Lo64:
7227 case Match_InvalidZPRMul2_Hi64:
7228 case Match_InvalidZPR_K0:
7229 case Match_InvalidSVEVectorList2x8Mul2:
7230 case Match_InvalidSVEVectorList2x16Mul2:
7231 case Match_InvalidSVEVectorList2x32Mul2:
7232 case Match_InvalidSVEVectorList2x64Mul2:
7233 case Match_InvalidSVEVectorList2x128Mul2:
7234 case Match_InvalidSVEVectorList4x8Mul4:
7235 case Match_InvalidSVEVectorList4x16Mul4:
7236 case Match_InvalidSVEVectorList4x32Mul4:
7237 case Match_InvalidSVEVectorList4x64Mul4:
7238 case Match_InvalidSVEVectorList4x128Mul4:
7239 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7240 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7241 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7242 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7243 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7244 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7245 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7246 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7247 case Match_InvalidSVEVectorListStrided2x8:
7248 case Match_InvalidSVEVectorListStrided2x16:
7249 case Match_InvalidSVEVectorListStrided2x32:
7250 case Match_InvalidSVEVectorListStrided2x64:
7251 case Match_InvalidSVEVectorListStrided4x8:
7252 case Match_InvalidSVEVectorListStrided4x16:
7253 case Match_InvalidSVEVectorListStrided4x32:
7254 case Match_InvalidSVEVectorListStrided4x64:
7255 case Match_MSR:
7256 case Match_MRS: {
7257 if (ErrorInfo >= Operands.size())
7258 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7259 // Any time we get here, there's nothing fancy to do. Just get the
7260 // operand SMLoc and display the diagnostic.
7261 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7262 if (ErrorLoc == SMLoc())
7263 ErrorLoc = IDLoc;
7264 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7265 }
7266 }
7267
7268 llvm_unreachable("Implement any new match types added!");
7269}
7270
7271/// ParseDirective parses the arm specific directives
7272bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7273 const MCContext::Environment Format = getContext().getObjectFileType();
7274 bool IsMachO = Format == MCContext::IsMachO;
7275 bool IsCOFF = Format == MCContext::IsCOFF;
7276 bool IsELF = Format == MCContext::IsELF;
7277
7278 auto IDVal = DirectiveID.getIdentifier().lower();
7279 SMLoc Loc = DirectiveID.getLoc();
7280 if (IDVal == ".arch")
7281 parseDirectiveArch(Loc);
7282 else if (IDVal == ".cpu")
7283 parseDirectiveCPU(Loc);
7284 else if (IDVal == ".tlsdesccall")
7285 parseDirectiveTLSDescCall(Loc);
7286 else if (IDVal == ".ltorg" || IDVal == ".pool")
7287 parseDirectiveLtorg(Loc);
7288 else if (IDVal == ".unreq")
7289 parseDirectiveUnreq(Loc);
7290 else if (IDVal == ".inst")
7291 parseDirectiveInst(Loc);
7292 else if (IDVal == ".cfi_negate_ra_state")
7293 parseDirectiveCFINegateRAState();
7294 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7295 parseDirectiveCFINegateRAStateWithPC();
7296 else if (IDVal == ".cfi_b_key_frame")
7297 parseDirectiveCFIBKeyFrame();
7298 else if (IDVal == ".cfi_mte_tagged_frame")
7299 parseDirectiveCFIMTETaggedFrame();
7300 else if (IDVal == ".arch_extension")
7301 parseDirectiveArchExtension(Loc);
7302 else if (IDVal == ".variant_pcs")
7303 parseDirectiveVariantPCS(Loc);
7304 else if (IsMachO) {
7305 if (IDVal == MCLOHDirectiveName())
7306 parseDirectiveLOH(IDVal, Loc);
7307 else
7308 return true;
7309 } else if (IsCOFF) {
7310 if (IDVal == ".seh_stackalloc")
7311 parseDirectiveSEHAllocStack(Loc);
7312 else if (IDVal == ".seh_endprologue")
7313 parseDirectiveSEHPrologEnd(Loc);
7314 else if (IDVal == ".seh_save_r19r20_x")
7315 parseDirectiveSEHSaveR19R20X(Loc);
7316 else if (IDVal == ".seh_save_fplr")
7317 parseDirectiveSEHSaveFPLR(Loc);
7318 else if (IDVal == ".seh_save_fplr_x")
7319 parseDirectiveSEHSaveFPLRX(Loc);
7320 else if (IDVal == ".seh_save_reg")
7321 parseDirectiveSEHSaveReg(Loc);
7322 else if (IDVal == ".seh_save_reg_x")
7323 parseDirectiveSEHSaveRegX(Loc);
7324 else if (IDVal == ".seh_save_regp")
7325 parseDirectiveSEHSaveRegP(Loc);
7326 else if (IDVal == ".seh_save_regp_x")
7327 parseDirectiveSEHSaveRegPX(Loc);
7328 else if (IDVal == ".seh_save_lrpair")
7329 parseDirectiveSEHSaveLRPair(Loc);
7330 else if (IDVal == ".seh_save_freg")
7331 parseDirectiveSEHSaveFReg(Loc);
7332 else if (IDVal == ".seh_save_freg_x")
7333 parseDirectiveSEHSaveFRegX(Loc);
7334 else if (IDVal == ".seh_save_fregp")
7335 parseDirectiveSEHSaveFRegP(Loc);
7336 else if (IDVal == ".seh_save_fregp_x")
7337 parseDirectiveSEHSaveFRegPX(Loc);
7338 else if (IDVal == ".seh_set_fp")
7339 parseDirectiveSEHSetFP(Loc);
7340 else if (IDVal == ".seh_add_fp")
7341 parseDirectiveSEHAddFP(Loc);
7342 else if (IDVal == ".seh_nop")
7343 parseDirectiveSEHNop(Loc);
7344 else if (IDVal == ".seh_save_next")
7345 parseDirectiveSEHSaveNext(Loc);
7346 else if (IDVal == ".seh_startepilogue")
7347 parseDirectiveSEHEpilogStart(Loc);
7348 else if (IDVal == ".seh_endepilogue")
7349 parseDirectiveSEHEpilogEnd(Loc);
7350 else if (IDVal == ".seh_trap_frame")
7351 parseDirectiveSEHTrapFrame(Loc);
7352 else if (IDVal == ".seh_pushframe")
7353 parseDirectiveSEHMachineFrame(Loc);
7354 else if (IDVal == ".seh_context")
7355 parseDirectiveSEHContext(Loc);
7356 else if (IDVal == ".seh_ec_context")
7357 parseDirectiveSEHECContext(Loc);
7358 else if (IDVal == ".seh_clear_unwound_to_call")
7359 parseDirectiveSEHClearUnwoundToCall(Loc);
7360 else if (IDVal == ".seh_pac_sign_lr")
7361 parseDirectiveSEHPACSignLR(Loc);
7362 else if (IDVal == ".seh_save_any_reg")
7363 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7364 else if (IDVal == ".seh_save_any_reg_p")
7365 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7366 else if (IDVal == ".seh_save_any_reg_x")
7367 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7368 else if (IDVal == ".seh_save_any_reg_px")
7369 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7370 else if (IDVal == ".seh_allocz")
7371 parseDirectiveSEHAllocZ(Loc);
7372 else if (IDVal == ".seh_save_zreg")
7373 parseDirectiveSEHSaveZReg(Loc);
7374 else if (IDVal == ".seh_save_preg")
7375 parseDirectiveSEHSavePReg(Loc);
7376 else
7377 return true;
7378 } else if (IsELF) {
7379 if (IDVal == ".aeabi_subsection")
7380 parseDirectiveAeabiSubSectionHeader(Loc);
7381 else if (IDVal == ".aeabi_attribute")
7382 parseDirectiveAeabiAArch64Attr(Loc);
7383 else
7384 return true;
7385 } else
7386 return true;
7387 return false;
7388}
7389
7390static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7391 SmallVector<StringRef, 4> &RequestedExtensions) {
7392 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7393 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7394
7395 if (!NoCrypto && Crypto) {
7396 // Map 'generic' (and others) to sha2 and aes, because
7397 // that was the traditional meaning of crypto.
7398 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7399 ArchInfo == AArch64::ARMV8_3A) {
7400 RequestedExtensions.push_back("sha2");
7401 RequestedExtensions.push_back("aes");
7402 }
7403 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7404 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7405 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7406 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7407 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7408 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7409 RequestedExtensions.push_back("sm4");
7410 RequestedExtensions.push_back("sha3");
7411 RequestedExtensions.push_back("sha2");
7412 RequestedExtensions.push_back("aes");
7413 }
7414 } else if (NoCrypto) {
7415 // Map 'generic' (and others) to sha2 and aes, because
7416 // that was the traditional meaning of crypto.
7417 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7418 ArchInfo == AArch64::ARMV8_3A) {
7419 RequestedExtensions.push_back("nosha2");
7420 RequestedExtensions.push_back("noaes");
7421 }
7422 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7423 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7424 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7425 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7426 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7427 ArchInfo == AArch64::ARMV9_4A) {
7428 RequestedExtensions.push_back("nosm4");
7429 RequestedExtensions.push_back("nosha3");
7430 RequestedExtensions.push_back("nosha2");
7431 RequestedExtensions.push_back("noaes");
7432 }
7433 }
7434}
7435
7437 return SMLoc::getFromPointer(L.getPointer() + Offset);
7438}
7439
7440/// parseDirectiveArch
7441/// ::= .arch token
7442bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7443 SMLoc CurLoc = getLoc();
7444
7445 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7446 StringRef Arch, ExtensionString;
7447 std::tie(Arch, ExtensionString) = Name.split('+');
7448
7449 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7450 if (!ArchInfo)
7451 return Error(CurLoc, "unknown arch name");
7452
7453 if (parseToken(AsmToken::EndOfStatement))
7454 return true;
7455
7456 // Get the architecture and extension features.
7457 std::vector<StringRef> AArch64Features;
7458 AArch64Features.push_back(ArchInfo->ArchFeature);
7459 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7460
7461 MCSubtargetInfo &STI = copySTI();
7462 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7463 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7464 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7465
7466 SmallVector<StringRef, 4> RequestedExtensions;
7467 if (!ExtensionString.empty())
7468 ExtensionString.split(RequestedExtensions, '+');
7469
7470 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7471 CurLoc = incrementLoc(CurLoc, Arch.size());
7472
7473 for (auto Name : RequestedExtensions) {
7474 // Advance source location past '+'.
7475 CurLoc = incrementLoc(CurLoc, 1);
7476
7477 bool EnableFeature = !Name.consume_front_insensitive("no");
7478
7479 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7480 return Extension.Name == Name;
7481 });
7482
7483 if (It == std::end(ExtensionMap))
7484 return Error(CurLoc, "unsupported architectural extension: " + Name);
7485
7486 if (EnableFeature)
7487 STI.SetFeatureBitsTransitively(It->Features);
7488 else
7489 STI.ClearFeatureBitsTransitively(It->Features);
7490 CurLoc = incrementLoc(CurLoc, Name.size());
7491 }
7492 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7493 setAvailableFeatures(Features);
7494
7495 getTargetStreamer().emitDirectiveArch(Name);
7496 return false;
7497}
7498
7499/// parseDirectiveArchExtension
7500/// ::= .arch_extension [no]feature
7501bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7502 SMLoc ExtLoc = getLoc();
7503
7504 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7505
7506 if (parseEOL())
7507 return true;
7508
7509 bool EnableFeature = true;
7510 StringRef Name = FullName;
7511 if (Name.starts_with_insensitive("no")) {
7512 EnableFeature = false;
7513 Name = Name.substr(2);
7514 }
7515
7516 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7517 return Extension.Name == Name;
7518 });
7519
7520 if (It == std::end(ExtensionMap))
7521 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7522
7523 MCSubtargetInfo &STI = copySTI();
7524 if (EnableFeature)
7525 STI.SetFeatureBitsTransitively(It->Features);
7526 else
7527 STI.ClearFeatureBitsTransitively(It->Features);
7528 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7529 setAvailableFeatures(Features);
7530
7531 getTargetStreamer().emitDirectiveArchExtension(FullName);
7532 return false;
7533}
7534
7535/// parseDirectiveCPU
7536/// ::= .cpu id
7537bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7538 SMLoc CurLoc = getLoc();
7539
7540 StringRef CPU, ExtensionString;
7541 std::tie(CPU, ExtensionString) =
7542 getParser().parseStringToEndOfStatement().trim().split('+');
7543
7544 if (parseToken(AsmToken::EndOfStatement))
7545 return true;
7546
7547 SmallVector<StringRef, 4> RequestedExtensions;
7548 if (!ExtensionString.empty())
7549 ExtensionString.split(RequestedExtensions, '+');
7550
7551 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7552 if (!CpuArch) {
7553 Error(CurLoc, "unknown CPU name");
7554 return false;
7555 }
7556 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7557
7558 MCSubtargetInfo &STI = copySTI();
7559 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7560 CurLoc = incrementLoc(CurLoc, CPU.size());
7561
7562 for (auto Name : RequestedExtensions) {
7563 // Advance source location past '+'.
7564 CurLoc = incrementLoc(CurLoc, 1);
7565
7566 bool EnableFeature = !Name.consume_front_insensitive("no");
7567
7568 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7569 return Extension.Name == Name;
7570 });
7571
7572 if (It == std::end(ExtensionMap))
7573 return Error(CurLoc, "unsupported architectural extension: " + Name);
7574
7575 if (EnableFeature)
7576 STI.SetFeatureBitsTransitively(It->Features);
7577 else
7578 STI.ClearFeatureBitsTransitively(It->Features);
7579 CurLoc = incrementLoc(CurLoc, Name.size());
7580 }
7581 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7582 setAvailableFeatures(Features);
7583 return false;
7584}
7585
7586/// parseDirectiveInst
7587/// ::= .inst opcode [, ...]
7588bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7589 if (getLexer().is(AsmToken::EndOfStatement))
7590 return Error(Loc, "expected expression following '.inst' directive");
7591
7592 auto parseOp = [&]() -> bool {
7593 SMLoc L = getLoc();
7594 const MCExpr *Expr = nullptr;
7595 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7596 return true;
7597 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7598 if (check(!Value, L, "expected constant expression"))
7599 return true;
7600 getTargetStreamer().emitInst(Value->getValue());
7601 return false;
7602 };
7603
7604 return parseMany(parseOp);
7605}
7606
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
// Marks the point of a TLS descriptor call sequence by emitting the
// AArch64::TLSDESCCALL pseudo-instruction for the named symbol.
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  // Require exactly one symbol operand, then end of statement.
  if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
      parseToken(AsmToken::EndOfStatement))
    return true;

  // Build a reference to the (possibly forward-declared) symbol.
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  // NOTE(review): 'Expr' is constructed but never attached to 'Inst' in this
  // snapshot; upstream attaches it via
  // Inst.addOperand(MCOperand::createExpr(Expr)). Confirm whether a line was
  // lost here.

  getParser().getStreamer().emitInstruction(Inst, getSTI());
  return false;
}
7626
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// Parses a Mach-O linker-optimization-hint directive and forwards it to the
/// streamer.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  // NOTE(review): the declarations of 'Kind' (MCLOHType) and 'Args'
  // (upstream: SmallVector<MCSymbol *, 3>) appear to be on lines missing
  // from this snapshot — confirm against the full source.
  if (getTok().isNot(AsmToken::Identifier)) {
    if (getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getTok().getIntVal();
    // NOTE(review): '-1U' promotes to 0xFFFFFFFF in this comparison, so any
    // Id up to UINT32_MAX reaches the validity check — confirm intent.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Collect exactly NbArgs comma-separated label operands.
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    if (Idx + 1 == NbArgs)
      break;
    if (parseComma())
      return true;
  }
  if (parseEOL())
    return true;

  getStreamer().emitLOHDirective(Kind, Args);
  return false;
}
7675
7676/// parseDirectiveLtorg
7677/// ::= .ltorg | .pool
7678bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7679 if (parseEOL())
7680 return true;
7681 getTargetStreamer().emitCurrentConstantPool();
7682 return false;
7683}
7684
7685/// parseDirectiveReq
7686/// ::= name .req registername
7687bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7688 Lex(); // Eat the '.req' token.
7689 SMLoc SRegLoc = getLoc();
7690 RegKind RegisterKind = RegKind::Scalar;
7691 MCRegister RegNum;
7692 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7693
7694 if (!ParseRes.isSuccess()) {
7695 StringRef Kind;
7696 RegisterKind = RegKind::NeonVector;
7697 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7698
7699 if (ParseRes.isFailure())
7700 return true;
7701
7702 if (ParseRes.isSuccess() && !Kind.empty())
7703 return Error(SRegLoc, "vector register without type specifier expected");
7704 }
7705
7706 if (!ParseRes.isSuccess()) {
7707 StringRef Kind;
7708 RegisterKind = RegKind::SVEDataVector;
7709 ParseRes =
7710 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7711
7712 if (ParseRes.isFailure())
7713 return true;
7714
7715 if (ParseRes.isSuccess() && !Kind.empty())
7716 return Error(SRegLoc,
7717 "sve vector register without type specifier expected");
7718 }
7719
7720 if (!ParseRes.isSuccess()) {
7721 StringRef Kind;
7722 RegisterKind = RegKind::SVEPredicateVector;
7723 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7724
7725 if (ParseRes.isFailure())
7726 return true;
7727
7728 if (ParseRes.isSuccess() && !Kind.empty())
7729 return Error(SRegLoc,
7730 "sve predicate register without type specifier expected");
7731 }
7732
7733 if (!ParseRes.isSuccess())
7734 return Error(SRegLoc, "register name or alias expected");
7735
7736 // Shouldn't be anything else.
7737 if (parseEOL())
7738 return true;
7739
7740 auto pair = std::make_pair(RegisterKind, RegNum);
7741 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7742 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7743
7744 return false;
7745}
7746
7747/// parseDirectiveUneq
7748/// ::= .unreq registername
7749bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7750 if (getTok().isNot(AsmToken::Identifier))
7751 return TokError("unexpected input in .unreq directive.");
7752 RegisterReqs.erase(getTok().getIdentifier().lower());
7753 Lex(); // Eat the identifier.
7754 return parseToken(AsmToken::EndOfStatement);
7755}
7756
7757bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7758 if (parseEOL())
7759 return true;
7760 getStreamer().emitCFINegateRAState();
7761 return false;
7762}
7763
7764bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7765 if (parseEOL())
7766 return true;
7767 getStreamer().emitCFINegateRAStateWithPC();
7768 return false;
7769}
7770
7771/// parseDirectiveCFIBKeyFrame
7772/// ::= .cfi_b_key
7773bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7774 if (parseEOL())
7775 return true;
7776 getStreamer().emitCFIBKeyFrame();
7777 return false;
7778}
7779
7780/// parseDirectiveCFIMTETaggedFrame
7781/// ::= .cfi_mte_tagged_frame
7782bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7783 if (parseEOL())
7784 return true;
7785 getStreamer().emitCFIMTETaggedFrame();
7786 return false;
7787}
7788
7789/// parseDirectiveVariantPCS
7790/// ::= .variant_pcs symbolname
7791bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7792 StringRef Name;
7793 if (getParser().parseIdentifier(Name))
7794 return TokError("expected symbol name");
7795 if (parseEOL())
7796 return true;
7797 getTargetStreamer().emitDirectiveVariantPCS(
7798 getContext().getOrCreateSymbol(Name));
7799 return false;
7800}
7801
7802/// parseDirectiveSEHAllocStack
7803/// ::= .seh_stackalloc
7804bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7805 int64_t Size;
7806 if (parseImmExpr(Size))
7807 return true;
7808 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7809 return false;
7810}
7811
7812/// parseDirectiveSEHPrologEnd
7813/// ::= .seh_endprologue
7814bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7815 getTargetStreamer().emitARM64WinCFIPrologEnd();
7816 return false;
7817}
7818
7819/// parseDirectiveSEHSaveR19R20X
7820/// ::= .seh_save_r19r20_x
7821bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7822 int64_t Offset;
7823 if (parseImmExpr(Offset))
7824 return true;
7825 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7826 return false;
7827}
7828
7829/// parseDirectiveSEHSaveFPLR
7830/// ::= .seh_save_fplr
7831bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7832 int64_t Offset;
7833 if (parseImmExpr(Offset))
7834 return true;
7835 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7836 return false;
7837}
7838
7839/// parseDirectiveSEHSaveFPLRX
7840/// ::= .seh_save_fplr_x
7841bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7842 int64_t Offset;
7843 if (parseImmExpr(Offset))
7844 return true;
7845 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7846 return false;
7847}
7848
7849/// parseDirectiveSEHSaveReg
7850/// ::= .seh_save_reg
7851bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7852 unsigned Reg;
7853 int64_t Offset;
7854 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7855 parseComma() || parseImmExpr(Offset))
7856 return true;
7857 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7858 return false;
7859}
7860
7861/// parseDirectiveSEHSaveRegX
7862/// ::= .seh_save_reg_x
7863bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7864 unsigned Reg;
7865 int64_t Offset;
7866 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7867 parseComma() || parseImmExpr(Offset))
7868 return true;
7869 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7870 return false;
7871}
7872
7873/// parseDirectiveSEHSaveRegP
7874/// ::= .seh_save_regp
7875bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7876 unsigned Reg;
7877 int64_t Offset;
7878 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7879 parseComma() || parseImmExpr(Offset))
7880 return true;
7881 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7882 return false;
7883}
7884
7885/// parseDirectiveSEHSaveRegPX
7886/// ::= .seh_save_regp_x
7887bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7888 unsigned Reg;
7889 int64_t Offset;
7890 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7891 parseComma() || parseImmExpr(Offset))
7892 return true;
7893 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7894 return false;
7895}
7896
7897/// parseDirectiveSEHSaveLRPair
7898/// ::= .seh_save_lrpair
7899bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7900 unsigned Reg;
7901 int64_t Offset;
7902 L = getLoc();
7903 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7904 parseComma() || parseImmExpr(Offset))
7905 return true;
7906 if (check(((Reg - 19) % 2 != 0), L,
7907 "expected register with even offset from x19"))
7908 return true;
7909 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7910 return false;
7911}
7912
7913/// parseDirectiveSEHSaveFReg
7914/// ::= .seh_save_freg
7915bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7916 unsigned Reg;
7917 int64_t Offset;
7918 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7919 parseComma() || parseImmExpr(Offset))
7920 return true;
7921 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7922 return false;
7923}
7924
7925/// parseDirectiveSEHSaveFRegX
7926/// ::= .seh_save_freg_x
7927bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7928 unsigned Reg;
7929 int64_t Offset;
7930 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7931 parseComma() || parseImmExpr(Offset))
7932 return true;
7933 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7934 return false;
7935}
7936
7937/// parseDirectiveSEHSaveFRegP
7938/// ::= .seh_save_fregp
7939bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7940 unsigned Reg;
7941 int64_t Offset;
7942 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7943 parseComma() || parseImmExpr(Offset))
7944 return true;
7945 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7946 return false;
7947}
7948
7949/// parseDirectiveSEHSaveFRegPX
7950/// ::= .seh_save_fregp_x
7951bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7952 unsigned Reg;
7953 int64_t Offset;
7954 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7955 parseComma() || parseImmExpr(Offset))
7956 return true;
7957 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7958 return false;
7959}
7960
7961/// parseDirectiveSEHSetFP
7962/// ::= .seh_set_fp
7963bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7964 getTargetStreamer().emitARM64WinCFISetFP();
7965 return false;
7966}
7967
7968/// parseDirectiveSEHAddFP
7969/// ::= .seh_add_fp
7970bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7971 int64_t Size;
7972 if (parseImmExpr(Size))
7973 return true;
7974 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7975 return false;
7976}
7977
7978/// parseDirectiveSEHNop
7979/// ::= .seh_nop
7980bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7981 getTargetStreamer().emitARM64WinCFINop();
7982 return false;
7983}
7984
7985/// parseDirectiveSEHSaveNext
7986/// ::= .seh_save_next
7987bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7988 getTargetStreamer().emitARM64WinCFISaveNext();
7989 return false;
7990}
7991
7992/// parseDirectiveSEHEpilogStart
7993/// ::= .seh_startepilogue
7994bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7995 getTargetStreamer().emitARM64WinCFIEpilogStart();
7996 return false;
7997}
7998
7999/// parseDirectiveSEHEpilogEnd
8000/// ::= .seh_endepilogue
8001bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
8002 getTargetStreamer().emitARM64WinCFIEpilogEnd();
8003 return false;
8004}
8005
8006/// parseDirectiveSEHTrapFrame
8007/// ::= .seh_trap_frame
8008bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
8009 getTargetStreamer().emitARM64WinCFITrapFrame();
8010 return false;
8011}
8012
8013/// parseDirectiveSEHMachineFrame
8014/// ::= .seh_pushframe
8015bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
8016 getTargetStreamer().emitARM64WinCFIMachineFrame();
8017 return false;
8018}
8019
8020/// parseDirectiveSEHContext
8021/// ::= .seh_context
8022bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
8023 getTargetStreamer().emitARM64WinCFIContext();
8024 return false;
8025}
8026
8027/// parseDirectiveSEHECContext
8028/// ::= .seh_ec_context
8029bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
8030 getTargetStreamer().emitARM64WinCFIECContext();
8031 return false;
8032}
8033
8034/// parseDirectiveSEHClearUnwoundToCall
8035/// ::= .seh_clear_unwound_to_call
8036bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
8037 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
8038 return false;
8039}
8040
8041/// parseDirectiveSEHPACSignLR
8042/// ::= .seh_pac_sign_lr
8043bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
8044 getTargetStreamer().emitARM64WinCFIPACSignLR();
8045 return false;
8046}
8047
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
/// Parses a "save any register" SEH unwind directive: a register (x, d or q),
/// a comma, and a stack offset. \p Paired selects the pair forms (_p/_px)
/// and \p Writeback the pre-indexed forms (_x/_px).
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  // General-purpose registers: x0-x28 plus fp (x29) and lr (x30).
  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Paired or writeback saves require 16-byte alignment, single saves 8.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // lr is the last GPR, so there is no successor to pair it with.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit floating-point registers d0-d31.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers q0-q31 always require 16-byte alignment.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
8125
8126/// parseDirectiveAllocZ
8127/// ::= .seh_allocz
8128bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8129 int64_t Offset;
8130 if (parseImmExpr(Offset))
8131 return true;
8132 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8133 return false;
8134}
8135
8136/// parseDirectiveSEHSaveZReg
8137/// ::= .seh_save_zreg
8138bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8139 MCRegister RegNum;
8140 StringRef Kind;
8141 int64_t Offset;
8142 ParseStatus Res =
8143 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8144 if (!Res.isSuccess())
8145 return true;
8146 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
8147 "expected register in range z8 to z23"))
8148 return true;
8149 if (parseComma() || parseImmExpr(Offset))
8150 return true;
8151 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
8152 return false;
8153}
8154
8155/// parseDirectiveSEHSavePReg
8156/// ::= .seh_save_preg
8157bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8158 MCRegister RegNum;
8159 StringRef Kind;
8160 int64_t Offset;
8161 ParseStatus Res =
8162 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
8163 if (!Res.isSuccess())
8164 return true;
8165 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
8166 "expected register in range p4 to p15"))
8167 return true;
8168 if (parseComma() || parseImmExpr(Offset))
8169 return true;
8170 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
8171 return false;
8172}
8173
// NOTE(review): this listing appears to have lost several source lines during
// extraction (end-of-statement guards, the declarations of IsOptional/Type,
// parts of several Error() messages, and casts around the recorded subsection
// parameters). The comments below describe the apparent intent; restore the
// dropped lines from the upstream file before relying on this text.
bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
  // Handle parsing of .aeabi_subsection directives
  // - On first declaration of a subsection, expect exactly three identifiers
  // after `.aeabi_subsection`: the subsection name and two parameters.
  // - When switching to an existing subsection, it is valid to provide only
  // the subsection name, or the name together with the two parameters.
  MCAsmParser &Parser = getParser();

  // Consume the name (subsection name)
  StringRef SubsectionName;
  AArch64BuildAttributes::VendorID SubsectionNameID;
  if (Parser.getTok().is(AsmToken::Identifier)) {
    SubsectionName = Parser.getTok().getIdentifier();
    SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
  } else {
    Error(Parser.getTok().getLoc(), "subsection name not found");
    return true;
  }
  Parser.Lex();

  // Look up any previously declared subsection with this name so the
  // parameters can be checked for consistency below.
  std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
      getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
  // Check whether only the subsection name was provided.
  // If so, the user is trying to switch to a subsection that should have been
  // declared before.
  // NOTE(review): the enclosing end-of-statement check guarding this name-only
  // form appears to have been dropped in extraction.
  if (SubsectionExists) {
    // Re-activate the existing subsection with its recorded parameters.
    getTargetStreamer().emitAttributesSubsection(
        SubsectionName,
        SubsectionExists->IsOptional),
        SubsectionExists->ParameterType));
    return false;
  }
  // If subsection does not exists, report error.
  else {
    Error(Parser.getTok().getLoc(),
          "Could not switch to subsection '" + SubsectionName +
              "' using subsection name, subsection has not been defined");
    return true;
  }
  }

  // Otherwise, expecting 2 more parameters: consume a comma
  // parseComma() return *false* on success, and call Lex(), no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the first parameter (optionality parameter)
  // options: optional/required
  // NOTE(review): the declaration of IsOptional appears to be missing here.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Optionality = Parser.getTok().getIdentifier();
    IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
    // NOTE(review): the condition and message of this error appear truncated.
    Error(Parser.getTok().getLoc(),
    return true;
  }
  // A redeclaration must agree with the previously recorded optionality.
  if (SubsectionExists) {
    if (IsOptional != SubsectionExists->IsOptional) {
      Error(Parser.getTok().getLoc(),
            "optionality mismatch! subsection '" + SubsectionName +
                "' already exists with optionality defined as '" +
                SubsectionExists->IsOptional) +
                "' and not '" +
                AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
      return true;
    }
  }
  } else {
    Error(Parser.getTok().getLoc(),
          "optionality parameter not found, expected required|optional");
    return true;
  }
  // Check for possible IsOptional unaccepted values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
    if (AArch64BuildAttributes::REQUIRED == IsOptional) {
      Error(Parser.getTok().getLoc(),
            "aeabi_feature_and_bits must be marked as optional");
      return true;
    }
  }
  if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
      Error(Parser.getTok().getLoc(),
            "aeabi_pauthabi must be marked as required");
      return true;
    }
  }
  Parser.Lex();
  // consume a comma
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (type parameter)
  // NOTE(review): the declaration of Type appears to be missing here.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    // NOTE(review): the condition and message of this error appear truncated.
    Error(Parser.getTok().getLoc(),
    return true;
  }
  // A redeclaration must agree with the previously recorded type.
  if (SubsectionExists) {
    if (Type != SubsectionExists->ParameterType) {
      Error(Parser.getTok().getLoc(),
            "type mismatch! subsection '" + SubsectionName +
                "' already exists with type defined as '" +
                SubsectionExists->ParameterType) +
                "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
                "'");
      return true;
    }
  }
  } else {
    Error(Parser.getTok().getLoc(),
          "type parameter not found, expected uleb128|ntbs");
    return true;
  }
  // Check for possible unaccepted 'type' values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
      AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    // NOTE(review): the guarding condition on Type appears to be missing.
    Error(Parser.getTok().getLoc(),
          SubsectionName + " must be marked as ULEB128");
    return true;
  }
  }
  Parser.Lex();

  // Parsing finished, check for trailing tokens.
  // NOTE(review): the end-of-statement condition appears to be missing here.
  Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
                                  "attributes subsection header directive");
  return true;
  }

  // Declare/activate the subsection with the freshly parsed parameters.
  getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);

  return false;
}
8323
// NOTE(review): several source lines appear to have been lost in extraction in
// this function (the subsection-name comparisons, the case labels of the
// tag-lookup switch, and an end-of-statement guard). Comments mark the gaps;
// restore the dropped lines from the upstream file before relying on this.
bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
  // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
  // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
  // separated by a comma.
  MCAsmParser &Parser = getParser();

  // An attribute may only be emitted into a previously activated subsection.
  std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
      getTargetStreamer().getActiveAttributesSubsection();
  if (nullptr == ActiveSubsection) {
    Error(Parser.getTok().getLoc(),
          "no active subsection, build attribute can not be added");
    return true;
  }
  StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
  unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;

  // Map the active subsection's name back to a known vendor ID, if any.
  unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
  // NOTE(review): the opening of these name comparisons was dropped in
  // extraction.
      AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
    ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
      ActiveSubsectionName)

  // First token: the tag, either an unsigned integer or (for known
  // subsections) a recognized tag name.
  StringRef TagStr = "";
  unsigned Tag;
  if (Parser.getTok().is(AsmToken::Integer)) {
    Tag = getTok().getIntVal();
  } else if (Parser.getTok().is(AsmToken::Identifier)) {
    TagStr = Parser.getTok().getIdentifier();
    // NOTE(review): the case labels and tag-name lookups of this switch were
    // dropped in extraction; only the error paths remain visible.
    switch (ActiveSubsectionID) {
      // Tag was provided as an unrecognized string instead of an unsigned
      // integer
      Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
                                          "' \nExcept for public subsections, "
                                          "tags have to be an unsigned int.");
      return true;
      break;
      Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
                                          TagStr + "' for subsection '" +
                                          ActiveSubsectionName + "'");
      return true;
    }
      break;
      Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
                                          TagStr + "' for subsection '" +
                                          ActiveSubsectionName + "'");
      return true;
    }
      break;
  }
  } else {
    Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
    return true;
  }
  Parser.Lex();
  // consume a comma
  // parseComma() return *false* on success, and call Lex(), no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (attribute value)
  unsigned ValueInt = unsigned(-1);
  std::string ValueStr = "";
  if (Parser.getTok().is(AsmToken::Integer)) {
    // An integer value is only legal in a ULEB128-typed subsection.
    if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
      Error(
          Parser.getTok().getLoc(),
          "active subsection type is NTBS (string), found ULEB128 (unsigned)");
      return true;
    }
    ValueInt = getTok().getIntVal();
  } else if (Parser.getTok().is(AsmToken::Identifier)) {
    // A bare identifier is accepted as an NTBS value.
    if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
      Error(
          Parser.getTok().getLoc(),
          "active subsection type is ULEB128 (unsigned), found NTBS (string)");
      return true;
    }
    ValueStr = Parser.getTok().getIdentifier();
  } else if (Parser.getTok().is(AsmToken::String)) {
    // A quoted string is likewise only legal for NTBS subsections.
    if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
      Error(
          Parser.getTok().getLoc(),
          "active subsection type is ULEB128 (unsigned), found NTBS (string)");
      return true;
    }
    ValueStr = Parser.getTok().getString();
  } else {
    Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
    return true;
  }
  // Check for possible unaccepted values for known tags
  // (AEABI_FEATURE_AND_BITS)
  if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
    if (0 != ValueInt && 1 != ValueInt) {
      Error(Parser.getTok().getLoc(),
            "unknown AArch64 build attributes Value for Tag '" + TagStr +
                "' options are 0|1");
      return true;
    }
  }
  Parser.Lex();

  // Parsing finished. Check for trailing tokens.
  // NOTE(review): the end-of-statement condition appears to be missing here.
  Error(Parser.getTok().getLoc(),
        "unexpected token for AArch64 build attributes tag and value "
        "attribute directive");
  return true;
  }

  // Emit exactly one of the two value forms, matching the subsection type.
  if (unsigned(-1) != ValueInt) {
    getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
  }
  if ("" != ValueStr) {
    getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
                                      ValueStr);
  }
  return false;
}
8455
8456bool AArch64AsmParser::parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E) {
8457 SMLoc Loc = getLoc();
8458 if (getLexer().getKind() != AsmToken::Identifier)
8459 return TokError("expected '%' relocation specifier");
8460 StringRef Identifier = getParser().getTok().getIdentifier();
8461 auto Spec = AArch64::parsePercentSpecifierName(Identifier);
8462 if (!Spec)
8463 return TokError("invalid relocation specifier");
8464
8465 getParser().Lex(); // Eat the identifier
8466 if (parseToken(AsmToken::LParen, "expected '('"))
8467 return true;
8468
8469 const MCExpr *SubExpr;
8470 if (getParser().parseParenExpression(SubExpr, E))
8471 return true;
8472
8473 Res = MCSpecifierExpr::create(SubExpr, Spec, getContext(), Loc);
8474 return false;
8475}
8476
8477bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8478 SMLoc EndLoc;
8479 if (parseOptionalToken(AsmToken::Percent))
8480 return parseExprWithSpecifier(Res, EndLoc);
8481
8482 if (getParser().parseExpression(Res))
8483 return true;
8484 MCAsmParser &Parser = getParser();
8485 if (!parseOptionalToken(AsmToken::At))
8486 return false;
8487 if (getLexer().getKind() != AsmToken::Identifier)
8488 return Error(getLoc(), "expected relocation specifier");
8489
8490 std::string Identifier = Parser.getTok().getIdentifier().lower();
8491 SMLoc Loc = getLoc();
8492 Lex();
8493 if (Identifier == "auth")
8494 return parseAuthExpr(Res, EndLoc);
8495
8496 auto Spec = AArch64::S_None;
8497 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8498 if (Identifier == "got")
8499 Spec = AArch64::S_MACHO_GOT;
8500 }
8501 if (Spec == AArch64::S_None)
8502 return Error(Loc, "invalid relocation specifier");
8503 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
8504 Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
8505 SRE->getLoc());
8506 else
8507 return Error(Loc, "@ specifier only allowed after a symbol");
8508
8509 for (;;) {
8510 std::optional<MCBinaryExpr::Opcode> Opcode;
8511 if (parseOptionalToken(AsmToken::Plus))
8512 Opcode = MCBinaryExpr::Add;
8513 else if (parseOptionalToken(AsmToken::Minus))
8514 Opcode = MCBinaryExpr::Sub;
8515 else
8516 break;
8517 const MCExpr *Term;
8518 if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
8519 return true;
8520 Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
8521 }
8522 return false;
8523}
8524
8525/// parseAuthExpr
8526/// ::= _sym@AUTH(ib,123[,addr])
8527/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8528/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8529bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8530 MCAsmParser &Parser = getParser();
8531 MCContext &Ctx = getContext();
8532 AsmToken Tok = Parser.getTok();
8533
8534 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8535 if (parseToken(AsmToken::LParen, "expected '('"))
8536 return true;
8537
8538 if (Parser.getTok().isNot(AsmToken::Identifier))
8539 return TokError("expected key name");
8540
8541 StringRef KeyStr = Parser.getTok().getIdentifier();
8542 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8543 if (!KeyIDOrNone)
8544 return TokError("invalid key '" + KeyStr + "'");
8545 Parser.Lex();
8546
8547 if (parseToken(AsmToken::Comma, "expected ','"))
8548 return true;
8549
8550 if (Parser.getTok().isNot(AsmToken::Integer))
8551 return TokError("expected integer discriminator");
8552 int64_t Discriminator = Parser.getTok().getIntVal();
8553
8554 if (!isUInt<16>(Discriminator))
8555 return TokError("integer discriminator " + Twine(Discriminator) +
8556 " out of range [0, 0xFFFF]");
8557 Parser.Lex();
8558
8559 bool UseAddressDiversity = false;
8560 if (Parser.getTok().is(AsmToken::Comma)) {
8561 Parser.Lex();
8562 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8563 Parser.getTok().getIdentifier() != "addr")
8564 return TokError("expected 'addr'");
8565 UseAddressDiversity = true;
8566 Parser.Lex();
8567 }
8568
8569 EndLoc = Parser.getTok().getEndLoc();
8570 if (parseToken(AsmToken::RParen, "expected ')'"))
8571 return true;
8572
8573 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8574 UseAddressDiversity, Ctx, Res->getLoc());
8575 return false;
8576}
8577
8578bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8579 AArch64::Specifier &ELFSpec,
8580 AArch64::Specifier &DarwinSpec,
8581 int64_t &Addend) {
8582 ELFSpec = AArch64::S_INVALID;
8583 DarwinSpec = AArch64::S_None;
8584 Addend = 0;
8585
8586 if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
8587 ELFSpec = AE->getSpecifier();
8588 Expr = AE->getSubExpr();
8589 }
8590
8591 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8592 if (SE) {
8593 // It's a simple symbol reference with no addend.
8594 DarwinSpec = AArch64::Specifier(SE->getKind());
8595 return true;
8596 }
8597
8598 // Check that it looks like a symbol + an addend
8599 MCValue Res;
8600 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
8601 if (!Relocatable || Res.getSubSym())
8602 return false;
8603
8604 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8605 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8606 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8607 return false;
8608
8609 if (Res.getAddSym())
8610 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8611 Addend = Res.getConstant();
8612
8613 // It's some symbol reference + a constant addend, but really
8614 // shouldn't use both Darwin and ELF syntax.
8615 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8616}
8617
8618/// Force static initialization.
8619extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8627
8628#define GET_REGISTER_MATCHER
8629#define GET_SUBTARGET_FEATURE_NAME
8630#define GET_MATCHER_IMPLEMENTATION
8631#define GET_MNEMONIC_SPELL_CHECKER
8632#include "AArch64GenAsmMatcher.inc"
8633
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Succeeds only when the operand is a constant immediate equal to
  // ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;

  // If the kind is a token for a literal immediate, check if our asm operand
  // matches. This is for InstAliases which have a fixed-value immediate in
  // the asm string, such as hints which are parsed into a specific
  // instruction definition.
#define MATCH_HASH(N) \
  case MCK__HASH_##N: \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
#define MATCH_HASH_MINUS(N) \
  case MCK__HASH__MINUS_##N: \
    return MatchesOpImmediate(-N);
    // NOTE(review): the MATCH_HASH_MINUS(...) invocations appear to have been
    // lost in extraction; restore them from the upstream file.
#undef MATCH_HASH_MINUS
  }
}
8704
8705ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8706
8707 SMLoc S = getLoc();
8708
8709 if (getTok().isNot(AsmToken::Identifier))
8710 return Error(S, "expected register");
8711
8712 MCRegister FirstReg;
8713 ParseStatus Res = tryParseScalarRegister(FirstReg);
8714 if (!Res.isSuccess())
8715 return Error(S, "expected first even register of a consecutive same-size "
8716 "even/odd register pair");
8717
8718 const MCRegisterClass &WRegClass =
8719 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8720 const MCRegisterClass &XRegClass =
8721 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8722
8723 bool isXReg = XRegClass.contains(FirstReg),
8724 isWReg = WRegClass.contains(FirstReg);
8725 if (!isXReg && !isWReg)
8726 return Error(S, "expected first even register of a consecutive same-size "
8727 "even/odd register pair");
8728
8729 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8730 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8731
8732 if (FirstEncoding & 0x1)
8733 return Error(S, "expected first even register of a consecutive same-size "
8734 "even/odd register pair");
8735
8736 if (getTok().isNot(AsmToken::Comma))
8737 return Error(getLoc(), "expected comma");
8738 // Eat the comma
8739 Lex();
8740
8741 SMLoc E = getLoc();
8742 MCRegister SecondReg;
8743 Res = tryParseScalarRegister(SecondReg);
8744 if (!Res.isSuccess())
8745 return Error(E, "expected second odd register of a consecutive same-size "
8746 "even/odd register pair");
8747
8748 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8749 (isXReg && !XRegClass.contains(SecondReg)) ||
8750 (isWReg && !WRegClass.contains(SecondReg)))
8751 return Error(E, "expected second odd register of a consecutive same-size "
8752 "even/odd register pair");
8753
8754 MCRegister Pair;
8755 if (isXReg) {
8756 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8757 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8758 } else {
8759 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8760 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8761 }
8762
8763 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8764 getLoc(), getContext()));
8765
8766 return ParseStatus::Success;
8767}
8768
// Parse an SVE Z-register operand; ParseSuffix requires an element-size
// suffix (e.g. "z0.b"), ParseShiftExtend additionally accepts a trailing
// ", <shift/extend>" clause.
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;

  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (!Res.isSuccess())
    return Res;

  // When a suffix is mandatory, a bare register is not a match.
  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  // Validate the suffix and extract the element width from it.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // An optional vector index may still follow.
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  // NOTE(review): the declaration of ExtOpnd (the temporary operand vector
  // that receives the parsed shift/extend) appears to have been lost in
  // extraction; restore it from the upstream file.
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the vector-register operand.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
8819
8820ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8821 MCAsmParser &Parser = getParser();
8822
8823 SMLoc SS = getLoc();
8824 const AsmToken &TokE = getTok();
8825 bool IsHash = TokE.is(AsmToken::Hash);
8826
8827 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8828 return ParseStatus::NoMatch;
8829
8830 int64_t Pattern;
8831 if (IsHash) {
8832 Lex(); // Eat hash
8833
8834 // Parse the immediate operand.
8835 const MCExpr *ImmVal;
8836 SS = getLoc();
8837 if (Parser.parseExpression(ImmVal))
8838 return ParseStatus::Failure;
8839
8840 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8841 if (!MCE)
8842 return TokError("invalid operand for instruction");
8843
8844 Pattern = MCE->getValue();
8845 } else {
8846 // Parse the pattern
8847 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8848 if (!Pat)
8849 return ParseStatus::NoMatch;
8850
8851 Lex();
8852 Pattern = Pat->Encoding;
8853 assert(Pattern >= 0 && Pattern < 32);
8854 }
8855
8856 Operands.push_back(
8857 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8858 SS, getLoc(), getContext()));
8859
8860 return ParseStatus::Success;
8861}
8862
8863ParseStatus
8864AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8865 int64_t Pattern;
8866 SMLoc SS = getLoc();
8867 const AsmToken &TokE = getTok();
8868 // Parse the pattern
8869 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8870 TokE.getString());
8871 if (!Pat)
8872 return ParseStatus::NoMatch;
8873
8874 Lex();
8875 Pattern = Pat->Encoding;
8876 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8877
8878 Operands.push_back(
8879 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8880 SS, getLoc(), getContext()));
8881
8882 return ParseStatus::Success;
8883}
8884
8885ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8886 SMLoc SS = getLoc();
8887
8888 MCRegister XReg;
8889 if (!tryParseScalarRegister(XReg).isSuccess())
8890 return ParseStatus::NoMatch;
8891
8892 MCContext &ctx = getContext();
8893 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8894 MCRegister X8Reg = RI->getMatchingSuperReg(
8895 XReg, AArch64::x8sub_0,
8896 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8897 if (!X8Reg)
8898 return Error(SS,
8899 "expected an even-numbered x-register in the range [x0,x22]");
8900
8901 Operands.push_back(
8902 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8903 return ParseStatus::Success;
8904}
8905
8906ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8907 SMLoc S = getLoc();
8908
8909 if (getTok().isNot(AsmToken::Integer))
8910 return ParseStatus::NoMatch;
8911
8912 if (getLexer().peekTok().isNot(AsmToken::Colon))
8913 return ParseStatus::NoMatch;
8914
8915 const MCExpr *ImmF;
8916 if (getParser().parseExpression(ImmF))
8917 return ParseStatus::NoMatch;
8918
8919 if (getTok().isNot(AsmToken::Colon))
8920 return ParseStatus::NoMatch;
8921
8922 Lex(); // Eat ':'
8923 if (getTok().isNot(AsmToken::Integer))
8924 return ParseStatus::NoMatch;
8925
8926 SMLoc E = getTok().getLoc();
8927 const MCExpr *ImmL;
8928 if (getParser().parseExpression(ImmL))
8929 return ParseStatus::NoMatch;
8930
8931 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8932 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8933
8934 Operands.push_back(
8935 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8936 return ParseStatus::Success;
8937}
8938
// Parse an immediate and adjust it by Adj (+1 or -1), producing an operand
// that must land in [0, 63]. Used for operands encoded as value±1.
template <int Adj>
ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Accept an optional '#' prefix and an optional leading minus sign.
  parseOptionalToken(AsmToken::Hash);
  bool IsNegative = parseOptionalToken(AsmToken::Minus);

  if (getTok().isNot(AsmToken::Integer))
    return ParseStatus::NoMatch;

  const MCExpr *Ex;
  if (getParser().parseExpression(Ex))
    return ParseStatus::NoMatch;

  // NOTE(review): the dyn_cast<> result is dereferenced without a null check;
  // parseExpression on an Integer token presumably folds to an
  // MCConstantExpr, but cast<> would make that assumption explicit — confirm
  // against the upstream file.
  int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
  if (IsNegative)
    Imm = -Imm;

  // We want an adjusted immediate in the range [0, 63]. If we don't have one,
  // return a value, which is certain to trigger a error message about invalid
  // immediate range instead of a non-descriptive invalid operand error.
  static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
  if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
    Imm = -2;
  else
    Imm += Adj;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  // NOTE(review): the interior of this CreateImm call (the constant-expr
  // argument and location operands) appears to have been lost in extraction;
  // restore it from the upstream file.
  Operands.push_back(AArch64Operand::CreateImm(

  return ParseStatus::Success;
}
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (elements, element-width) if Suffix is a valid vector kind.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
@ Default
Value * getPointer(Value *Ptr)
static LVOptions Options
Definition LVOptions.cpp:25
Live Register Matrix
loop data Loop Data Prefetch
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
APInt bitcastToAPInt() const
Definition APFloat.h:1404
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition AsmLexer.h:121
void UnLex(AsmToken const &Token)
Definition AsmLexer.h:106
LLVM_ABI SMLoc getLoc() const
Definition AsmLexer.cpp:31
int64_t getIntVal() const
Definition MCAsmMacro.h:108
bool isNot(TokenKind K) const
Definition MCAsmMacro.h:76
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition MCAsmMacro.h:103
bool is(TokenKind K) const
Definition MCAsmMacro.h:75
LLVM_ABI SMLoc getEndLoc() const
Definition AsmLexer.cpp:33
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition MCAsmMacro.h:92
Base class for user error types.
Definition Error.h:354
Container class for subtarget features.
constexpr size_t size() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
AsmLexer & getLexer()
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
static LLVM_ABI const MCBinaryExpr * create(Opcode Op, const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:201
@ Sub
Subtraction.
Definition MCExpr.h:324
@ Add
Addition.
Definition MCExpr.h:302
int64_t getValue() const
Definition MCExpr.h:171
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
const MCRegisterInfo * getRegisterInfo() const
Definition MCContext.h:414
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
SMLoc getLoc() const
Definition MCExpr.h:86
unsigned getNumOperands() const
Definition MCInst.h:212
void setLoc(SMLoc loc)
Definition MCInst.h:207
unsigned getOpcode() const
Definition MCInst.h:202
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isImm() const
Definition MCInst.h:66
bool isReg() const
Definition MCInst.h:65
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
const MCExpr * getExpr() const
Definition MCInst.h:118
bool isExpr() const
Definition MCInst.h:69
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:221
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:332
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with an appended feature string.
const FeatureBitset & ClearFeatureBitsTransitively(const FeatureBitset &FB)
const FeatureBitset & SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
VariantKind getKind() const
Definition MCExpr.h:232
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
uint32_t getSpecifier() const
Definition MCValue.h:46
const MCSymbol * getSubSym() const
Definition MCValue.h:51
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
constexpr unsigned id() const
Definition Register.h:100
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
constexpr const char * getPointer() const
Definition SMLoc.h:33
void insert_range(Range &&R)
Definition SmallSet.h:196
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
void erase(iterator I)
Definition StringMap.h:427
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition StringMap.h:321
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition StringRef.h:629
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition StringRef.h:609
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition StringRef.h:844
LLVM_ABI std::string lower() const
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition StringRef.h:169
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:816
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define INT64_MIN
Definition DataTypes.h:74
#define INT64_MAX
Definition DataTypes.h:71
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SubsectionType getTypeID(StringRef Type)
StringRef getVendorName(unsigned const Vendor)
StringRef getOptionalStr(unsigned Optional)
VendorID
AArch64 build attributes vendors IDs (a.k.a subsection name)
SubsectionOptional getOptionalID(StringRef Optional)
FeatureAndBitsTags getFeatureAndBitsTagsID(StringRef FeatureAndBitsTag)
VendorID getVendorID(StringRef const Vendor)
PauthABITags getPauthABITagsID(StringRef PauthABITag)
StringRef getTypeStr(unsigned Type)
static CondCode getInvertedCondCode(CondCode Code)
const PHint * lookupPHintByName(StringRef)
uint32_t parseGenericRegister(StringRef Name)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Specifier parsePercentSpecifierName(StringRef)
LLVM_ABI const ArchInfo * parseArch(StringRef Arch)
LLVM_ABI const ArchInfo * getArchForCpu(StringRef CPU)
LLVM_ABI bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Entry
Definition COFF.h:862
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
constexpr double e
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition Error.h:1113
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
static int MCLOHNameToId(StringRef Name)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
std::string utostr(uint64_t X, bool isNeg=false)
static bool isValidMCLOHType(unsigned Kind)
Op::Description Desc
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
static int MCLOHIdToNbArgs(MCLOHType Kind)
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
static MCRegister getXRegFromWReg(MCRegister Reg)
MCLOHType
Linker Optimization Hint Type.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
#define N
const FeatureBitset Features
const char * Name
AArch64::ExtensionBitset DefaultExts
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name