LLVM 23.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
// Kinds of register operand the parser distinguishes; used to select the
// matching register class.
enum class RegKind {
  Scalar,                // General-purpose or scalar FP/SIMD register.
  NeonVector,            // NEON vector register.
  SVEDataVector,         // SVE data vector register.
  SVEPredicateAsCounter, // SVE predicate register used as a counter.
  SVEPredicateVector,    // SVE predicate vector register.
  Matrix,                // SME matrix register.
  LookupTable            // Lookup-table register (see tryParseZTOperand).
};
77
// Flavours of SME matrix operand: the whole array, a tile, or a row/column
// slice of a tile.
enum class MatrixKind { Array, Tile, Row, Col };
79
// How a parsed register must relate to the register class it is matched
// against: exactly equal, or its super-/sub-register (e.g. GPR64as32).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
164 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
165 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
166 std::string &Suggestion);
167 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
168 MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
169 bool parseRegister(OperandVector &Operands);
170 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
171 bool parseNeonVectorList(OperandVector &Operands);
172 bool parseOptionalMulOperand(OperandVector &Operands);
173 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
174 bool parseKeywordOperand(OperandVector &Operands);
175 bool parseOperand(OperandVector &Operands, bool isCondCode,
176 bool invertCondCode);
177 bool parseImmExpr(int64_t &Out);
178 bool parseComma();
179 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
180 unsigned Last);
181
182 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183 OperandVector &Operands);
184
185 bool parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E);
186 bool parseDataExpr(const MCExpr *&Res) override;
187 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
188
189 bool parseDirectiveArch(SMLoc L);
190 bool parseDirectiveArchExtension(SMLoc L);
191 bool parseDirectiveCPU(SMLoc L);
192 bool parseDirectiveInst(SMLoc L);
193
194 bool parseDirectiveTLSDescCall(SMLoc L);
195
196 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
197 bool parseDirectiveLtorg(SMLoc L);
198
199 bool parseDirectiveReq(StringRef Name, SMLoc L);
200 bool parseDirectiveUnreq(SMLoc L);
201 bool parseDirectiveCFINegateRAState();
202 bool parseDirectiveCFINegateRAStateWithPC();
203 bool parseDirectiveCFIBKeyFrame();
204 bool parseDirectiveCFIMTETaggedFrame();
205
206 bool parseDirectiveVariantPCS(SMLoc L);
207
208 bool parseDirectiveSEHAllocStack(SMLoc L);
209 bool parseDirectiveSEHPrologEnd(SMLoc L);
210 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
211 bool parseDirectiveSEHSaveFPLR(SMLoc L);
212 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
213 bool parseDirectiveSEHSaveReg(SMLoc L);
214 bool parseDirectiveSEHSaveRegX(SMLoc L);
215 bool parseDirectiveSEHSaveRegP(SMLoc L);
216 bool parseDirectiveSEHSaveRegPX(SMLoc L);
217 bool parseDirectiveSEHSaveLRPair(SMLoc L);
218 bool parseDirectiveSEHSaveFReg(SMLoc L);
219 bool parseDirectiveSEHSaveFRegX(SMLoc L);
220 bool parseDirectiveSEHSaveFRegP(SMLoc L);
221 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
222 bool parseDirectiveSEHSetFP(SMLoc L);
223 bool parseDirectiveSEHAddFP(SMLoc L);
224 bool parseDirectiveSEHNop(SMLoc L);
225 bool parseDirectiveSEHSaveNext(SMLoc L);
226 bool parseDirectiveSEHEpilogStart(SMLoc L);
227 bool parseDirectiveSEHEpilogEnd(SMLoc L);
228 bool parseDirectiveSEHTrapFrame(SMLoc L);
229 bool parseDirectiveSEHMachineFrame(SMLoc L);
230 bool parseDirectiveSEHContext(SMLoc L);
231 bool parseDirectiveSEHECContext(SMLoc L);
232 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
233 bool parseDirectiveSEHPACSignLR(SMLoc L);
234 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
235 bool parseDirectiveSEHAllocZ(SMLoc L);
236 bool parseDirectiveSEHSaveZReg(SMLoc L);
237 bool parseDirectiveSEHSavePReg(SMLoc L);
238 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
239 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
240
241 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
242 SmallVectorImpl<SMLoc> &Loc);
243 unsigned getNumRegsForRegKind(RegKind K);
244 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
245 OperandVector &Operands, MCStreamer &Out,
246 uint64_t &ErrorInfo,
247 bool MatchingInlineAsm) override;
248 /// @name Auto-generated Match Functions
249 /// {
250
251#define GET_ASSEMBLER_HEADER
252#include "AArch64GenAsmMatcher.inc"
253
254 /// }
255
256 ParseStatus tryParseScalarRegister(MCRegister &Reg);
257 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
258 RegKind MatchKind);
259 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
260 ParseStatus tryParseSVCR(OperandVector &Operands);
261 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
262 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
263 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
264 ParseStatus tryParseSysReg(OperandVector &Operands);
265 ParseStatus tryParseSysCROperand(OperandVector &Operands);
266 template <bool IsSVEPrefetch = false>
267 ParseStatus tryParsePrefetch(OperandVector &Operands);
268 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
269 ParseStatus tryParsePSBHint(OperandVector &Operands);
270 ParseStatus tryParseBTIHint(OperandVector &Operands);
271 ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
272 ParseStatus tryParseTIndexHint(OperandVector &Operands);
273 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
274 ParseStatus tryParseAdrLabel(OperandVector &Operands);
275 template <bool AddFPZeroAsLiteral>
276 ParseStatus tryParseFPImm(OperandVector &Operands);
277 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
278 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
279 bool tryParseNeonVectorRegister(OperandVector &Operands);
280 ParseStatus tryParseVectorIndex(OperandVector &Operands);
281 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
282 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
283 template <bool ParseShiftExtend,
284 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
285 ParseStatus tryParseGPROperand(OperandVector &Operands);
286 ParseStatus tryParseZTOperand(OperandVector &Operands);
287 template <bool ParseShiftExtend, bool ParseSuffix>
288 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
289 template <RegKind RK>
290 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
292 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
293 template <RegKind VectorKind>
294 ParseStatus tryParseVectorList(OperandVector &Operands,
295 bool ExpectMatch = false);
296 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
297 ParseStatus tryParseSVEPattern(OperandVector &Operands);
298 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
299 ParseStatus tryParseGPR64x8(OperandVector &Operands);
300 ParseStatus tryParseImmRange(OperandVector &Operands);
301 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
302 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
303
304public:
305 enum AArch64MatchResultTy {
306 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
307#define GET_OPERAND_DIAGNOSTIC_TYPES
308#include "AArch64GenAsmMatcher.inc"
309 };
310 bool IsILP32;
311 bool IsWindowsArm64EC;
312
313 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
314 const MCInstrInfo &MII, const MCTargetOptions &Options)
315 : MCTargetAsmParser(Options, STI, MII) {
316 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
317 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
319 MCStreamer &S = getParser().getStreamer();
320 if (S.getTargetStreamer() == nullptr)
321 new AArch64TargetStreamer(S);
322
323 // Alias .hword/.word/.[dx]word to the target-independent
324 // .2byte/.4byte/.8byte directives as they have the same form and
325 // semantics:
326 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
327 Parser.addAliasForDirective(".hword", ".2byte");
328 Parser.addAliasForDirective(".word", ".4byte");
329 Parser.addAliasForDirective(".dword", ".8byte");
330 Parser.addAliasForDirective(".xword", ".8byte");
331
332 // Initialize the set of available features.
333 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
334 }
335
336 bool areEqualRegs(const MCParsedAsmOperand &Op1,
337 const MCParsedAsmOperand &Op2) const override;
338 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
339 SMLoc NameLoc, OperandVector &Operands) override;
340 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
341 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
342 SMLoc &EndLoc) override;
343 bool ParseDirective(AsmToken DirectiveID) override;
344 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
345 unsigned Kind) override;
346
347 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
348 AArch64::Specifier &DarwinSpec,
349 int64_t &Addend);
350};
351
352/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
353/// instruction.
354class AArch64Operand : public MCParsedAsmOperand {
355private:
  // Discriminator for the operand payload union below.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
    k_CMHPriorityHint,
    k_TIndexHint,
  } Kind;
380
  SMLoc StartLoc, EndLoc; // Source range covered by this operand.

  // Payload of a k_Token operand (non-owning pointer/length string).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };
388
389 // Separate shift/extend operand.
390 struct ShiftExtendOp {
392 unsigned Amount;
393 bool HasExplicitAmount;
394 };
395
  // Payload of a k_Register operand.
  struct RegOp {
    MCRegister Reg;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
421
  // Payload of a k_MatrixRegister operand.
  struct MatrixRegOp {
    MCRegister Reg;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Payload of a k_MatrixTileList operand: a bitmask of selected tiles.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // Payload of a k_VectorList operand (a register list such as {v0.8b-v3.8b}).
  struct VectorListOp {
    MCRegister Reg;        // First register of the list.
    unsigned Count;        // Number of registers in the list.
    unsigned Stride;       // Register-number step between list elements.
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  // Payload of a k_VectorIndex operand (the lane number).
  struct VectorIndexOp {
    int Val;
  };

  // Payload of a k_Immediate operand.
  struct ImmOp {
    const MCExpr *Val;
  };

  // Payload of a k_ShiftedImm operand: value plus left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Payload of a k_ImmRange operand (inclusive First:Last).
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };
458
459 struct CondCodeOp {
461 };
462
  // Payload of a k_FPImm operand.
  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  // Payload of a k_Barrier operand (dsb/dmb/isb option).
  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // Payload of a k_SysReg operand; separate encodings for MRS/MSR/pstate use.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // Payload of a k_SysCR operand (system control register immediate).
  struct SysCRImmOp {
    unsigned Val;
  };

  // Payload of a k_Prefetch operand.
  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // Payload of a k_PSBHint operand.
  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  // Payload of a k_PHint operand.
  struct PHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  // Payload of a k_BTIHint operand.
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  // Payload of a k_CMHPriorityHint operand.
  struct CMHPriorityHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  // Payload of a k_TIndexHint operand.
  struct TIndexHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // Payload of a k_SVCR operand.
  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
524
  // Operand payload; exactly one member is active, selected by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct CMHPriorityHintOp CMHPriorityHint;
    struct TIndexHintOp TIndexHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
553
public:
  // Construct an operand of kind K; the payload is filled in by the caller.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
556
  // Copy constructor: copies only the union member that is active for the
  // source operand's Kind.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      break;
    case k_TIndexHint:
      TIndexHint = o.TIndexHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
630
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// Text of a k_Token operand.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  /// Whether the token is actually a suffix on the mnemonic.
  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  /// Expression of a k_Immediate operand.
  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  /// Unshifted value of a k_ShiftedImm operand.
  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  /// Shift amount of a k_ShiftedImm operand.
  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  /// First value of a k_ImmRange operand.
  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  /// Last value of a k_ImmRange operand.
  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }
670
672 assert(Kind == k_CondCode && "Invalid access!");
673 return CondCode.Code;
674 }
675
  /// FP immediate, rebuilt from its raw 64-bit bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  /// Whether the parsed FP literal was represented exactly.
  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  /// Encoded value of a k_Barrier operand.
  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  /// Spelling of a k_Barrier operand.
  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  /// Whether the barrier carries the nXS modifier.
  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }
700
  /// Register of a k_Register operand.
  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.Reg;
  }

  /// Register of a k_MatrixRegister operand.
  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  /// Bitmask of tiles selected by a k_MatrixTileList operand.
  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  /// How this register is allowed to relate to the matched register class.
  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  /// First register of a k_VectorList operand.
  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  /// Lane number of a k_VectorIndex operand.
  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  /// Spelling of a k_SysReg operand.
  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  /// Immediate of a k_SysCR operand.
  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }
760
  // Accessors for the hint-style operands: each returns the encoded value
  // or the source spelling of the corresponding k_* payload.

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;
  }

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
  }

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;
  }

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }
825
  // The shift/extend accessors work for both a standalone k_ShiftExtend
  // operand and a k_Register operand that carries an embedded shift/extend.

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
849
  bool isImm() const override { return Kind == k_Immediate; }
  // No memory operand kind exists in KindTy; addresses are built from
  // register and immediate operands.
  bool isMem() const override { return false; }
852
853 bool isUImm6() const {
854 if (!isImm())
855 return false;
856 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
857 if (!MCE)
858 return false;
859 int64_t Val = MCE->getValue();
860 return (Val >= 0 && Val < 64);
861 }
862
  /// Signed immediate of Width bits, unscaled.
  template <int Width> bool isSImm() const {
    return bool(isSImmScaled<Width, 1>());
  }

  /// Signed immediate of Bits bits that is a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }
870
871 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
872 DiagnosticPredicate isUImmScaled() const {
873 if (IsRange && isImmRange() &&
874 (getLastImmVal() != getFirstImmVal() + Offset))
876
877 return isImmScaled<Bits, Scale, IsRange>(false);
878 }
879
880 template <int Bits, int Scale, bool IsRange = false>
881 DiagnosticPredicate isImmScaled(bool Signed) const {
882 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
883 (isImmRange() && !IsRange))
885
886 int64_t Val;
887 if (isImmRange())
888 Val = getFirstImmVal();
889 else {
890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
891 if (!MCE)
893 Val = MCE->getValue();
894 }
895
896 int64_t MinVal, MaxVal;
897 if (Signed) {
898 int64_t Shift = Bits - 1;
899 MinVal = (int64_t(1) << Shift) * -Scale;
900 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
901 } else {
902 MinVal = 0;
903 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
904 }
905
906 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
908
910 }
911
912 DiagnosticPredicate isSVEPattern() const {
913 if (!isImm())
915 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
916 if (!MCE)
918 int64_t Val = MCE->getValue();
919 if (Val >= 0 && Val < 32)
922 }
923
924 DiagnosticPredicate isSVEVecLenSpecifier() const {
925 if (!isImm())
927 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
928 if (!MCE)
930 int64_t Val = MCE->getValue();
931 if (Val >= 0 && Val <= 1)
934 }
935
  /// Whether a symbolic expression is acceptable as a uimm12 load/store
  /// offset (e.g. @pageoff / @lo12-style references).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // NOTE(review): several alternatives of this condition (the list of
    // accepted ELF specifiers between S_MACHO_PAGEOFF and "ELFSpec") were
    // elided in this dump — restore them from the upstream file; the code
    // below is incomplete as-is.
    if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
        ELFSpec)) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
               DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
968
969 template <int Scale> bool isUImm12Offset() const {
970 if (!isImm())
971 return false;
972
973 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
974 if (!MCE)
975 return isSymbolicUImm12Offset(getImm());
976
977 int64_t Val = MCE->getValue();
978 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
979 }
980
981 template <int N, int M>
982 bool isImmInRange() const {
983 if (!isImm())
984 return false;
985 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
986 if (!MCE)
987 return false;
988 int64_t Val = MCE->getValue();
989 return (Val >= N && Val <= M);
990 }
991
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask of the bits above T's width, built with two half-width shifts to
    // avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the truncated value against the logical-immediate encoding rules
    // for T's bit width.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
1011
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }
1015
1016 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
1017 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
1018 /// immediate that can be shifted by 'Shift'.
1019 template <unsigned Width>
1020 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
1021 if (isShiftedImm() && Width == getShiftedImmShift())
1022 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
1023 return std::make_pair(CE->getValue(), Width);
1024
1025 if (isImm())
1026 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
1027 int64_t Val = CE->getValue();
1028 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
1029 return std::make_pair(Val >> Width, Width);
1030 else
1031 return std::make_pair(Val, 0u);
1032 }
1033
1034 return {};
1035 }
1036
  /// Whether the operand is valid as an ADD/SUB immediate: a 12-bit value
  /// shifted by 0 or 12, or an acceptable symbolic reference.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                            Addend)) {
      // NOTE(review): the tail of this condition (the accepted ELF specifier
      // list between S_MACHO_GOTPAGEOFF and "ELFSpec") was elided in this
      // dump — restore it from the upstream file; the code below is
      // incomplete as-is.
      return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
             DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
             (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
             ELFSpec);
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1079
1080 bool isAddSubImmNeg() const {
1081 if (!isShiftedImm() && !isImm())
1082 return false;
1083
1084 // Otherwise it should be a real negative immediate in range.
1085 if (auto ShiftedVal = getShiftedVal<12>())
1086 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1087
1088 return false;
1089 }
1090
1091 // Signed value in the range -128 to +127. For element widths of
1092 // 16 bits or higher it may also be a signed multiple of 256 in the
1093 // range -32768 to +32512.
1094 // For element-width of 8 bits a range of -128 to 255 is accepted,
1095 // since a copy of a byte can be either signed/unsigned.
1096 template <typename T>
1097 DiagnosticPredicate isSVECpyImm() const {
1098 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1100
1101 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1102 std::is_same<int8_t, T>::value;
1103 if (auto ShiftedImm = getShiftedVal<8>())
1104 if (!(IsByte && ShiftedImm->second) &&
1105 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1106 << ShiftedImm->second))
1108
1110 }
1111
1112 // Unsigned value in the range 0 to 255. For element widths of
1113 // 16 bits or higher it may also be a signed multiple of 256 in the
1114 // range 0 to 65280.
1115 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1116 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1118
1119 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1120 std::is_same<int8_t, T>::value;
1121 if (auto ShiftedImm = getShiftedVal<8>())
1122 if (!(IsByte && ShiftedImm->second) &&
1123 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1124 << ShiftedImm->second))
1126
1128 }
1129
1130 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1131 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1134 }
1135
1136 bool isCondCode() const { return Kind == k_CondCode; }
1137
1138 bool isSIMDImmType10() const {
1139 if (!isImm())
1140 return false;
1141 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1142 if (!MCE)
1143 return false;
1145 }
1146
1147 template<int N>
1148 bool isBranchTarget() const {
1149 if (!isImm())
1150 return false;
1151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1152 if (!MCE)
1153 return true;
1154 int64_t Val = MCE->getValue();
1155 if (Val & 0x3)
1156 return false;
1157 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1158 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1159 }
1160
1161 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1162 if (!isImm())
1163 return false;
1164
1165 AArch64::Specifier ELFSpec;
1166 AArch64::Specifier DarwinSpec;
1167 int64_t Addend;
1168 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1169 Addend)) {
1170 return false;
1171 }
1172 if (DarwinSpec != AArch64::S_None)
1173 return false;
1174
1175 return llvm::is_contained(AllowedModifiers, ELFSpec);
1176 }
1177
1178 bool isMovWSymbolG3() const {
1179 return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1180 }
1181
1182 bool isMovWSymbolG2() const {
1183 return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1187 }
1188
1189 bool isMovWSymbolG1() const {
1190 return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1195 }
1196
1197 bool isMovWSymbolG0() const {
1198 return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1203 }
1204
1205 template<int RegWidth, int Shift>
1206 bool isMOVZMovAlias() const {
1207 if (!isImm()) return false;
1208
1209 const MCExpr *E = getImm();
1210 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1211 uint64_t Value = CE->getValue();
1212
1213 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1214 }
1215 // Only supports the case of Shift being 0 if an expression is used as an
1216 // operand
1217 return !Shift && E;
1218 }
1219
1220 template<int RegWidth, int Shift>
1221 bool isMOVNMovAlias() const {
1222 if (!isImm()) return false;
1223
1224 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1225 if (!CE) return false;
1226 uint64_t Value = CE->getValue();
1227
1228 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1229 }
1230
  // FP immediate representable in FMOV's 8-bit (a:b:cdefgh) encoding.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Plain barrier operand (no FEAT_XS nXS qualifier).
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the FEAT_XS nXS qualifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1243
  // System register readable via MRS (-1U marks "no MRS encoding").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE field that takes a 0/1 immediate in the MSR-immediate form.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // PSTATE field that takes a 0-15 immediate in the MSR-immediate form.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // Valid SME SVCR operand (PStateField resolved during parsing; -1U means
  // lookup failed).
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1271
1272 bool isReg() const override {
1273 return Kind == k_Register;
1274 }
1275
1276 bool isVectorList() const { return Kind == k_VectorList; }
1277
1278 bool isScalarReg() const {
1279 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1280 }
1281
1282 bool isNeonVectorReg() const {
1283 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1284 }
1285
1286 bool isNeonVectorRegLo() const {
1287 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1288 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1289 Reg.Reg) ||
1290 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1291 Reg.Reg));
1292 }
1293
1294 bool isNeonVectorReg0to7() const {
1295 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1296 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1297 Reg.Reg));
1298 }
1299
1300 bool isMatrix() const { return Kind == k_MatrixRegister; }
1301 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1302
1303 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1304 RegKind RK;
1305 switch (Class) {
1306 case AArch64::PPRRegClassID:
1307 case AArch64::PPR_3bRegClassID:
1308 case AArch64::PPR_p8to15RegClassID:
1309 case AArch64::PNRRegClassID:
1310 case AArch64::PNR_p8to15RegClassID:
1311 case AArch64::PPRorPNRRegClassID:
1312 RK = RegKind::SVEPredicateAsCounter;
1313 break;
1314 default:
1315 llvm_unreachable("Unsupported register class");
1316 }
1317
1318 return (Kind == k_Register && Reg.Kind == RK) &&
1319 AArch64MCRegisterClasses[Class].contains(getReg());
1320 }
1321
1322 template <unsigned Class> bool isSVEVectorReg() const {
1323 RegKind RK;
1324 switch (Class) {
1325 case AArch64::ZPRRegClassID:
1326 case AArch64::ZPR_3bRegClassID:
1327 case AArch64::ZPR_4bRegClassID:
1328 case AArch64::ZPRMul2_LoRegClassID:
1329 case AArch64::ZPRMul2_HiRegClassID:
1330 case AArch64::ZPR_KRegClassID:
1331 RK = RegKind::SVEDataVector;
1332 break;
1333 case AArch64::PPRRegClassID:
1334 case AArch64::PPR_3bRegClassID:
1335 case AArch64::PPR_p8to15RegClassID:
1336 case AArch64::PNRRegClassID:
1337 case AArch64::PNR_p8to15RegClassID:
1338 case AArch64::PPRorPNRRegClassID:
1339 RK = RegKind::SVEPredicateVector;
1340 break;
1341 default:
1342 llvm_unreachable("Unsupported register class");
1343 }
1344
1345 return (Kind == k_Register && Reg.Kind == RK) &&
1346 AArch64MCRegisterClasses[Class].contains(getReg());
1347 }
1348
1349 template <unsigned Class> bool isFPRasZPR() const {
1350 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1351 AArch64MCRegisterClasses[Class].contains(getReg());
1352 }
1353
1354 template <int ElementWidth, unsigned Class>
1355 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1356 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1358
1359 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1361
1363 }
1364
1365 template <int ElementWidth, unsigned Class>
1366 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1367 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1368 Reg.Kind != RegKind::SVEPredicateVector))
1370
1371 if ((isSVEPredicateAsCounterReg<Class>() ||
1372 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1373 Reg.ElementWidth == ElementWidth)
1375
1377 }
1378
1379 template <int ElementWidth, unsigned Class>
1380 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1381 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1383
1384 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1386
1388 }
1389
1390 template <int ElementWidth, unsigned Class>
1391 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1392 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1394
1395 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1397
1399 }
1400
1401 template <int ElementWidth, unsigned Class,
1402 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1403 bool ShiftWidthAlwaysSame>
1404 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1405 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1406 if (!VectorMatch.isMatch())
1408
1409 // Give a more specific diagnostic when the user has explicitly typed in
1410 // a shift-amount that does not match what is expected, but for which
1411 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1412 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1413 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1414 ShiftExtendTy == AArch64_AM::SXTW) &&
1415 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1417
1418 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1420
1422 }
1423
  // A 64-bit GPR where the instruction encodes/uses the 32-bit sub-register.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
  }

  // A 32-bit GPR where the instruction encodes/uses the 64-bit super-register.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
  }

  // Consecutive x8 GPR tuple register.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.Reg);
  }

  // Sequential (even/odd) W register pair.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.Reg);
  }

  // Sequential (even/odd) X register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.Reg);
  }

  // SYSP's XZR-pair operand form: only XZR itself is accepted.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
  }
1455
1456 template<int64_t Angle, int64_t Remainder>
1457 DiagnosticPredicate isComplexRotation() const {
1458 if (!isImm())
1460
1461 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1462 if (!CE)
1464 uint64_t Value = CE->getValue();
1465
1466 if (Value % Angle == Remainder && Value <= 270)
1469 }
1470
1471 template <unsigned RegClassID> bool isGPR64() const {
1472 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1473 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1474 }
1475
1476 template <unsigned RegClassID, int ExtWidth>
1477 DiagnosticPredicate isGPR64WithShiftExtend() const {
1478 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1480
1481 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1482 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1485 }
1486
1487 /// Is this a vector list with the type implicit (presumably attached to the
1488 /// instruction itself)?
1489 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1490 bool isImplicitlyTypedVectorList() const {
1491 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1492 VectorList.NumElements == 0 &&
1493 VectorList.RegisterKind == VectorKind &&
1494 (!IsConsecutive || (VectorList.Stride == 1));
1495 }
1496
1497 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1498 unsigned ElementWidth, unsigned Stride = 1>
1499 bool isTypedVectorList() const {
1500 if (Kind != k_VectorList)
1501 return false;
1502 if (VectorList.Count != NumRegs)
1503 return false;
1504 if (VectorList.RegisterKind != VectorKind)
1505 return false;
1506 if (VectorList.ElementWidth != ElementWidth)
1507 return false;
1508 if (VectorList.Stride != Stride)
1509 return false;
1510 return VectorList.NumElements == NumElements;
1511 }
1512
1513 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1514 unsigned ElementWidth, unsigned RegClass>
1515 DiagnosticPredicate isTypedVectorListMultiple() const {
1516 bool Res =
1517 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1518 if (!Res)
1520 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
1523 }
1524
1525 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1526 unsigned ElementWidth>
1527 DiagnosticPredicate isTypedVectorListStrided() const {
1528 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1529 ElementWidth, Stride>();
1530 if (!Res)
1532 if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
1533 ((VectorList.Reg >= AArch64::Z16) &&
1534 (VectorList.Reg < (AArch64::Z16 + Stride))))
1537 }
1538
1539 template <int Min, int Max>
1540 DiagnosticPredicate isVectorIndex() const {
1541 if (Kind != k_VectorIndex)
1543 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1546 }
1547
1548 bool isToken() const override { return Kind == k_Token; }
1549
1550 bool isTokenEqual(StringRef Str) const {
1551 return Kind == k_Token && getToken() == Str;
1552 }
1553 bool isSysCR() const { return Kind == k_SysCR; }
1554 bool isPrefetch() const { return Kind == k_Prefetch; }
1555 bool isPSBHint() const { return Kind == k_PSBHint; }
1556 bool isPHint() const { return Kind == k_PHint; }
1557 bool isBTIHint() const { return Kind == k_BTIHint; }
1558 bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
1559 bool isTIndexHint() const { return Kind == k_TIndexHint; }
1560 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1561 bool isShifter() const {
1562 if (!isShiftExtend())
1563 return false;
1564
1565 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1566 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1567 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1568 ST == AArch64_AM::MSL);
1569 }
1570
1571 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1572 if (Kind != k_FPImm)
1574
1575 if (getFPImmIsExact()) {
1576 // Lookup the immediate from table of supported immediates.
1577 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1578 assert(Desc && "Unknown enum value");
1579
1580 // Calculate its FP value.
1581 APFloat RealVal(APFloat::IEEEdouble());
1582 auto StatusOrErr =
1583 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1584 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1585 llvm_unreachable("FP immediate is not exact");
1586
1587 if (getFPImm().bitwiseIsEqual(RealVal))
1589 }
1590
1592 }
1593
1594 template <unsigned ImmA, unsigned ImmB>
1595 DiagnosticPredicate isExactFPImm() const {
1596 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1597 if ((Res = isExactFPImm<ImmA>()))
1599 if ((Res = isExactFPImm<ImmB>()))
1601 return Res;
1602 }
1603
  // Any register extend (uxtb..sxtx, or lsl) with amount in [0, 4].
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // Extend valid on a 64-bit instruction form taking a W source register.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit (X) source: uxtx/sxtx/lsl with amount <= 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // LSL with a 3-bit shift amount (0-7).
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }
1642
  // X-register memory-operand extend: lsl/sxtx, scaled either by the access
  // width (log2 of bytes) or unscaled (0).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register memory-operand extend: uxtw/sxtw, scaled by the access width
  // or unscaled.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1660
1661 template <unsigned width>
1662 bool isArithmeticShifter() const {
1663 if (!isShifter())
1664 return false;
1665
1666 // An arithmetic shifter is LSL, LSR, or ASR.
1667 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1668 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1669 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1670 }
1671
1672 template <unsigned width>
1673 bool isLogicalShifter() const {
1674 if (!isShifter())
1675 return false;
1676
1677 // A logical shifter is LSL, LSR, ASR or ROR.
1678 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1679 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1680 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1681 getShiftExtendAmount() < width;
1682 }
1683
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16 (the comment previously
    // described the 64-bit form; the code accepts only 0 and 16).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1695
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (the comment
    // previously described the 32-bit form; the code accepts all four).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1707
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // The half-word variant only allows a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16 (the old comment
    // incorrectly called this a "logical vector shifter").
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1737
1738 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1739 // to LDUR/STUR when the offset is not legal for the former but is for
1740 // the latter. As such, in addition to checking for being a legal unscaled
1741 // address, also check that it is not a legal scaled address. This avoids
1742 // ambiguity in the matcher.
1743 template<int Width>
1744 bool isSImm9OffsetFB() const {
1745 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1746 }
1747
1748 bool isAdrpLabel() const {
1749 // Validation was handled during parsing, so we just verify that
1750 // something didn't go haywire.
1751 if (!isImm())
1752 return false;
1753
1754 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1755 int64_t Val = CE->getValue();
1756 int64_t Min = - (4096 * (1LL << (21 - 1)));
1757 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1758 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1759 }
1760
1761 return true;
1762 }
1763
1764 bool isAdrLabel() const {
1765 // Validation was handled during parsing, so we just verify that
1766 // something didn't go haywire.
1767 if (!isImm())
1768 return false;
1769
1770 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1771 int64_t Val = CE->getValue();
1772 int64_t Min = - (1LL << (21 - 1));
1773 int64_t Max = ((1LL << (21 - 1)) - 1);
1774 return Val >= Min && Val <= Max;
1775 }
1776
1777 return true;
1778 }
1779
1780 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1781 DiagnosticPredicate isMatrixRegOperand() const {
1782 if (!isMatrix())
1784 if (getMatrixKind() != Kind ||
1785 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1786 EltSize != getMatrixElementWidth())
1789 }
1790
1791 bool isPAuthPCRelLabel16Operand() const {
1792 // PAuth PCRel16 operands are similar to regular branch targets, but only
1793 // negative values are allowed for concrete immediates as signing instr
1794 // should be in a lower address.
1795 if (!isImm())
1796 return false;
1797 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1798 if (!MCE)
1799 return true;
1800 int64_t Val = MCE->getValue();
1801 if (Val & 0b11)
1802 return false;
1803 return (Val <= 0) && (Val > -(1 << 18));
1804 }
1805
1806 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1807 // Add as immediates when possible. Null MCExpr = 0.
1808 if (!Expr)
1810 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1811 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1812 else
1814 }
1815
1816 void addRegOperands(MCInst &Inst, unsigned N) const {
1817 assert(N == 1 && "Invalid number of operands!");
1819 }
1820
1821 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1822 assert(N == 1 && "Invalid number of operands!");
1823 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1824 }
1825
1826 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 assert(
1829 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1830
1831 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1832 MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
1834
1836 }
1837
1838 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!");
1840 assert(
1841 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1842
1843 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1844 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
1846
1848 }
1849
1850 template <int Width>
1851 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1852 unsigned Base;
1853 switch (Width) {
1854 case 8: Base = AArch64::B0; break;
1855 case 16: Base = AArch64::H0; break;
1856 case 32: Base = AArch64::S0; break;
1857 case 64: Base = AArch64::D0; break;
1858 case 128: Base = AArch64::Q0; break;
1859 default:
1860 llvm_unreachable("Unsupported width");
1861 }
1862 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1863 }
1864
1865 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1866 assert(N == 1 && "Invalid number of operands!");
1867 MCRegister Reg = getReg();
1868 // Normalise to PPR
1869 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1870 Reg = Reg - AArch64::PN0 + AArch64::P0;
1872 }
1873
1874 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 Inst.addOperand(
1877 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1878 }
1879
1880 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1881 assert(N == 1 && "Invalid number of operands!");
1882 assert(
1883 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1884 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1885 }
1886
1887 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1888 assert(N == 1 && "Invalid number of operands!");
1889 assert(
1890 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1892 }
1893
1894 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1895 assert(N == 1 && "Invalid number of operands!");
1897 }
1898
1899 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1900 assert(N == 1 && "Invalid number of operands!");
1902 }
1903
1904 enum VecListIndexType {
1905 VecListIdx_DReg = 0,
1906 VecListIdx_QReg = 1,
1907 VecListIdx_ZReg = 2,
1908 VecListIdx_PReg = 3,
1909 };
1910
1911 template <VecListIndexType RegTy, unsigned NumRegs,
1912 bool IsConsecutive = false>
1913 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1914 assert(N == 1 && "Invalid number of operands!");
1915 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1916 "Expected consecutive registers");
1917 static const unsigned FirstRegs[][5] = {
1918 /* DReg */ { AArch64::Q0,
1919 AArch64::D0, AArch64::D0_D1,
1920 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1921 /* QReg */ { AArch64::Q0,
1922 AArch64::Q0, AArch64::Q0_Q1,
1923 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1924 /* ZReg */ { AArch64::Z0,
1925 AArch64::Z0, AArch64::Z0_Z1,
1926 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1927 /* PReg */ { AArch64::P0,
1928 AArch64::P0, AArch64::P0_P1 }
1929 };
1930
1931 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1932 " NumRegs must be <= 4 for ZRegs");
1933
1934 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1935 " NumRegs must be <= 2 for PRegs");
1936
1937 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1938 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1939 FirstRegs[(unsigned)RegTy][0]));
1940 }
1941
1942 template <unsigned NumRegs>
1943 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1944 assert(N == 1 && "Invalid number of operands!");
1945 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1946
1947 switch (NumRegs) {
1948 case 2:
1949 if (getVectorListStart() < AArch64::Z16) {
1950 assert((getVectorListStart() < AArch64::Z8) &&
1951 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1953 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1954 } else {
1955 assert((getVectorListStart() < AArch64::Z24) &&
1956 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1958 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1959 }
1960 break;
1961 case 4:
1962 if (getVectorListStart() < AArch64::Z16) {
1963 assert((getVectorListStart() < AArch64::Z4) &&
1964 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1966 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1967 } else {
1968 assert((getVectorListStart() < AArch64::Z20) &&
1969 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1971 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1972 }
1973 break;
1974 default:
1975 llvm_unreachable("Unsupported number of registers for strided vec list");
1976 }
1977 }
1978
  // The ZA tile list is encoded as an 8-bit register mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Encode which of the instruction's two permitted exact FP immediates this
  // operand is: 0 for ImmIs0, 1 for ImmIs1.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
2005
2006 template <int Shift>
2007 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2008 assert(N == 2 && "Invalid number of operands!");
2009 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2010 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
2011 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2012 } else if (isShiftedImm()) {
2013 addExpr(Inst, getShiftedImmVal());
2014 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
2015 } else {
2016 addExpr(Inst, getImm());
2018 }
2019 }
2020
2021 template <int Shift>
2022 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 2 && "Invalid number of operands!");
2024 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2025 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2026 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2027 } else
2028 llvm_unreachable("Not a shifted negative immediate");
2029 }
2030
2031 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2032 assert(N == 1 && "Invalid number of operands!");
2034 }
2035
2036 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2039 if (!MCE)
2040 addExpr(Inst, getImm());
2041 else
2042 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2043 }
2044
2045 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2046 addImmOperands(Inst, N);
2047 }
2048
2049 template<int Scale>
2050 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2051 assert(N == 1 && "Invalid number of operands!");
2052 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2053
2054 if (!MCE) {
2056 return;
2057 }
2058 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2059 }
2060
2061 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2062 assert(N == 1 && "Invalid number of operands!");
2063 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2065 }
2066
2067 template <int Scale>
2068 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!");
2070 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2071 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2072 }
2073
2074 template <int Scale>
2075 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2076 assert(N == 1 && "Invalid number of operands!");
2077 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2078 }
2079
  // Encode a bitmask ("logical") immediate for an element of type T.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same as above but encodes the bitwise NOT of the immediate (used by
  // inverted-immediate aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Encode a 64-bit AdvSIMD modified immediate (type 10: one bit per byte).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
2104
2105 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2120 // PC-relative operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2130 }
2131
2132 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2133 // Branch operands don't encode the low bits, so shift them off
2134 // here. If it's a label, however, just put it on directly as there's
2135 // not enough information now to do anything.
2136 assert(N == 1 && "Invalid number of operands!");
2137 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2138 if (!MCE) {
2139 addExpr(Inst, getImm());
2140 return;
2141 }
2142 assert(MCE && "Invalid constant immediate operand!");
2143 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2144 }
2145
2146 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2147 // Branch operands don't encode the low bits, so shift them off
2148 // here. If it's a label, however, just put it on directly as there's
2149 // not enough information now to do anything.
2150 assert(N == 1 && "Invalid number of operands!");
2151 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2152 if (!MCE) {
2153 addExpr(Inst, getImm());
2154 return;
2155 }
2156 assert(MCE && "Invalid constant immediate operand!");
2157 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2158 }
2159
2160 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2161 // Branch operands don't encode the low bits, so shift them off
2162 // here. If it's a label, however, just put it on directly as there's
2163 // not enough information now to do anything.
2164 assert(N == 1 && "Invalid number of operands!");
2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2166 if (!MCE) {
2167 addExpr(Inst, getImm());
2168 return;
2169 }
2170 assert(MCE && "Invalid constant immediate operand!");
2171 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2172 }
2173
  /// Add a floating-point immediate operand encoded via getFP64Imm.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // NOTE(review): the opening of the addOperand/createImm call is elided
    // from this view; the visible continuation encodes the FP immediate.
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
2179
2180 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2181 assert(N == 1 && "Invalid number of operands!");
2182 Inst.addOperand(MCOperand::createImm(getBarrier()));
2183 }
2184
2185 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 Inst.addOperand(MCOperand::createImm(getBarrier()));
2188 }
2189
2190 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2191 assert(N == 1 && "Invalid number of operands!");
2192
2193 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2194 }
2195
2196 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 1 && "Invalid number of operands!");
2198
2199 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2200 }
2201
2202 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2203 assert(N == 1 && "Invalid number of operands!");
2204
2205 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2206 }
2207
2208 void addSVCROperands(MCInst &Inst, unsigned N) const {
2209 assert(N == 1 && "Invalid number of operands!");
2210
2211 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2212 }
2213
2214 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216
2217 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2218 }
2219
2220 void addSysCROperands(MCInst &Inst, unsigned N) const {
2221 assert(N == 1 && "Invalid number of operands!");
2222 Inst.addOperand(MCOperand::createImm(getSysCR()));
2223 }
2224
2225 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2226 assert(N == 1 && "Invalid number of operands!");
2227 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2228 }
2229
2230 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2231 assert(N == 1 && "Invalid number of operands!");
2232 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2233 }
2234
2235 void addPHintOperands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 Inst.addOperand(MCOperand::createImm(getPHint()));
2238 }
2239
2240 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2243 }
2244
2245 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2246 assert(N == 1 && "Invalid number of operands!");
2247 Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
2248 }
2249
2250 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2251 assert(N == 1 && "Invalid number of operands!");
2252 Inst.addOperand(MCOperand::createImm(getTIndexHint()));
2253 }
2254
  /// Add a shifter operand built from the shift type and amount.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    // NOTE(review): the statement that appends Imm to Inst is elided from
    // this view; confirm against the original source.
  }
2261
  /// Add an LSL #imm3 shifter operand (raw shift amount, no encoding).
  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();
    // NOTE(review): the statement that appends Imm to Inst is elided from
    // this view; confirm against the original source.
  }
2267
  /// Add the XZR register operand for a SYSP XZR-pair; the parsed register
  /// must be the 64-bit zero register.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Silently ignore non-scalar operands; the matcher reports the error.
    if (!isScalarReg())
      return;

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
    // NOTE(review): the continuation of this call chain is elided from this
    // view (presumably a .getRegister(...) lookup); confirm against the
    // original source.
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2282
  /// Add a 32-bit arithmetic-extend operand; a bare LSL is canonicalized to
  /// UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    // NOTE(review): the statement that appends Imm to Inst is elided from
    // this view; confirm against the original source.
  }
2290
  /// Add a 64-bit arithmetic-extend operand; a bare LSL is canonicalized to
  /// UXTX before encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    // NOTE(review): the statement that appends Imm to Inst is elided from
    // this view; confirm against the original source.
  }
2298
2299 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2300 assert(N == 2 && "Invalid number of operands!");
2301 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2302 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2303 Inst.addOperand(MCOperand::createImm(IsSigned));
2304 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2305 }
2306
2307 // For 8-bit load/store instructions with a register offset, both the
2308 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2309 // they're disambiguated by whether the shift was explicit or implicit rather
2310 // than its size.
2311 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2312 assert(N == 2 && "Invalid number of operands!");
2313 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2314 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2315 Inst.addOperand(MCOperand::createImm(IsSigned));
2316 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2317 }
2318
2319 template<int Shift>
2320 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2321 assert(N == 1 && "Invalid number of operands!");
2322
2323 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2324 if (CE) {
2325 uint64_t Value = CE->getValue();
2326 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2327 } else {
2328 addExpr(Inst, getImm());
2329 }
2330 }
2331
2332 template<int Shift>
2333 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2334 assert(N == 1 && "Invalid number of operands!");
2335
2336 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2337 uint64_t Value = CE->getValue();
2338 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2339 }
2340
2341 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2342 assert(N == 1 && "Invalid number of operands!");
2343 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2344 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2345 }
2346
2347 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2348 assert(N == 1 && "Invalid number of operands!");
2349 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2350 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2351 }
2352
2353 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2354
2355 static std::unique_ptr<AArch64Operand>
2356 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2357 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2358 Op->Tok.Data = Str.data();
2359 Op->Tok.Length = Str.size();
2360 Op->Tok.IsSuffix = IsSuffix;
2361 Op->StartLoc = S;
2362 Op->EndLoc = S;
2363 return Op;
2364 }
2365
  /// Build a scalar/vector register operand with optional equality constraint
  /// and shift/extend modifier.
  static std::unique_ptr<AArch64Operand>
  CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            // NOTE(review): a defaulted ShiftExtendType parameter line is
            // elided from this view (presumably ExtTy = AArch64_AM::LSL);
            // confirm against the original source.
            unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.Reg = Reg;
    Op->Reg.Kind = Kind;
    // Scalar registers carry no element width; CreateVectorReg overrides it.
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2383
2384 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2385 MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
2386 MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2387 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2388 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2389 Kind == RegKind::SVEPredicateVector ||
2390 Kind == RegKind::SVEPredicateAsCounter) &&
2391 "Invalid vector kind");
2392 auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2393 HasExplicitAmount);
2394 Op->Reg.ElementWidth = ElementWidth;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand>
2399 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2400 unsigned NumElements, unsigned ElementWidth,
2401 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2402 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2403 Op->VectorList.Reg = Reg;
2404 Op->VectorList.Count = Count;
2405 Op->VectorList.Stride = Stride;
2406 Op->VectorList.NumElements = NumElements;
2407 Op->VectorList.ElementWidth = ElementWidth;
2408 Op->VectorList.RegisterKind = RegisterKind;
2409 Op->StartLoc = S;
2410 Op->EndLoc = E;
2411 return Op;
2412 }
2413
2414 static std::unique_ptr<AArch64Operand>
2415 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2417 Op->VectorIndex.Val = Idx;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand>
2424 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2425 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2426 Op->MatrixTileList.RegMask = RegMask;
2427 Op->StartLoc = S;
2428 Op->EndLoc = E;
2429 return Op;
2430 }
2431
2432 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2433 const unsigned ElementWidth) {
2434 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2435 RegMap = {
2436 {{0, AArch64::ZAB0},
2437 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2438 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2439 {{8, AArch64::ZAB0},
2440 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2441 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2442 {{16, AArch64::ZAH0},
2443 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2444 {{16, AArch64::ZAH1},
2445 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2446 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2447 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2448 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2449 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2450 };
2451
2452 if (ElementWidth == 64)
2453 OutRegs.insert(Reg);
2454 else {
2455 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2456 assert(!Regs.empty() && "Invalid tile or element width!");
2457 OutRegs.insert_range(Regs);
2458 }
2459 }
2460
2461 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2462 SMLoc E, MCContext &Ctx) {
2463 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2464 Op->Imm.Val = Val;
2465 Op->StartLoc = S;
2466 Op->EndLoc = E;
2467 return Op;
2468 }
2469
2470 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2471 unsigned ShiftAmount,
2472 SMLoc S, SMLoc E,
2473 MCContext &Ctx) {
2474 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2475 Op->ShiftedImm .Val = Val;
2476 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2477 Op->StartLoc = S;
2478 Op->EndLoc = E;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2483 unsigned Last, SMLoc S,
2484 SMLoc E,
2485 MCContext &Ctx) {
2486 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2487 Op->ImmRange.First = First;
2488 Op->ImmRange.Last = Last;
2489 Op->EndLoc = E;
2490 return Op;
2491 }
2492
2493 static std::unique_ptr<AArch64Operand>
2494 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2495 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2496 Op->CondCode.Code = Code;
2497 Op->StartLoc = S;
2498 Op->EndLoc = E;
2499 return Op;
2500 }
2501
2502 static std::unique_ptr<AArch64Operand>
2503 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2504 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2505 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2506 Op->FPImm.IsExact = IsExact;
2507 Op->StartLoc = S;
2508 Op->EndLoc = S;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2513 StringRef Str,
2514 SMLoc S,
2515 MCContext &Ctx,
2516 bool HasnXSModifier) {
2517 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2518 Op->Barrier.Val = Val;
2519 Op->Barrier.Data = Str.data();
2520 Op->Barrier.Length = Str.size();
2521 Op->Barrier.HasnXSModifier = HasnXSModifier;
2522 Op->StartLoc = S;
2523 Op->EndLoc = S;
2524 return Op;
2525 }
2526
2527 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2528 uint32_t MRSReg,
2529 uint32_t MSRReg,
2530 uint32_t PStateField,
2531 MCContext &Ctx) {
2532 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2533 Op->SysReg.Data = Str.data();
2534 Op->SysReg.Length = Str.size();
2535 Op->SysReg.MRSReg = MRSReg;
2536 Op->SysReg.MSRReg = MSRReg;
2537 Op->SysReg.PStateField = PStateField;
2538 Op->StartLoc = S;
2539 Op->EndLoc = S;
2540 return Op;
2541 }
2542
2543 static std::unique_ptr<AArch64Operand>
2544 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2545 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2546 Op->PHint.Val = Val;
2547 Op->PHint.Data = Str.data();
2548 Op->PHint.Length = Str.size();
2549 Op->StartLoc = S;
2550 Op->EndLoc = S;
2551 return Op;
2552 }
2553
2554 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2555 SMLoc E, MCContext &Ctx) {
2556 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2557 Op->SysCRImm.Val = Val;
2558 Op->StartLoc = S;
2559 Op->EndLoc = E;
2560 return Op;
2561 }
2562
2563 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2564 StringRef Str,
2565 SMLoc S,
2566 MCContext &Ctx) {
2567 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2568 Op->Prefetch.Val = Val;
2569 Op->Barrier.Data = Str.data();
2570 Op->Barrier.Length = Str.size();
2571 Op->StartLoc = S;
2572 Op->EndLoc = S;
2573 return Op;
2574 }
2575
2576 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2577 StringRef Str,
2578 SMLoc S,
2579 MCContext &Ctx) {
2580 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2581 Op->PSBHint.Val = Val;
2582 Op->PSBHint.Data = Str.data();
2583 Op->PSBHint.Length = Str.size();
2584 Op->StartLoc = S;
2585 Op->EndLoc = S;
2586 return Op;
2587 }
2588
2589 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2590 StringRef Str,
2591 SMLoc S,
2592 MCContext &Ctx) {
2593 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2594 Op->BTIHint.Val = Val | 32;
2595 Op->BTIHint.Data = Str.data();
2596 Op->BTIHint.Length = Str.size();
2597 Op->StartLoc = S;
2598 Op->EndLoc = S;
2599 return Op;
2600 }
2601
2602 static std::unique_ptr<AArch64Operand>
2603 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2604 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2605 Op->CMHPriorityHint.Val = Val;
2606 Op->CMHPriorityHint.Data = Str.data();
2607 Op->CMHPriorityHint.Length = Str.size();
2608 Op->StartLoc = S;
2609 Op->EndLoc = S;
2610 return Op;
2611 }
2612
2613 static std::unique_ptr<AArch64Operand>
2614 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2615 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2616 Op->TIndexHint.Val = Val;
2617 Op->TIndexHint.Data = Str.data();
2618 Op->TIndexHint.Length = Str.size();
2619 Op->StartLoc = S;
2620 Op->EndLoc = S;
2621 return Op;
2622 }
2623
2624 static std::unique_ptr<AArch64Operand>
2625 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2626 SMLoc S, SMLoc E, MCContext &Ctx) {
2627 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2628 Op->MatrixReg.Reg = Reg;
2629 Op->MatrixReg.ElementWidth = ElementWidth;
2630 Op->MatrixReg.Kind = Kind;
2631 Op->StartLoc = S;
2632 Op->EndLoc = E;
2633 return Op;
2634 }
2635
2636 static std::unique_ptr<AArch64Operand>
2637 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2638 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2639 Op->SVCR.PStateField = PStateField;
2640 Op->SVCR.Data = Str.data();
2641 Op->SVCR.Length = Str.size();
2642 Op->StartLoc = S;
2643 Op->EndLoc = S;
2644 return Op;
2645 }
2646
2647 static std::unique_ptr<AArch64Operand>
2648 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2649 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2650 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2651 Op->ShiftExtend.Type = ShOp;
2652 Op->ShiftExtend.Amount = Val;
2653 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2654 Op->StartLoc = S;
2655 Op->EndLoc = E;
2656 return Op;
2657 }
2658};
2659
2660} // end anonymous namespace.
2661
/// Print a human-readable debug form of this operand, one "<kind ...>"
/// spelling per operand kind.
void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    MAI.printExpr(OS, *getImm());
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    MAI.printExpr(OS, *getShiftedImmVal());
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    // Emit each register number in the list, stepping by the list stride.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_PHint:
    OS << getPHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    break;
  case k_TIndexHint:
    OS << getTIndexHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the mask as MaxBits binary digits, most significant bit first.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg().id() << ">";
    // If the register carries a shift/extend modifier, fall through and
    // print it with the k_ShiftExtend formatting.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2770
2771/// @name Auto-generated Match Functions
2772/// {
2773
2775
2776/// }
2777
/// Map a NEON vector register spelling ("v0".."v31", case-insensitive,
/// exact match only) to its AArch64::Q* register; returns 0 on no match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2814
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the sentinel for "not a valid suffix".
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    // NOTE(review): the assignment head (presumably
    // "Res = StringSwitch<std::pair<int,int>>(Suffix)") is elided from this
    // view; confirm against the original source.
        .Case("", {0, 0})
        .Case(".1d", {1, 64})
        .Case(".1q", {1, 128})
        // '.2h' needed for fp16 scalar pairwise reductions
        .Case(".2h", {2, 16})
        .Case(".2b", {2, 8})
        .Case(".2s", {2, 32})
        .Case(".2d", {2, 64})
        // '.4b' is another special case for the ARMv8.2a dot product
        // operand
        .Case(".4b", {4, 8})
        .Case(".4h", {4, 16})
        .Case(".4s", {4, 32})
        .Case(".8b", {8, 8})
        .Case(".8h", {8, 16})
        .Case(".16b", {16, 8})
        // Accept the width neutral ones, too, for verbose syntax. If
        // those aren't used in the right places, the token operand won't
        // match so all will work out.
        .Case(".b", {0, 8})
        .Case(".h", {0, 16})
        .Case(".s", {0, 32})
        .Case(".d", {0, 64})
        .Default({-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // NOTE(review): the assignment head of this StringSwitch is likewise
    // elided from this view; confirm against the original source.
        .Case("", {0, 0})
        .Case(".b", {0, 8})
        .Case(".h", {0, 16})
        .Case(".s", {0, 32})
        .Case(".d", {0, 64})
        .Case(".q", {0, 128})
        .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2873
2874static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2875 return parseVectorKind(Suffix, VectorKind).has_value();
2876}
2877
// NOTE(review): this function's signature line is elided from this view.
// Judging by matchRegisterNameAlias below, it maps SVE data-vector spellings
// ("z0".."z31", case-insensitive, exact match) to AArch64::Z*; 0 on no match.
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2914
// NOTE(review): this function's signature line is elided from this view.
// Judging by matchRegisterNameAlias below, it maps SVE predicate spellings
// ("p0".."p15", case-insensitive, exact match) to AArch64::P*; 0 on no match.
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2935
// NOTE(review): this function's signature line is elided from this view.
// It maps predicate-as-counter spellings ("pn0".."pn15", case-insensitive,
// exact match) to AArch64::PN*; 0 on no match.
  return StringSwitch<unsigned>(Name.lower())
      .Case("pn0", AArch64::PN0)
      .Case("pn1", AArch64::PN1)
      .Case("pn2", AArch64::PN2)
      .Case("pn3", AArch64::PN3)
      .Case("pn4", AArch64::PN4)
      .Case("pn5", AArch64::PN5)
      .Case("pn6", AArch64::PN6)
      .Case("pn7", AArch64::PN7)
      .Case("pn8", AArch64::PN8)
      .Case("pn9", AArch64::PN9)
      .Case("pn10", AArch64::PN10)
      .Case("pn11", AArch64::PN11)
      .Case("pn12", AArch64::PN12)
      .Case("pn13", AArch64::PN13)
      .Case("pn14", AArch64::PN14)
      .Case("pn15", AArch64::PN15)
      .Default(0);
}
2956
// NOTE(review): this function's signature line is elided from this view.
// It maps ZA tile spellings usable in tile lists ("za0.d".."za7.d",
// "za0.s".."za3.s", "za0.h"/"za1.h", "za0.b") to AArch64::ZA*; 0 on no match.
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2976
/// Map an SME matrix register spelling (case-insensitive, exact match) to its
/// AArch64::ZA* register: the full array "za", whole tiles "zaN.<t>", and the
/// horizontal/vertical slice forms "zaNh.<t>" / "zaNv.<t>" (which resolve to
/// the same tile register). Returns 0 on no match.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
3075
3076bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3077 SMLoc &EndLoc) {
3078 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3079}
3080
3081ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3082 SMLoc &EndLoc) {
3083 StartLoc = getLoc();
3084 ParseStatus Res = tryParseScalarRegister(Reg);
3085 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3086 return Res;
3087}
3088
3089// Matches a register name or register alias previously defined by '.req'
3090MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3091 RegKind Kind) {
3092 MCRegister Reg = MCRegister();
3093 if ((Reg = matchSVEDataVectorRegName(Name)))
3094 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3095
3096 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3097 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3098
3100 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3101
3102 if ((Reg = MatchNeonVectorRegName(Name)))
3103 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3104
3105 if ((Reg = matchMatrixRegName(Name)))
3106 return Kind == RegKind::Matrix ? Reg : MCRegister();
3107
3108 if (Name.equals_insensitive("zt0"))
3109 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3110
3111 // The parsed register must be of RegKind Scalar
3112 if ((Reg = MatchRegisterName(Name)))
3113 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3114
3115 if (!Reg) {
3116 // Handle a few common aliases of registers.
3117 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3118 .Case("fp", AArch64::FP)
3119 .Case("lr", AArch64::LR)
3120 .Case("x31", AArch64::XZR)
3121 .Case("w31", AArch64::WZR)
3122 .Default(0))
3123 return Kind == RegKind::Scalar ? Reg : MCRegister();
3124
3125 // Check for aliases registered via .req. Canonicalize to lower case.
3126 // That's more consistent since register names are case insensitive, and
3127 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3128 auto Entry = RegisterReqs.find(Name.lower());
3129 if (Entry == RegisterReqs.end())
3130 return MCRegister();
3131
3132 // set Reg if the match is the right kind of register
3133 if (Kind == Entry->getValue().first)
3134 Reg = Entry->getValue().second;
3135 }
3136 return Reg;
3137}
3138
3139unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3140 switch (K) {
3141 case RegKind::Scalar:
3142 case RegKind::NeonVector:
3143 case RegKind::SVEDataVector:
3144 return 32;
3145 case RegKind::Matrix:
3146 case RegKind::SVEPredicateVector:
3147 case RegKind::SVEPredicateAsCounter:
3148 return 16;
3149 case RegKind::LookupTable:
3150 return 1;
3151 }
3152 llvm_unreachable("Unsupported RegKind");
3153}
3154
3155/// tryParseScalarRegister - Try to parse a register name. The token must be an
3156/// Identifier when called, and if it is a register name the token is eaten and
3157/// the register is added to the operand list.
3158ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3159 const AsmToken &Tok = getTok();
3160 if (Tok.isNot(AsmToken::Identifier))
3161 return ParseStatus::NoMatch;
3162
3163 std::string lowerCase = Tok.getString().lower();
3164 MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3165 if (!Reg)
3166 return ParseStatus::NoMatch;
3167
3168 RegNum = Reg;
3169 Lex(); // Eat identifier token.
3170 return ParseStatus::Success;
3171}
3172
3173/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3174ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3175 SMLoc S = getLoc();
3176
3177 if (getTok().isNot(AsmToken::Identifier))
3178 return Error(S, "Expected cN operand where 0 <= N <= 15");
3179
3180 StringRef Tok = getTok().getIdentifier();
3181 if (Tok[0] != 'c' && Tok[0] != 'C')
3182 return Error(S, "Expected cN operand where 0 <= N <= 15");
3183
3184 uint32_t CRNum;
3185 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3186 if (BadNum || CRNum > 15)
3187 return Error(S, "Expected cN operand where 0 <= N <= 15");
3188
3189 Lex(); // Eat identifier token.
3190 Operands.push_back(
3191 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3192 return ParseStatus::Success;
3193}
3194
3195// Either an identifier for named values or a 6-bit immediate.
3196ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3197 SMLoc S = getLoc();
3198 const AsmToken &Tok = getTok();
3199
3200 unsigned MaxVal = 63;
3201
3202 // Immediate case, with optional leading hash:
3203 if (parseOptionalToken(AsmToken::Hash) ||
3204 Tok.is(AsmToken::Integer)) {
3205 const MCExpr *ImmVal;
3206 if (getParser().parseExpression(ImmVal))
3207 return ParseStatus::Failure;
3208
3209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3210 if (!MCE)
3211 return TokError("immediate value expected for prefetch operand");
3212 unsigned prfop = MCE->getValue();
3213 if (prfop > MaxVal)
3214 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3215 "] expected");
3216
3217 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3218 Operands.push_back(AArch64Operand::CreatePrefetch(
3219 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3220 return ParseStatus::Success;
3221 }
3222
3223 if (Tok.isNot(AsmToken::Identifier))
3224 return TokError("prefetch hint expected");
3225
3226 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3227 if (!RPRFM)
3228 return TokError("prefetch hint expected");
3229
3230 Operands.push_back(AArch64Operand::CreatePrefetch(
3231 RPRFM->Encoding, Tok.getString(), S, getContext()));
3232 Lex(); // Eat identifier token.
3233 return ParseStatus::Success;
3234}
3235
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a named prefetch hint or an immediate encoding, optionally
/// preceded by '#'. The template parameter selects the lookup tables and the
/// immediate range: SVE prefetches use the SVEPRFM table with encodings in
/// [0, 15]; base prefetches use the PRFM table with encodings in [0, 31].
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name -> encoding, dispatching on the table selected by IsSVEPrefetch.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Encoding -> canonical name, used to label immediate-form operands.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are meaningful as prefetch encodings.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
                      "] expected");

    // Unnamed encodings get an empty name string.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
                                                      S, getContext()));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError("prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3295
3296/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3297ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3298 SMLoc S = getLoc();
3299 const AsmToken &Tok = getTok();
3300 if (Tok.isNot(AsmToken::Identifier))
3301 return TokError("invalid operand for instruction");
3302
3303 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3304 if (!PSB)
3305 return TokError("invalid operand for instruction");
3306
3307 Operands.push_back(AArch64Operand::CreatePSBHint(
3308 PSB->Encoding, Tok.getString(), S, getContext()));
3309 Lex(); // Eat identifier token.
3310 return ParseStatus::Success;
3311}
3312
3313ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3314 SMLoc StartLoc = getLoc();
3315
3316 MCRegister RegNum;
3317
3318 // The case where xzr, xzr is not present is handled by an InstAlias.
3319
3320 auto RegTok = getTok(); // in case we need to backtrack
3321 if (!tryParseScalarRegister(RegNum).isSuccess())
3322 return ParseStatus::NoMatch;
3323
3324 if (RegNum != AArch64::XZR) {
3325 getLexer().UnLex(RegTok);
3326 return ParseStatus::NoMatch;
3327 }
3328
3329 if (parseComma())
3330 return ParseStatus::Failure;
3331
3332 if (!tryParseScalarRegister(RegNum).isSuccess())
3333 return TokError("expected register operand");
3334
3335 if (RegNum != AArch64::XZR)
3336 return TokError("xzr must be followed by xzr");
3337
3338 // We need to push something, since we claim this is an operand in .td.
3339 // See also AArch64AsmParser::parseKeywordOperand.
3340 Operands.push_back(AArch64Operand::CreateReg(
3341 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3342
3343 return ParseStatus::Success;
3344}
3345
3346/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3347ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3348 SMLoc S = getLoc();
3349 const AsmToken &Tok = getTok();
3350 if (Tok.isNot(AsmToken::Identifier))
3351 return TokError("invalid operand for instruction");
3352
3353 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3354 if (!BTI)
3355 return TokError("invalid operand for instruction");
3356
3357 Operands.push_back(AArch64Operand::CreateBTIHint(
3358 BTI->Encoding, Tok.getString(), S, getContext()));
3359 Lex(); // Eat identifier token.
3360 return ParseStatus::Success;
3361}
3362
3363/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3364ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3365 SMLoc S = getLoc();
3366 const AsmToken &Tok = getTok();
3367 if (Tok.isNot(AsmToken::Identifier))
3368 return TokError("invalid operand for instruction");
3369
3370 auto CMHPriority =
3371 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3372 if (!CMHPriority)
3373 return TokError("invalid operand for instruction");
3374
3375 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3376 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3377 Lex(); // Eat identifier token.
3378 return ParseStatus::Success;
3379}
3380
3381/// tryParseTIndexHint - Try to parse a TIndex operand
3382ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384 const AsmToken &Tok = getTok();
3385 if (Tok.isNot(AsmToken::Identifier))
3386 return TokError("invalid operand for instruction");
3387
3388 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
3389 if (!TIndex)
3390 return TokError("invalid operand for instruction");
3391
3392 Operands.push_back(AArch64Operand::CreateTIndexHint(
3393 TIndex->Encoding, Tok.getString(), S, getContext()));
3394 Lex(); // Eat identifier token.
3395 return ParseStatus::Success;
3396}
3397
3398/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3399/// instruction.
3400ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3401 SMLoc S = getLoc();
3402 const MCExpr *Expr = nullptr;
3403
3404 if (getTok().is(AsmToken::Hash)) {
3405 Lex(); // Eat hash token.
3406 }
3407
3408 if (parseSymbolicImmVal(Expr))
3409 return ParseStatus::Failure;
3410
3411 AArch64::Specifier ELFSpec;
3412 AArch64::Specifier DarwinSpec;
3413 int64_t Addend;
3414 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3415 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3416 // No modifier was specified at all; this is the syntax for an ELF basic
3417 // ADRP relocation (unfortunately).
3418 Expr =
3420 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3421 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3422 Addend != 0) {
3423 return Error(S, "gotpage label reference not allowed an addend");
3424 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3425 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3426 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3427 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3428 ELFSpec != AArch64::S_GOT_PAGE &&
3429 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3430 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3431 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3432 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3433 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3434 // The operand must be an @page or @gotpage qualified symbolref.
3435 return Error(S, "page or gotpage label reference expected");
3436 }
3437 }
3438
3439 // We have either a label reference possibly with addend or an immediate. The
3440 // addend is a raw value here. The linker will adjust it to only reference the
3441 // page.
3442 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3443 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3444
3445 return ParseStatus::Success;
3446}
3447
3448/// tryParseAdrLabel - Parse and validate a source label for the ADR
3449/// instruction.
3450ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3451 SMLoc S = getLoc();
3452 const MCExpr *Expr = nullptr;
3453
3454 // Leave anything with a bracket to the default for SVE
3455 if (getTok().is(AsmToken::LBrac))
3456 return ParseStatus::NoMatch;
3457
3458 if (getTok().is(AsmToken::Hash))
3459 Lex(); // Eat hash token.
3460
3461 if (parseSymbolicImmVal(Expr))
3462 return ParseStatus::Failure;
3463
3464 AArch64::Specifier ELFSpec;
3465 AArch64::Specifier DarwinSpec;
3466 int64_t Addend;
3467 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3468 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3469 // No modifier was specified at all; this is the syntax for an ELF basic
3470 // ADR relocation (unfortunately).
3472 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3473 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3474 // adr. It's not actually GOT entry page address but the GOT address
3475 // itself - we just share the same variant kind with :got_auth: operator
3476 // applied for adrp.
3477 // TODO: can we somehow get current TargetMachine object to call
3478 // getCodeModel() on it to ensure we are using tiny code model?
3479 return Error(S, "unexpected adr label");
3480 }
3481 }
3482
3483 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3484 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3485 return ParseStatus::Success;
3486}
3487
3488/// tryParseFPImm - A floating point immediate expression operand.
3489template <bool AddFPZeroAsLiteral>
3490ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3491 SMLoc S = getLoc();
3492
3493 bool Hash = parseOptionalToken(AsmToken::Hash);
3494
3495 // Handle negation, as that still comes through as a separate token.
3496 bool isNegative = parseOptionalToken(AsmToken::Minus);
3497
3498 const AsmToken &Tok = getTok();
3499 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3500 if (!Hash)
3501 return ParseStatus::NoMatch;
3502 return TokError("invalid floating point immediate");
3503 }
3504
3505 // Parse hexadecimal representation.
3506 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3507 if (Tok.getIntVal() > 255 || isNegative)
3508 return TokError("encoded floating point value out of range");
3509
3511 Operands.push_back(
3512 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3513 } else {
3514 // Parse FP representation.
3515 APFloat RealVal(APFloat::IEEEdouble());
3516 auto StatusOrErr =
3517 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3518 if (errorToBool(StatusOrErr.takeError()))
3519 return TokError("invalid floating point representation");
3520
3521 if (isNegative)
3522 RealVal.changeSign();
3523
3524 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3525 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3526 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3527 } else
3528 Operands.push_back(AArch64Operand::CreateFPImm(
3529 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3530 }
3531
3532 Lex(); // Eat the token.
3533
3534 return ParseStatus::Success;
3535}
3536
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
///
/// Also dispatches to immediate-range parsing ("#lo:hi") and accepts an SME
/// vector-group suffix ("vgx2"/"vgx4") after the comma.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "<int>:<int>" is an immediate range, not a shifted immediate.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(AsmToken::Comma)) {
    // Plain immediate, no optional suffix follows.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  // A vector-group suffix ("vgx2"/"vgx4") may follow instead of a shift.
  StringRef VecGroup;
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return ParseStatus::Success;
}
3603
3604/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3605/// suggestion to help common typos.
3607AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3608 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3609 .Case("eq", AArch64CC::EQ)
3610 .Case("ne", AArch64CC::NE)
3611 .Case("cs", AArch64CC::HS)
3612 .Case("hs", AArch64CC::HS)
3613 .Case("cc", AArch64CC::LO)
3614 .Case("lo", AArch64CC::LO)
3615 .Case("mi", AArch64CC::MI)
3616 .Case("pl", AArch64CC::PL)
3617 .Case("vs", AArch64CC::VS)
3618 .Case("vc", AArch64CC::VC)
3619 .Case("hi", AArch64CC::HI)
3620 .Case("ls", AArch64CC::LS)
3621 .Case("ge", AArch64CC::GE)
3622 .Case("lt", AArch64CC::LT)
3623 .Case("gt", AArch64CC::GT)
3624 .Case("le", AArch64CC::LE)
3625 .Case("al", AArch64CC::AL)
3626 .Case("nv", AArch64CC::NV)
3627 // SVE condition code aliases:
3628 .Case("none", AArch64CC::EQ)
3629 .Case("any", AArch64CC::NE)
3630 .Case("nlast", AArch64CC::HS)
3631 .Case("last", AArch64CC::LO)
3632 .Case("first", AArch64CC::MI)
3633 .Case("nfrst", AArch64CC::PL)
3634 .Case("pmore", AArch64CC::HI)
3635 .Case("plast", AArch64CC::LS)
3636 .Case("tcont", AArch64CC::GE)
3637 .Case("tstop", AArch64CC::LT)
3638 .Default(AArch64CC::Invalid);
3639
3640 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3641 Suggestion = "nfrst";
3642
3643 return CC;
3644}
3645
3646/// parseCondCode - Parse a Condition Code operand.
3647bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3648 bool invertCondCode) {
3649 SMLoc S = getLoc();
3650 const AsmToken &Tok = getTok();
3651 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3652
3653 StringRef Cond = Tok.getString();
3654 std::string Suggestion;
3655 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3656 if (CC == AArch64CC::Invalid) {
3657 std::string Msg = "invalid condition code";
3658 if (!Suggestion.empty())
3659 Msg += ", did you mean " + Suggestion + "?";
3660 return TokError(Msg);
3661 }
3662 Lex(); // Eat identifier token.
3663
3664 if (invertCondCode) {
3665 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3666 return TokError("condition codes AL and NV are invalid for this instruction");
3668 }
3669
3670 Operands.push_back(
3671 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3672 return false;
3673}
3674
3675ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3676 const AsmToken &Tok = getTok();
3677 SMLoc S = getLoc();
3678
3679 if (Tok.isNot(AsmToken::Identifier))
3680 return TokError("invalid operand for instruction");
3681
3682 unsigned PStateImm = -1;
3683 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3684 if (!SVCR)
3685 return ParseStatus::NoMatch;
3686 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3687 PStateImm = SVCR->Encoding;
3688
3689 Operands.push_back(
3690 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3691 Lex(); // Eat identifier token.
3692 return ParseStatus::Success;
3693}
3694
/// Parse an SME matrix register operand: either the full "za" array
/// (optionally with an element-width suffix, e.g. "za.s") or a tile /
/// row / column register such as "za0.d", "za3h.s", "za1v.q".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Whole-array form: "za" or "za.<suffix>".
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // Trailing 'h' or 'v' on the base name selects row/column access.
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3762
3763/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3764/// them if present.
3765ParseStatus
3766AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3767 const AsmToken &Tok = getTok();
3768 std::string LowerID = Tok.getString().lower();
3770 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3771 .Case("lsl", AArch64_AM::LSL)
3772 .Case("lsr", AArch64_AM::LSR)
3773 .Case("asr", AArch64_AM::ASR)
3774 .Case("ror", AArch64_AM::ROR)
3775 .Case("msl", AArch64_AM::MSL)
3776 .Case("uxtb", AArch64_AM::UXTB)
3777 .Case("uxth", AArch64_AM::UXTH)
3778 .Case("uxtw", AArch64_AM::UXTW)
3779 .Case("uxtx", AArch64_AM::UXTX)
3780 .Case("sxtb", AArch64_AM::SXTB)
3781 .Case("sxth", AArch64_AM::SXTH)
3782 .Case("sxtw", AArch64_AM::SXTW)
3783 .Case("sxtx", AArch64_AM::SXTX)
3785
3787 return ParseStatus::NoMatch;
3788
3789 SMLoc S = Tok.getLoc();
3790 Lex();
3791
3792 bool Hash = parseOptionalToken(AsmToken::Hash);
3793
3794 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3795 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3796 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3797 ShOp == AArch64_AM::MSL) {
3798 // We expect a number here.
3799 return TokError("expected #imm after shift specifier");
3800 }
3801
3802 // "extend" type operations don't need an immediate, #0 is implicit.
3803 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3804 Operands.push_back(
3805 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3806 return ParseStatus::Success;
3807 }
3808
3809 // Make sure we do actually have a number, identifier or a parenthesized
3810 // expression.
3811 SMLoc E = getLoc();
3812 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3813 !getTok().is(AsmToken::Identifier))
3814 return Error(E, "expected integer shift amount");
3815
3816 const MCExpr *ImmVal;
3817 if (getParser().parseExpression(ImmVal))
3818 return ParseStatus::Failure;
3819
3820 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3821 if (!MCE)
3822 return Error(E, "expected constant '#imm' after shift specifier");
3823
3824 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3825 Operands.push_back(AArch64Operand::CreateShiftExtend(
3826 ShOp, MCE->getValue(), true, S, E, getContext()));
3827 return ParseStatus::Success;
3828}
3829
3830static const struct Extension {
3831 const char *Name;
3833} ExtensionMap[] = {
3834 {"crc", {AArch64::FeatureCRC}},
3835 {"sm4", {AArch64::FeatureSM4}},
3836 {"sha3", {AArch64::FeatureSHA3}},
3837 {"sha2", {AArch64::FeatureSHA2}},
3838 {"aes", {AArch64::FeatureAES}},
3839 {"crypto", {AArch64::FeatureCrypto}},
3840 {"fp", {AArch64::FeatureFPARMv8}},
3841 {"simd", {AArch64::FeatureNEON}},
3842 {"ras", {AArch64::FeatureRAS}},
3843 {"rasv2", {AArch64::FeatureRASv2}},
3844 {"lse", {AArch64::FeatureLSE}},
3845 {"predres", {AArch64::FeaturePredRes}},
3846 {"predres2", {AArch64::FeatureSPECRES2}},
3847 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3848 {"mte", {AArch64::FeatureMTE}},
3849 {"memtag", {AArch64::FeatureMTE}},
3850 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3851 {"pan", {AArch64::FeaturePAN}},
3852 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3853 {"ccpp", {AArch64::FeatureCCPP}},
3854 {"rcpc", {AArch64::FeatureRCPC}},
3855 {"rng", {AArch64::FeatureRandGen}},
3856 {"sve", {AArch64::FeatureSVE}},
3857 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3858 {"sve2", {AArch64::FeatureSVE2}},
3859 {"sve-aes", {AArch64::FeatureSVEAES}},
3860 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3861 {"sve-sm4", {AArch64::FeatureSVESM4}},
3862 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3863 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3864 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3865 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3866 {"sve2-bitperm",
3867 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3868 AArch64::FeatureSVE2}},
3869 {"sve2p1", {AArch64::FeatureSVE2p1}},
3870 {"ls64", {AArch64::FeatureLS64}},
3871 {"xs", {AArch64::FeatureXS}},
3872 {"pauth", {AArch64::FeaturePAuth}},
3873 {"flagm", {AArch64::FeatureFlagM}},
3874 {"rme", {AArch64::FeatureRME}},
3875 {"sme", {AArch64::FeatureSME}},
3876 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3877 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3878 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3879 {"sme2", {AArch64::FeatureSME2}},
3880 {"sme2p1", {AArch64::FeatureSME2p1}},
3881 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3882 {"hbc", {AArch64::FeatureHBC}},
3883 {"mops", {AArch64::FeatureMOPS}},
3884 {"mec", {AArch64::FeatureMEC}},
3885 {"the", {AArch64::FeatureTHE}},
3886 {"d128", {AArch64::FeatureD128}},
3887 {"lse128", {AArch64::FeatureLSE128}},
3888 {"ite", {AArch64::FeatureITE}},
3889 {"cssc", {AArch64::FeatureCSSC}},
3890 {"rcpc3", {AArch64::FeatureRCPC3}},
3891 {"gcs", {AArch64::FeatureGCS}},
3892 {"bf16", {AArch64::FeatureBF16}},
3893 {"compnum", {AArch64::FeatureComplxNum}},
3894 {"dotprod", {AArch64::FeatureDotProd}},
3895 {"f32mm", {AArch64::FeatureMatMulFP32}},
3896 {"f64mm", {AArch64::FeatureMatMulFP64}},
3897 {"fp16", {AArch64::FeatureFullFP16}},
3898 {"fp16fml", {AArch64::FeatureFP16FML}},
3899 {"i8mm", {AArch64::FeatureMatMulInt8}},
3900 {"lor", {AArch64::FeatureLOR}},
3901 {"profile", {AArch64::FeatureSPE}},
3902 // "rdma" is the name documented by binutils for the feature, but
3903 // binutils also accepts incomplete prefixes of features, so "rdm"
3904 // works too. Support both spellings here.
3905 {"rdm", {AArch64::FeatureRDM}},
3906 {"rdma", {AArch64::FeatureRDM}},
3907 {"sb", {AArch64::FeatureSB}},
3908 {"ssbs", {AArch64::FeatureSSBS}},
3909 {"fp8", {AArch64::FeatureFP8}},
3910 {"faminmax", {AArch64::FeatureFAMINMAX}},
3911 {"fp8fma", {AArch64::FeatureFP8FMA}},
3912 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3913 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3914 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3915 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3916 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3917 {"lut", {AArch64::FeatureLUT}},
3918 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3919 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3920 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3921 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3922 {"cpa", {AArch64::FeatureCPA}},
3923 {"tlbiw", {AArch64::FeatureTLBIW}},
3924 {"pops", {AArch64::FeaturePoPS}},
3925 {"cmpbr", {AArch64::FeatureCMPBR}},
3926 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3927 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3928 {"fprcvt", {AArch64::FeatureFPRCVT}},
3929 {"lsfe", {AArch64::FeatureLSFE}},
3930 {"sme2p2", {AArch64::FeatureSME2p2}},
3931 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3932 {"sve2p2", {AArch64::FeatureSVE2p2}},
3933 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3934 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3935 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3936 {"lsui", {AArch64::FeatureLSUI}},
3937 {"occmo", {AArch64::FeatureOCCMO}},
3938 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3939 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3940 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3941 {"lscp", {AArch64::FeatureLSCP}},
3942 {"tlbid", {AArch64::FeatureTLBID}},
3943 {"mpamv2", {AArch64::FeatureMPAMv2}},
3944 {"mtetc", {AArch64::FeatureMTETC}},
3945 {"gcie", {AArch64::FeatureGCIE}},
3946 {"sme2p3", {AArch64::FeatureSME2p3}},
3947 {"sve2p3", {AArch64::FeatureSVE2p3}},
3948 {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3949 {"f16mm", {AArch64::FeatureF16MM}},
3950 {"f16f32dot", {AArch64::FeatureF16F32DOT}},
3951 {"f16f32mm", {AArch64::FeatureF16F32MM}},
3952 {"mops-go", {AArch64::FeatureMOPS_GO}},
3953 {"poe2", {AArch64::FeatureS1POE2}},
3954 {"tev", {AArch64::FeatureTEV}},
3955 {"btie", {AArch64::FeatureBTIE}},
3956 {"dit", {AArch64::FeatureDIT}},
3957 {"brbe", {AArch64::FeatureBRBE}},
3958 {"bti", {AArch64::FeatureBranchTargetId}},
3959 {"fcma", {AArch64::FeatureComplxNum}},
3960 {"jscvt", {AArch64::FeatureJS}},
3961 {"pauth-lr", {AArch64::FeaturePAuthLR}},
3962 {"ssve-fexpa", {AArch64::FeatureSSVE_FEXPA}},
3963 {"wfxt", {AArch64::FeatureWFxT}},
3965
3966static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3967 if (FBS[AArch64::HasV8_0aOps])
3968 Str += "ARMv8a";
3969 if (FBS[AArch64::HasV8_1aOps])
3970 Str += "ARMv8.1a";
3971 else if (FBS[AArch64::HasV8_2aOps])
3972 Str += "ARMv8.2a";
3973 else if (FBS[AArch64::HasV8_3aOps])
3974 Str += "ARMv8.3a";
3975 else if (FBS[AArch64::HasV8_4aOps])
3976 Str += "ARMv8.4a";
3977 else if (FBS[AArch64::HasV8_5aOps])
3978 Str += "ARMv8.5a";
3979 else if (FBS[AArch64::HasV8_6aOps])
3980 Str += "ARMv8.6a";
3981 else if (FBS[AArch64::HasV8_7aOps])
3982 Str += "ARMv8.7a";
3983 else if (FBS[AArch64::HasV8_8aOps])
3984 Str += "ARMv8.8a";
3985 else if (FBS[AArch64::HasV8_9aOps])
3986 Str += "ARMv8.9a";
3987 else if (FBS[AArch64::HasV9_0aOps])
3988 Str += "ARMv9-a";
3989 else if (FBS[AArch64::HasV9_1aOps])
3990 Str += "ARMv9.1a";
3991 else if (FBS[AArch64::HasV9_2aOps])
3992 Str += "ARMv9.2a";
3993 else if (FBS[AArch64::HasV9_3aOps])
3994 Str += "ARMv9.3a";
3995 else if (FBS[AArch64::HasV9_4aOps])
3996 Str += "ARMv9.4a";
3997 else if (FBS[AArch64::HasV9_5aOps])
3998 Str += "ARMv9.5a";
3999 else if (FBS[AArch64::HasV9_6aOps])
4000 Str += "ARMv9.6a";
4001 else if (FBS[AArch64::HasV9_7aOps])
4002 Str += "ARMv9.7a";
4003 else if (FBS[AArch64::HasV8_0rOps])
4004 Str += "ARMv8r";
4005 else {
4006 SmallVector<std::string, 2> ExtMatches;
4007 for (const auto& Ext : ExtensionMap) {
4008 // Use & in case multiple features are enabled
4009 if ((FBS & Ext.Features) != FeatureBitset())
4010 ExtMatches.push_back(Ext.Name);
4011 }
4012 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
4013 }
4014}
4015
4016void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4017 SMLoc S) {
4018 const uint16_t Op2 = Encoding & 7;
4019 const uint16_t Cm = (Encoding & 0x78) >> 3;
4020 const uint16_t Cn = (Encoding & 0x780) >> 7;
4021 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4022
4023 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
4024
4025 Operands.push_back(
4026 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4027 Operands.push_back(
4028 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
4029 Operands.push_back(
4030 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
4031 Expr = MCConstantExpr::create(Op2, getContext());
4032 Operands.push_back(
4033 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4034}
4035
/// parseSysAlias - The IC, DC, AT, TLBI, MLBI and GIC{R} and GSB instructions
/// are simple aliases for the SYS instruction. Parse them specially so that
/// we create a SYS MCInst.
///
/// On entry the mnemonic has already been consumed and the current token is
/// the alias's operation name (e.g. "ivau" for IC). Returns true on error
/// (diagnostic already emitted), false on success.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias handled here lowers to the SYS mnemonic.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();
  // Most aliases require a register operand; individual cases override this
  // from their lookup-table entry.
  bool ExpectRegister = true;
  bool OptionalRegister = false;
  bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
  bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // NOTE(review): Str is returned with only the "requires: " prefix here;
      // expected the required feature names to be appended first — confirm.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = IC->NeedsReg;
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = TLBI->NeedsReg;
    // NOTE(review): shadows the hasTLBID declared at function scope above.
    bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
    if (hasAll || hasTLBID) {
      OptionalRegister = TLBI->OptionalReg;
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "mlbi") {
    const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
    if (!MLBI)
      return TokError("invalid operand for MLBI instruction");
    else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = MLBI->NeedsReg;
    createSysAlias(MLBI->Encoding, Operands, S);
  } else if (Mnemonic == "gic") {
    const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
    if (!GIC)
      return TokError("invalid operand for GIC instruction");
    else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = GIC->NeedsReg;
    createSysAlias(GIC->Encoding, Operands, S);
  } else if (Mnemonic == "gsb") {
    const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
    if (!GSB)
      return TokError("invalid operand for GSB instruction");
    else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
      return TokError(Str);
    }
    // GSB never takes a register operand.
    ExpectRegister = false;
    createSysAlias(GSB->Encoding, Operands, S);
  } else if (Mnemonic == "plbi") {
    const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
    if (!PLBI)
      return TokError("invalid operand for PLBI instruction");
    else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
      return TokError(Str);
    }
    ExpectRegister = PLBI->NeedsReg;
    if (hasAll || hasTLBID) {
      OptionalRegister = PLBI->OptionalReg;
    }
    createSysAlias(PLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
             Mnemonic == "cosp") {

    // Prediction-restriction instructions only accept the literal "rctx".
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // Select op2 for the SYS encoding from the mnemonic.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
                         : Mnemonic == "dvp" ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp" ? 0b111
                         : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  // Enforce presence/absence of the register unless the table marked it
  // as optional for this operation.
  if (!OptionalRegister) {
    if (ExpectRegister && !HasRegister)
      return TokError("specified " + Mnemonic + " op requires a register");
    else if (!ExpectRegister && HasRegister)
      return TokError("specified " + Mnemonic + " op does not use a register");
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4198
/// parseSyslAlias - The GICR instructions are simple aliases for
/// the SYSL instruction. Parse them specially so that we create a
/// SYS MCInst.
///
/// Unlike parseSysAlias, the destination register comes FIRST here
/// (SYSL form: "gicr <Xt>, <op>"). Returns true on error, false on success.
bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysl", NameLoc, getContext()));

  // Now expect two operands (identifier + register)
  SMLoc startLoc = getLoc();
  const AsmToken &regTok = getTok();
  StringRef reg = regTok.getString();
  MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
  if (!Reg)
    return TokError("expected register operand");

  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));

  Lex(); // Eat token
  if (parseToken(AsmToken::Comma))
    return true;

  // Check for identifier
  const AsmToken &operandTok = getTok();
  StringRef Op = operandTok.getString();
  SMLoc S2 = operandTok.getLoc();
  Lex(); // Eat token

  if (Mnemonic == "gicr") {
    const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
    if (!GICR)
      return Error(S2, "invalid operand for GICR instruction");
    else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
      // NOTE(review): Str carries only the "requires: " prefix; confirm the
      // required feature names are appended before it is reported.
      std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
      return Error(S2, Str);
    }
    createSysAlias(GICR->Encoding, Operands, S2);
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4247
/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
///
/// Expects "<op>, <Xt pair>" after the mnemonic; the register pair is either
/// the XZR pseudo-pair or a sequential GPR pair. Returns true on error.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    const AArch64TLBIP::TLBIP *TLBIP = AArch64TLBIP::lookupTLBIPByName(Op);
    if (!TLBIP)
      return TokError("invalid operand for TLBIP instruction");
    // All TLBIP forms require the 128-bit system-register extension.
    if (!getSTI().hasFeature(AArch64::FeatureD128) &&
        !getSTI().hasFeature(AArch64::FeatureAll))
      return TokError("instruction requires: d128");
    if (!TLBIP->haveFeatures(getSTI().getFeatureBits())) {
      // NOTE(review): Str carries only the "requires: " prefix; confirm the
      // required feature names are appended before it is reported.
      std::string Str("instruction requires: ");
      return TokError(Str);
    }
    createSysAlias(TLBIP->Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected register identifier");
  // Accept "xzr" (pseudo pair) first, then a real sequential GPR pair.
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4297
/// tryParseBarrierOperand - Parse the option operand of a barrier
/// instruction (DSB/DMB/ISB/TSB): either an immediate in [0, 15] or a named
/// option. Returns NoMatch for a DSB operand that may instead belong to the
/// nXS variant (see tryParseBarriernXSOperand).
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
    return TokError("'csync' operand expected");
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Save the integer token so it can be pushed back if this turns out to
    // be an nXS-variant immediate.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(ExprLoc, "barrier operand out of range");
    // Known encodings get their symbolic name attached; unknown ones keep "".
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError("invalid barrier option name");
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4359
4360ParseStatus
4361AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4362 const AsmToken &Tok = getTok();
4363
4364 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4365 if (Mnemonic != "dsb")
4366 return ParseStatus::Failure;
4367
4368 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4369 // Immediate operand.
4370 const MCExpr *ImmVal;
4371 SMLoc ExprLoc = getLoc();
4372 if (getParser().parseExpression(ImmVal))
4373 return ParseStatus::Failure;
4374 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4375 if (!MCE)
4376 return Error(ExprLoc, "immediate value expected for barrier operand");
4377 int64_t Value = MCE->getValue();
4378 // v8.7-A DSB in the nXS variant accepts only the following immediate
4379 // values: 16, 20, 24, 28.
4380 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4381 return Error(ExprLoc, "barrier operand out of range");
4382 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4383 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4384 ExprLoc, getContext(),
4385 true /*hasnXSModifier*/));
4386 return ParseStatus::Success;
4387 }
4388
4389 if (Tok.isNot(AsmToken::Identifier))
4390 return TokError("invalid operand for instruction");
4391
4392 StringRef Operand = Tok.getString();
4393 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4394
4395 if (!DB)
4396 return TokError("invalid barrier option name");
4397
4398 Operands.push_back(
4399 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4400 getContext(), true /*hasnXSModifier*/));
4401 Lex(); // Consume the option
4402
4403 return ParseStatus::Success;
4404}
4405
4406ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4407 const AsmToken &Tok = getTok();
4408
4409 if (Tok.isNot(AsmToken::Identifier))
4410 return ParseStatus::NoMatch;
4411
4412 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4413 return ParseStatus::NoMatch;
4414
4415 int MRSReg, MSRReg;
4416 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4417 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4418 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4419 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4420 } else
4421 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4422
4423 unsigned PStateImm = -1;
4424 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4425 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4426 PStateImm = PState15->Encoding;
4427 if (!PState15) {
4428 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4429 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4430 PStateImm = PState1->Encoding;
4431 }
4432
4433 Operands.push_back(
4434 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4435 PStateImm, getContext()));
4436 Lex(); // Eat identifier
4437
4438 return ParseStatus::Success;
4439}
4440
/// tryParsePHintInstOperand - Parse a prefetch-hint instruction operand:
/// the current identifier token is matched against the PHint name table and
/// turned into a PHintInst operand.
ParseStatus
AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // NOTE(review): the lookup that defines PH (presumably by Tok's string) is
  // not visible in this excerpt — confirm it immediately precedes this check.
  if (!PH)
    return TokError("invalid operand for instruction");

  Operands.push_back(AArch64Operand::CreatePHintInst(
      PH->Encoding, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
4457
4458/// tryParseNeonVectorRegister - Parse a vector register operand.
4459bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4460 if (getTok().isNot(AsmToken::Identifier))
4461 return true;
4462
4463 SMLoc S = getLoc();
4464 // Check for a vector register specifier first.
4465 StringRef Kind;
4466 MCRegister Reg;
4467 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4468 if (!Res.isSuccess())
4469 return true;
4470
4471 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4472 if (!KindRes)
4473 return true;
4474
4475 unsigned ElementWidth = KindRes->second;
4476 Operands.push_back(
4477 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4478 S, getLoc(), getContext()));
4479
4480 // If there was an explicit qualifier, that goes on as a literal text
4481 // operand.
4482 if (!Kind.empty())
4483 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4484
4485 return tryParseVectorIndex(Operands).isFailure();
4486}
4487
4488ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4489 SMLoc SIdx = getLoc();
4490 if (parseOptionalToken(AsmToken::LBrac)) {
4491 const MCExpr *ImmVal;
4492 if (getParser().parseExpression(ImmVal))
4493 return ParseStatus::NoMatch;
4494 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4495 if (!MCE)
4496 return TokError("immediate value expected for vector index");
4497
4498 SMLoc E = getLoc();
4499
4500 if (parseToken(AsmToken::RBrac, "']' expected"))
4501 return ParseStatus::Failure;
4502
4503 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4504 E, getContext()));
4505 return ParseStatus::Success;
4506 }
4507
4508 return ParseStatus::NoMatch;
4509}
4510
4511// tryParseVectorRegister - Try to parse a vector register name with
4512// optional kind specifier. If it is a register specifier, eat the token
4513// and return it.
4514ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4515 StringRef &Kind,
4516 RegKind MatchKind) {
4517 const AsmToken &Tok = getTok();
4518
4519 if (Tok.isNot(AsmToken::Identifier))
4520 return ParseStatus::NoMatch;
4521
4522 StringRef Name = Tok.getString();
4523 // If there is a kind specifier, it's separated from the register name by
4524 // a '.'.
4525 size_t Start = 0, Next = Name.find('.');
4526 StringRef Head = Name.slice(Start, Next);
4527 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4528
4529 if (RegNum) {
4530 if (Next != StringRef::npos) {
4531 Kind = Name.substr(Next);
4532 if (!isValidVectorKind(Kind, MatchKind))
4533 return TokError("invalid vector kind qualifier");
4534 }
4535 Lex(); // Eat the register token.
4536
4537 Reg = RegNum;
4538 return ParseStatus::Success;
4539 }
4540
4541 return ParseStatus::NoMatch;
4542}
4543
4544ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4545 OperandVector &Operands) {
4546 ParseStatus Status =
4547 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4548 if (!Status.isSuccess())
4549 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4550 return Status;
4551}
4552
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Handles the register itself (with optional element-size suffix), an
/// optional "[index]" / chained operand, and an optional "/m" or "/z"
/// predication qualifier. RK selects predicate-vector vs
/// predicate-as-counter parsing rules.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (!Res.isSuccess())
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      // Predicate-as-counter: the bracket is a vector index on this operand.
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(S, "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(getLoc(), "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(getLoc(), "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4615
4616/// parseRegister - Parse a register operand.
4617bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4618 // Try for a Neon vector register.
4619 if (!tryParseNeonVectorRegister(Operands))
4620 return false;
4621
4622 if (tryParseZTOperand(Operands).isSuccess())
4623 return false;
4624
4625 // Otherwise try for a scalar register.
4626 if (tryParseGPROperand<false>(Operands).isSuccess())
4627 return false;
4628
4629 return true;
4630}
4631
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":specifier:" relocation modifier (e.g. ":lo12:sym"), wrapping
/// the parsed expression in an MCSpecifierExpr when one is present. On
/// Mach-O-style targets an "@specifier" suffix plus one trailing +/- term
/// is also accepted. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64::Specifier RefKind;
  SMLoc Loc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
                  .Case("lo12", AArch64::S_LO12)
                  .Case("abs_g3", AArch64::S_ABS_G3)
                  .Case("abs_g2", AArch64::S_ABS_G2)
                  .Case("abs_g2_s", AArch64::S_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
                  .Case("abs_g1", AArch64::S_ABS_G1)
                  .Case("abs_g1_s", AArch64::S_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
                  .Case("abs_g0", AArch64::S_ABS_G0)
                  .Case("abs_g0_s", AArch64::S_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
                  .Case("prel_g3", AArch64::S_PREL_G3)
                  .Case("prel_g2", AArch64::S_PREL_G2)
                  .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
                  .Case("prel_g1", AArch64::S_PREL_G1)
                  .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
                  .Case("prel_g0", AArch64::S_PREL_G0)
                  .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
                  .Case("dtprel", AArch64::S_DTPREL)
                  .Case("dtprel_g2", AArch64::S_DTPREL_G2)
                  .Case("dtprel_g1", AArch64::S_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64::S_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64::S_TPREL_G2)
                  .Case("tprel_g1", AArch64::S_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64::S_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64::S_TPREL_HI12)
                  .Case("tprel_lo12", AArch64::S_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
                  .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
                  .Case("got", AArch64::S_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64::S_GOT_LO12)
                  .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
                  .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
                  .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
                  .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
                  .Case("secrel_lo12", AArch64::S_SECREL_LO12)
                  .Case("secrel_hi12", AArch64::S_SECREL_HI12)
                  .Default(AArch64::S_INVALID);

    if (RefKind == AArch64::S_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Fold the specifier into the expression so later layers see one operand.
  if (HasELFModifier)
    ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);

  SMLoc EndLoc;
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    // Mach-O style: accept "expr@specifier" plus at most one +/- term.
    if (getParser().parseAtSpecifier(ImmVal, EndLoc))
      return true;
    const MCExpr *Term;
    MCBinaryExpr::Opcode Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    else
      return false;
    if (getParser().parsePrimaryExpr(Term, EndLoc))
      return true;
    ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
  }

  return false;
}
4731
/// tryParseMatrixTileList - Parse an SME matrix tile list operand of the
/// form "{}" (empty), "{za}" (all tiles), or "{ ZAx.<T>, ... }", producing a
/// MatrixTileList operand whose mask covers the ZAD registers aliased by the
/// listed tiles. Out-of-order and duplicate tiles only warn.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parses one "ZAx.<T>" tile, returning its register and element width.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes)
      return TokError(
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be pushed back if the first tile doesn't parse.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return ParseStatus::Failure;

    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Restore the '{' so another operand parser can try the token stream.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");

    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  // Build the bitmask relative to ZAD0's encoding.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return ParseStatus::Success;
}
4837
/// Parse a curly-brace-delimited list of vector registers of kind
/// \p VectorKind, e.g. "{ v0.8b, v1.8b }", a range "{ z0.s - z3.s }", or an
/// SME strided list "{ z0.d, z8.d }".  On success a VectorList operand is
/// appended to \p Operands (optionally followed by an index operand).
/// \p ExpectMatch selects whether an unrecognised register after '{' is a
/// hard error or merely NoMatch (so other list parsers may retry).
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function: parses one vector register and validates
  // its "." element-kind suffix.
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" is a lookup-table register, not a vector register; report
    // NoMatch so the ZT0 operand parser can handle it.
    if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive("zt0"))
      return ParseStatus::NoMatch;

    // "za..." identifiers may still form a matrix tile list, so they are not
    // diagnosed here even when NoMatchIsError is set.
    if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");

    return ParseStatus::NoMatch;
  };

  // Size of the register class, used for wraparound arithmetic below.
  unsigned NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  MCRegister PrevReg = FirstReg;
  unsigned Count = 1;

  unsigned Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form "{ vN.t - vM.t }": only the endpoints are written; the
    // distance between them (with wraparound) gives the list length.
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));

    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated form: every register is written out; all gaps must use
    // the same stride (1 for ordinary lists, larger for SME strided lists).
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        // The first gap fixes the stride required of every later gap.
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  // A vector list may be followed by an element index, e.g. "{...}[2]".
  if (getTok().is(AsmToken::LBrac)) {
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  return ParseStatus::Success;
}
4968
4969/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4970bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4971 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4972 if (!ParseRes.isSuccess())
4973 return true;
4974
4975 return tryParseVectorIndex(Operands).isFailure();
4976}
4977
4978ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4979 SMLoc StartLoc = getLoc();
4980
4981 MCRegister RegNum;
4982 ParseStatus Res = tryParseScalarRegister(RegNum);
4983 if (!Res.isSuccess())
4984 return Res;
4985
4986 if (!parseOptionalToken(AsmToken::Comma)) {
4987 Operands.push_back(AArch64Operand::CreateReg(
4988 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4989 return ParseStatus::Success;
4990 }
4991
4992 parseOptionalToken(AsmToken::Hash);
4993
4994 if (getTok().isNot(AsmToken::Integer))
4995 return Error(getLoc(), "index must be absent or #0");
4996
4997 const MCExpr *ImmVal;
4998 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4999 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
5000 return Error(getLoc(), "index must be absent or #0");
5001
5002 Operands.push_back(AArch64Operand::CreateReg(
5003 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
5004 return ParseStatus::Success;
5005}
5006
/// Parse a ZT0 lookup-table register operand, optionally followed by an
/// indexed form "zt0[<imm>{, mul ...}]" as used by SME2 lookup-table
/// instructions.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (!Reg)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    const MCExpr *ImmVal;
    // NOTE(review): by this point the register and '[' tokens have already
    // been consumed and pushed, so returning NoMatch (rather than Failure)
    // on a malformed expression looks suspicious — confirm callers handle it.
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");
    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
    // Optional "mul ..." decoration inside the brackets.
    if (parseOptionalToken(AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        AArch64Operand::CreateToken("]", getLoc(), getContext()));
  }
  return ParseStatus::Success;
}
5044
5045template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5046ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5047 SMLoc StartLoc = getLoc();
5048
5049 MCRegister RegNum;
5050 ParseStatus Res = tryParseScalarRegister(RegNum);
5051 if (!Res.isSuccess())
5052 return Res;
5053
5054 // No shift/extend is the default.
5055 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
5056 Operands.push_back(AArch64Operand::CreateReg(
5057 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
5058 return ParseStatus::Success;
5059 }
5060
5061 // Eat the comma
5062 Lex();
5063
5064 // Match the shift
5066 Res = tryParseOptionalShiftExtend(ExtOpnd);
5067 if (!Res.isSuccess())
5068 return Res;
5069
5070 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5071 Operands.push_back(AArch64Operand::CreateReg(
5072 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5073 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5074 Ext->hasShiftExtendAmount()));
5075
5076 return ParseStatus::Success;
5077}
5078
5079bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5080 MCAsmParser &Parser = getParser();
5081
5082 // Some SVE instructions have a decoration after the immediate, i.e.
5083 // "mul vl". We parse them here and add tokens, which must be present in the
5084 // asm string in the tablegen instruction.
5085 bool NextIsVL =
5086 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
5087 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
5088 if (!getTok().getString().equals_insensitive("mul") ||
5089 !(NextIsVL || NextIsHash))
5090 return true;
5091
5092 Operands.push_back(
5093 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
5094 Lex(); // Eat the "mul"
5095
5096 if (NextIsVL) {
5097 Operands.push_back(
5098 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
5099 Lex(); // Eat the "vl"
5100 return false;
5101 }
5102
5103 if (NextIsHash) {
5104 Lex(); // Eat the #
5105 SMLoc S = getLoc();
5106
5107 // Parse immediate operand.
5108 const MCExpr *ImmVal;
5109 if (!Parser.parseExpression(ImmVal))
5110 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
5111 Operands.push_back(AArch64Operand::CreateImm(
5112 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
5113 getContext()));
5114 return false;
5115 }
5116 }
5117
5118 return Error(getLoc(), "expected 'vl' or '#<imm>'");
5119}
5120
5121bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5122 StringRef &VecGroup) {
5123 MCAsmParser &Parser = getParser();
5124 auto Tok = Parser.getTok();
5125 if (Tok.isNot(AsmToken::Identifier))
5126 return true;
5127
5128 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5129 .Case("vgx2", "vgx2")
5130 .Case("vgx4", "vgx4")
5131 .Default("");
5132
5133 if (VG.empty())
5134 return true;
5135
5136 VecGroup = VG;
5137 Parser.Lex(); // Eat vgx[2|4]
5138 return false;
5139}
5140
5141bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5142 auto Tok = getTok();
5143 if (Tok.isNot(AsmToken::Identifier))
5144 return true;
5145
5146 auto Keyword = Tok.getString();
5147 Keyword = StringSwitch<StringRef>(Keyword.lower())
5148 .Case("sm", "sm")
5149 .Case("za", "za")
5150 .Default(Keyword);
5151 Operands.push_back(
5152 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5153
5154 Lex();
5155 return false;
5156}
5157
/// parseOperand - Parse an AArch64 instruction operand.  For now this parses
/// the operand regardless of the mnemonic.
/// \p isCondCode - the operand at this position is a condition code.
/// \p invertCondCode - a parsed condition code should be inverted (used by
/// aliases such as cset/cinc whose underlying instruction takes the inverse).
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  // Shared helper: after an immediate, optionally parse a trailing
  // ", <shift|extend>"; un-lex the comma when what follows isn't one.
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    if (parseOptionalToken(AsmToken::Comma)) {
      ParseStatus Res = tryParseOptionalShiftExtend(Operands);
      if (!Res.isNoMatch())
        return Res.isFailure();
      getLexer().UnLex(SavedTok);
    }
    return false;
  };
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a symbolic/constant immediate expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    if (!parseNeonVectorList(Operands))
      return false;

    // Not a NEON vector list: emit '{' as a plain token (e.g. for SME tile
    // lists) and continue with the next operand.
    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands)) {
      // Parse an optional shift/extend modifier.
      AsmToken SavedTok = getTok();
      if (parseOptionalToken(AsmToken::Comma)) {
        // The operand after the register may be a label (e.g. ADR/ADRP). Check
        // such cases and don't report an error when <label> happens to match a
        // shift/extend modifier.
        ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
                                                 /*ParseForAllFeatures=*/true);
        if (!Res.isNoMatch())
          return Res.isFailure();
        Res = tryParseOptionalShiftExtend(Operands);
        if (!Res.isNoMatch())
          return Res.isFailure();
        getLexer().UnLex(SavedTok);
      }
      return false;
    }

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal, *Term;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    // An optional relocation specifier may follow the symbol.
    if (getParser().parseAtSpecifier(IdVal, E))
      return true;
    // Allow a single trailing "+ term" / "- term" addend on the expression.
    std::optional<MCBinaryExpr::Opcode> Opcode;
    if (parseOptionalToken(AsmToken::Plus))
      Opcode = MCBinaryExpr::Add;
    else if (parseOptionalToken(AsmToken::Minus))
      Opcode = MCBinaryExpr::Sub;
    if (Opcode) {
      if (getParser().parsePrimaryExpr(Term, E))
        return true;
      IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
    }
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(getTok());
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the asm string.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));

    // Parse an optional shift/extend modifier.
    return parseOptionalShiftExtend(Tok);
  }
  case AsmToken::Equal: {
    // The "ldr <reg>, =<expr>" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Shift out trailing 16-bit zero chunks so the value fits a single
      // movz with an LSL #16/#32/#48.
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                      ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
5383
5384bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5385 const MCExpr *Expr = nullptr;
5386 SMLoc L = getLoc();
5387 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5388 return true;
5389 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5390 if (check(!Value, L, "expected constant expression"))
5391 return true;
5392 Out = Value->getValue();
5393 return false;
5394}
5395
5396bool AArch64AsmParser::parseComma() {
5397 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5398 return true;
5399 // Eat the comma
5400 Lex();
5401 return false;
5402}
5403
5404bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5405 unsigned First, unsigned Last) {
5406 MCRegister Reg;
5407 SMLoc Start, End;
5408 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5409 return true;
5410
5411 // Special handling for FP and LR; they aren't linearly after x28 in
5412 // the registers enum.
5413 unsigned RangeEnd = Last;
5414 if (Base == AArch64::X0) {
5415 if (Last == AArch64::FP) {
5416 RangeEnd = AArch64::X28;
5417 if (Reg == AArch64::FP) {
5418 Out = 29;
5419 return false;
5420 }
5421 }
5422 if (Last == AArch64::LR) {
5423 RangeEnd = AArch64::X28;
5424 if (Reg == AArch64::FP) {
5425 Out = 29;
5426 return false;
5427 } else if (Reg == AArch64::LR) {
5428 Out = 30;
5429 return false;
5430 }
5431 }
5432 }
5433
5434 if (check(Reg < First || Reg > RangeEnd, Start,
5435 Twine("expected register in range ") +
5438 return true;
5439 Out = Reg - Base;
5440 return false;
5441}
5442
5443bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5444 const MCParsedAsmOperand &Op2) const {
5445 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5446 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5447
5448 if (AOp1.isVectorList() && AOp2.isVectorList())
5449 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5450 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5451 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5452
5453 if (!AOp1.isReg() || !AOp2.isReg())
5454 return false;
5455
5456 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5457 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5458 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5459
5460 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5461 "Testing equality of non-scalar registers not supported");
5462
5463 // Check if a registers match their sub/super register classes.
5464 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5465 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5466 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5467 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5468 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5469 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5470 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5471 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5472
5473 return false;
5474}
5475
/// Parse an AArch64 instruction mnemonic followed by its operands.
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize one-word conditional-branch shorthands (e.g. "beq") to the
  // dotted "b.<cond>" spelling before any further processing.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
  // instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Head, NameLoc, Operands);

  // GICR instructions are aliases for the SYSL instruction.
  if (Head == "gicr")
    return parseSyslAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    // The '.' and the condition code become separate suffix operands.
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.  The condCode flags tell parseOperand
      // which operand position (if any) holds a condition code.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5628
5629static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5630 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5631 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5632 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5633 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5634 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5635 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5636 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5637}
5638
5639// FIXME: This entire function is a giant hack to provide us with decent
5640// operand range validation/diagnostics until TableGen/MC can be extended
5641// to support autogeneration of this kind of validation.
5642bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5643 SmallVectorImpl<SMLoc> &Loc) {
5644 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5645 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5646
5647 // A prefix only applies to the instruction following it. Here we extract
5648 // prefix information for the next instruction before validating the current
5649 // one so that in the case of failure we don't erroneously continue using the
5650 // current prefix.
5651 PrefixInfo Prefix = NextPrefix;
5652 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5653
5654 // Before validating the instruction in isolation we run through the rules
5655 // applicable when it follows a prefix instruction.
5656 // NOTE: brk & hlt can be prefixed but require no additional validation.
5657 if (Prefix.isActive() &&
5658 (Inst.getOpcode() != AArch64::BRK) &&
5659 (Inst.getOpcode() != AArch64::HLT)) {
5660
5661 // Prefixed instructions must have a destructive operand.
5664 return Error(IDLoc, "instruction is unpredictable when following a"
5665 " movprfx, suggest replacing movprfx with mov");
5666
5667 // Destination operands must match.
5668 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5669 return Error(Loc[0], "instruction is unpredictable when following a"
5670 " movprfx writing to a different destination");
5671
5672 // Destination operand must not be used in any other location.
5673 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5674 if (Inst.getOperand(i).isReg() &&
5675 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5676 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5677 return Error(Loc[0], "instruction is unpredictable when following a"
5678 " movprfx and destination also used as non-destructive"
5679 " source");
5680 }
5681
5682 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5683 if (Prefix.isPredicated()) {
5684 int PgIdx = -1;
5685
5686 // Find the instructions general predicate.
5687 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5688 if (Inst.getOperand(i).isReg() &&
5689 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5690 PgIdx = i;
5691 break;
5692 }
5693
5694 // Instruction must be predicated if the movprfx is predicated.
5695 if (PgIdx == -1 ||
5697 return Error(IDLoc, "instruction is unpredictable when following a"
5698 " predicated movprfx, suggest using unpredicated movprfx");
5699
5700 // Instruction must use same general predicate as the movprfx.
5701 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5702 return Error(IDLoc, "instruction is unpredictable when following a"
5703 " predicated movprfx using a different general predicate");
5704
5705 // Instruction element type must match the movprfx.
5706 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5707 return Error(IDLoc, "instruction is unpredictable when following a"
5708 " predicated movprfx with a different element size");
5709 }
5710 }
5711
5712 // On ARM64EC, only valid registers may be used. Warn against using
5713 // explicitly disallowed registers.
5714 if (IsWindowsArm64EC) {
5715 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5716 if (Inst.getOperand(i).isReg()) {
5717 MCRegister Reg = Inst.getOperand(i).getReg();
5718 // At this point, vector registers are matched to their
5719 // appropriately sized alias.
5720 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5721 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5722 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5723 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5724 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5725 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5726 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5727 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5728 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5729 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5730 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5731 " is disallowed on ARM64EC.");
5732 }
5733 }
5734 }
5735 }
5736
5737 // Check for indexed addressing modes w/ the base register being the
5738 // same as a destination/source register or pair load where
5739 // the Rt == Rt2. All of those are undefined behaviour.
5740 switch (Inst.getOpcode()) {
5741 case AArch64::LDPSWpre:
5742 case AArch64::LDPWpost:
5743 case AArch64::LDPWpre:
5744 case AArch64::LDPXpost:
5745 case AArch64::LDPXpre: {
5746 MCRegister Rt = Inst.getOperand(1).getReg();
5747 MCRegister Rt2 = Inst.getOperand(2).getReg();
5748 MCRegister Rn = Inst.getOperand(3).getReg();
5749 if (RI->isSubRegisterEq(Rn, Rt))
5750 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5751 "is also a destination");
5752 if (RI->isSubRegisterEq(Rn, Rt2))
5753 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5754 "is also a destination");
5755 [[fallthrough]];
5756 }
5757 case AArch64::LDR_ZA:
5758 case AArch64::STR_ZA: {
5759 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5760 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5761 return Error(Loc[1],
5762 "unpredictable instruction, immediate and offset mismatch.");
5763 break;
5764 }
5765 case AArch64::LDPDi:
5766 case AArch64::LDPQi:
5767 case AArch64::LDPSi:
5768 case AArch64::LDPSWi:
5769 case AArch64::LDPWi:
5770 case AArch64::LDPXi: {
5771 MCRegister Rt = Inst.getOperand(0).getReg();
5772 MCRegister Rt2 = Inst.getOperand(1).getReg();
5773 if (Rt == Rt2)
5774 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5775 break;
5776 }
5777 case AArch64::LDPDpost:
5778 case AArch64::LDPDpre:
5779 case AArch64::LDPQpost:
5780 case AArch64::LDPQpre:
5781 case AArch64::LDPSpost:
5782 case AArch64::LDPSpre:
5783 case AArch64::LDPSWpost: {
5784 MCRegister Rt = Inst.getOperand(1).getReg();
5785 MCRegister Rt2 = Inst.getOperand(2).getReg();
5786 if (Rt == Rt2)
5787 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5788 break;
5789 }
5790 case AArch64::STPDpost:
5791 case AArch64::STPDpre:
5792 case AArch64::STPQpost:
5793 case AArch64::STPQpre:
5794 case AArch64::STPSpost:
5795 case AArch64::STPSpre:
5796 case AArch64::STPWpost:
5797 case AArch64::STPWpre:
5798 case AArch64::STPXpost:
5799 case AArch64::STPXpre: {
5800 MCRegister Rt = Inst.getOperand(1).getReg();
5801 MCRegister Rt2 = Inst.getOperand(2).getReg();
5802 MCRegister Rn = Inst.getOperand(3).getReg();
5803 if (RI->isSubRegisterEq(Rn, Rt))
5804 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5805 "is also a source");
5806 if (RI->isSubRegisterEq(Rn, Rt2))
5807 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5808 "is also a source");
5809 break;
5810 }
5811 case AArch64::LDRBBpre:
5812 case AArch64::LDRBpre:
5813 case AArch64::LDRHHpre:
5814 case AArch64::LDRHpre:
5815 case AArch64::LDRSBWpre:
5816 case AArch64::LDRSBXpre:
5817 case AArch64::LDRSHWpre:
5818 case AArch64::LDRSHXpre:
5819 case AArch64::LDRSWpre:
5820 case AArch64::LDRWpre:
5821 case AArch64::LDRXpre:
5822 case AArch64::LDRBBpost:
5823 case AArch64::LDRBpost:
5824 case AArch64::LDRHHpost:
5825 case AArch64::LDRHpost:
5826 case AArch64::LDRSBWpost:
5827 case AArch64::LDRSBXpost:
5828 case AArch64::LDRSHWpost:
5829 case AArch64::LDRSHXpost:
5830 case AArch64::LDRSWpost:
5831 case AArch64::LDRWpost:
5832 case AArch64::LDRXpost: {
5833 MCRegister Rt = Inst.getOperand(1).getReg();
5834 MCRegister Rn = Inst.getOperand(2).getReg();
5835 if (RI->isSubRegisterEq(Rn, Rt))
5836 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5837 "is also a source");
5838 break;
5839 }
5840 case AArch64::STRBBpost:
5841 case AArch64::STRBpost:
5842 case AArch64::STRHHpost:
5843 case AArch64::STRHpost:
5844 case AArch64::STRWpost:
5845 case AArch64::STRXpost:
5846 case AArch64::STRBBpre:
5847 case AArch64::STRBpre:
5848 case AArch64::STRHHpre:
5849 case AArch64::STRHpre:
5850 case AArch64::STRWpre:
5851 case AArch64::STRXpre: {
5852 MCRegister Rt = Inst.getOperand(1).getReg();
5853 MCRegister Rn = Inst.getOperand(2).getReg();
5854 if (RI->isSubRegisterEq(Rn, Rt))
5855 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5856 "is also a source");
5857 break;
5858 }
5859 case AArch64::STXRB:
5860 case AArch64::STXRH:
5861 case AArch64::STXRW:
5862 case AArch64::STXRX:
5863 case AArch64::STLXRB:
5864 case AArch64::STLXRH:
5865 case AArch64::STLXRW:
5866 case AArch64::STLXRX: {
5867 MCRegister Rs = Inst.getOperand(0).getReg();
5868 MCRegister Rt = Inst.getOperand(1).getReg();
5869 MCRegister Rn = Inst.getOperand(2).getReg();
5870 if (RI->isSubRegisterEq(Rt, Rs) ||
5871 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5872 return Error(Loc[0],
5873 "unpredictable STXR instruction, status is also a source");
5874 break;
5875 }
5876 case AArch64::STXPW:
5877 case AArch64::STXPX:
5878 case AArch64::STLXPW:
5879 case AArch64::STLXPX: {
5880 MCRegister Rs = Inst.getOperand(0).getReg();
5881 MCRegister Rt1 = Inst.getOperand(1).getReg();
5882 MCRegister Rt2 = Inst.getOperand(2).getReg();
5883 MCRegister Rn = Inst.getOperand(3).getReg();
5884 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5885 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5886 return Error(Loc[0],
5887 "unpredictable STXP instruction, status is also a source");
5888 break;
5889 }
5890 case AArch64::LDRABwriteback:
5891 case AArch64::LDRAAwriteback: {
5892 MCRegister Xt = Inst.getOperand(0).getReg();
5893 MCRegister Xn = Inst.getOperand(1).getReg();
5894 if (Xt == Xn)
5895 return Error(Loc[0],
5896 "unpredictable LDRA instruction, writeback base"
5897 " is also a destination");
5898 break;
5899 }
5900 }
5901
5902 // Check v8.8-A memops instructions.
5903 switch (Inst.getOpcode()) {
5904 case AArch64::CPYFP:
5905 case AArch64::CPYFPWN:
5906 case AArch64::CPYFPRN:
5907 case AArch64::CPYFPN:
5908 case AArch64::CPYFPWT:
5909 case AArch64::CPYFPWTWN:
5910 case AArch64::CPYFPWTRN:
5911 case AArch64::CPYFPWTN:
5912 case AArch64::CPYFPRT:
5913 case AArch64::CPYFPRTWN:
5914 case AArch64::CPYFPRTRN:
5915 case AArch64::CPYFPRTN:
5916 case AArch64::CPYFPT:
5917 case AArch64::CPYFPTWN:
5918 case AArch64::CPYFPTRN:
5919 case AArch64::CPYFPTN:
5920 case AArch64::CPYFM:
5921 case AArch64::CPYFMWN:
5922 case AArch64::CPYFMRN:
5923 case AArch64::CPYFMN:
5924 case AArch64::CPYFMWT:
5925 case AArch64::CPYFMWTWN:
5926 case AArch64::CPYFMWTRN:
5927 case AArch64::CPYFMWTN:
5928 case AArch64::CPYFMRT:
5929 case AArch64::CPYFMRTWN:
5930 case AArch64::CPYFMRTRN:
5931 case AArch64::CPYFMRTN:
5932 case AArch64::CPYFMT:
5933 case AArch64::CPYFMTWN:
5934 case AArch64::CPYFMTRN:
5935 case AArch64::CPYFMTN:
5936 case AArch64::CPYFE:
5937 case AArch64::CPYFEWN:
5938 case AArch64::CPYFERN:
5939 case AArch64::CPYFEN:
5940 case AArch64::CPYFEWT:
5941 case AArch64::CPYFEWTWN:
5942 case AArch64::CPYFEWTRN:
5943 case AArch64::CPYFEWTN:
5944 case AArch64::CPYFERT:
5945 case AArch64::CPYFERTWN:
5946 case AArch64::CPYFERTRN:
5947 case AArch64::CPYFERTN:
5948 case AArch64::CPYFET:
5949 case AArch64::CPYFETWN:
5950 case AArch64::CPYFETRN:
5951 case AArch64::CPYFETN:
5952 case AArch64::CPYP:
5953 case AArch64::CPYPWN:
5954 case AArch64::CPYPRN:
5955 case AArch64::CPYPN:
5956 case AArch64::CPYPWT:
5957 case AArch64::CPYPWTWN:
5958 case AArch64::CPYPWTRN:
5959 case AArch64::CPYPWTN:
5960 case AArch64::CPYPRT:
5961 case AArch64::CPYPRTWN:
5962 case AArch64::CPYPRTRN:
5963 case AArch64::CPYPRTN:
5964 case AArch64::CPYPT:
5965 case AArch64::CPYPTWN:
5966 case AArch64::CPYPTRN:
5967 case AArch64::CPYPTN:
5968 case AArch64::CPYM:
5969 case AArch64::CPYMWN:
5970 case AArch64::CPYMRN:
5971 case AArch64::CPYMN:
5972 case AArch64::CPYMWT:
5973 case AArch64::CPYMWTWN:
5974 case AArch64::CPYMWTRN:
5975 case AArch64::CPYMWTN:
5976 case AArch64::CPYMRT:
5977 case AArch64::CPYMRTWN:
5978 case AArch64::CPYMRTRN:
5979 case AArch64::CPYMRTN:
5980 case AArch64::CPYMT:
5981 case AArch64::CPYMTWN:
5982 case AArch64::CPYMTRN:
5983 case AArch64::CPYMTN:
5984 case AArch64::CPYE:
5985 case AArch64::CPYEWN:
5986 case AArch64::CPYERN:
5987 case AArch64::CPYEN:
5988 case AArch64::CPYEWT:
5989 case AArch64::CPYEWTWN:
5990 case AArch64::CPYEWTRN:
5991 case AArch64::CPYEWTN:
5992 case AArch64::CPYERT:
5993 case AArch64::CPYERTWN:
5994 case AArch64::CPYERTRN:
5995 case AArch64::CPYERTN:
5996 case AArch64::CPYET:
5997 case AArch64::CPYETWN:
5998 case AArch64::CPYETRN:
5999 case AArch64::CPYETN: {
6000 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
6001 MCRegister Xd = Inst.getOperand(3).getReg();
6002 MCRegister Xs = Inst.getOperand(4).getReg();
6003 MCRegister Xn = Inst.getOperand(5).getReg();
6004
6005 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6006 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6007 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6008
6009 if (Xd == Xs)
6010 return Error(Loc[0], "invalid CPY instruction, destination and source"
6011 " registers are the same");
6012 if (Xd == Xn)
6013 return Error(Loc[0], "invalid CPY instruction, destination and size"
6014 " registers are the same");
6015 if (Xs == Xn)
6016 return Error(Loc[0], "invalid CPY instruction, source and size"
6017 " registers are the same");
6018 break;
6019 }
6020 case AArch64::SETP:
6021 case AArch64::SETPT:
6022 case AArch64::SETPN:
6023 case AArch64::SETPTN:
6024 case AArch64::SETM:
6025 case AArch64::SETMT:
6026 case AArch64::SETMN:
6027 case AArch64::SETMTN:
6028 case AArch64::SETE:
6029 case AArch64::SETET:
6030 case AArch64::SETEN:
6031 case AArch64::SETETN:
6032 case AArch64::SETGP:
6033 case AArch64::SETGPT:
6034 case AArch64::SETGPN:
6035 case AArch64::SETGPTN:
6036 case AArch64::SETGM:
6037 case AArch64::SETGMT:
6038 case AArch64::SETGMN:
6039 case AArch64::SETGMTN:
6040 case AArch64::MOPSSETGE:
6041 case AArch64::MOPSSETGET:
6042 case AArch64::MOPSSETGEN:
6043 case AArch64::MOPSSETGETN: {
6044 // Xd_wb == op0, Xn_wb == op1
6045 MCRegister Xd = Inst.getOperand(2).getReg();
6046 MCRegister Xn = Inst.getOperand(3).getReg();
6047 MCRegister Xm = Inst.getOperand(4).getReg();
6048
6049 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6050 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6051
6052 if (Xd == Xn)
6053 return Error(Loc[0], "invalid SET instruction, destination and size"
6054 " registers are the same");
6055 if (Xd == Xm)
6056 return Error(Loc[0], "invalid SET instruction, destination and source"
6057 " registers are the same");
6058 if (Xn == Xm)
6059 return Error(Loc[0], "invalid SET instruction, source and size"
6060 " registers are the same");
6061 break;
6062 }
6063 case AArch64::SETGOP:
6064 case AArch64::SETGOPT:
6065 case AArch64::SETGOPN:
6066 case AArch64::SETGOPTN:
6067 case AArch64::SETGOM:
6068 case AArch64::SETGOMT:
6069 case AArch64::SETGOMN:
6070 case AArch64::SETGOMTN:
6071 case AArch64::SETGOE:
6072 case AArch64::SETGOET:
6073 case AArch64::SETGOEN:
6074 case AArch64::SETGOETN: {
6075 // Xd_wb == op0, Xn_wb == op1
6076 MCRegister Xd = Inst.getOperand(2).getReg();
6077 MCRegister Xn = Inst.getOperand(3).getReg();
6078
6079 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6080 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6081
6082 if (Xd == Xn)
6083 return Error(Loc[0], "invalid SET instruction, destination and size"
6084 " registers are the same");
6085 break;
6086 }
6087 }
6088
6089 // Now check immediate ranges. Separate from the above as there is overlap
6090 // in the instructions being checked and this keeps the nested conditionals
6091 // to a minimum.
6092 switch (Inst.getOpcode()) {
6093 case AArch64::ADDSWri:
6094 case AArch64::ADDSXri:
6095 case AArch64::ADDWri:
6096 case AArch64::ADDXri:
6097 case AArch64::SUBSWri:
6098 case AArch64::SUBSXri:
6099 case AArch64::SUBWri:
6100 case AArch64::SUBXri: {
6101 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6102 // some slight duplication here.
6103 if (Inst.getOperand(2).isExpr()) {
6104 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6105 AArch64::Specifier ELFSpec;
6106 AArch64::Specifier DarwinSpec;
6107 int64_t Addend;
6108 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6109
6110 // Only allow these with ADDXri.
6111 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6112 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6113 Inst.getOpcode() == AArch64::ADDXri)
6114 return false;
6115
6116 // Only allow these with ADDXri/ADDWri
6124 ELFSpec) &&
6125 (Inst.getOpcode() == AArch64::ADDXri ||
6126 Inst.getOpcode() == AArch64::ADDWri))
6127 return false;
6128
6129 // Don't allow symbol refs in the immediate field otherwise
6130 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6131 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6132 // 'cmp w0, 'borked')
6133 return Error(Loc.back(), "invalid immediate expression");
6134 }
6135 // We don't validate more complex expressions here
6136 }
6137 return false;
6138 }
6139 default:
6140 return false;
6141 }
6142}
6143
6145 const FeatureBitset &FBS,
6146 unsigned VariantID = 0);
6147
6148bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6150 OperandVector &Operands) {
6151 switch (ErrCode) {
6152 case Match_InvalidTiedOperand: {
6153 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6154 if (Op.isVectorList())
6155 return Error(Loc, "operand must match destination register list");
6156
6157 assert(Op.isReg() && "Unexpected operand type");
6158 switch (Op.getRegEqualityTy()) {
6159 case RegConstraintEqualityTy::EqualsSubReg:
6160 return Error(Loc, "operand must be 64-bit form of destination register");
6161 case RegConstraintEqualityTy::EqualsSuperReg:
6162 return Error(Loc, "operand must be 32-bit form of destination register");
6163 case RegConstraintEqualityTy::EqualsReg:
6164 return Error(Loc, "operand must match destination register");
6165 }
6166 llvm_unreachable("Unknown RegConstraintEqualityTy");
6167 }
6168 case Match_MissingFeature:
6169 return Error(Loc,
6170 "instruction requires a CPU feature not currently enabled");
6171 case Match_InvalidOperand:
6172 return Error(Loc, "invalid operand for instruction");
6173 case Match_InvalidSuffix:
6174 return Error(Loc, "invalid type suffix for instruction");
6175 case Match_InvalidCondCode:
6176 return Error(Loc, "expected AArch64 condition code");
6177 case Match_AddSubRegExtendSmall:
6178 return Error(Loc,
6179 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6180 case Match_AddSubRegExtendLarge:
6181 return Error(Loc,
6182 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6183 case Match_AddSubSecondSource:
6184 return Error(Loc,
6185 "expected compatible register, symbol or integer in range [0, 4095]");
6186 case Match_LogicalSecondSource:
6187 return Error(Loc, "expected compatible register or logical immediate");
6188 case Match_InvalidMovImm32Shift:
6189 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6190 case Match_InvalidMovImm64Shift:
6191 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6192 case Match_AddSubRegShift32:
6193 return Error(Loc,
6194 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6195 case Match_AddSubRegShift64:
6196 return Error(Loc,
6197 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6198 case Match_InvalidFPImm:
6199 return Error(Loc,
6200 "expected compatible register or floating-point constant");
6201 case Match_InvalidMemoryIndexedSImm6:
6202 return Error(Loc, "index must be an integer in range [-32, 31].");
6203 case Match_InvalidMemoryIndexedSImm5:
6204 return Error(Loc, "index must be an integer in range [-16, 15].");
6205 case Match_InvalidMemoryIndexed1SImm4:
6206 return Error(Loc, "index must be an integer in range [-8, 7].");
6207 case Match_InvalidMemoryIndexed2SImm4:
6208 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6209 case Match_InvalidMemoryIndexed3SImm4:
6210 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6211 case Match_InvalidMemoryIndexed4SImm4:
6212 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6213 case Match_InvalidMemoryIndexed16SImm4:
6214 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6215 case Match_InvalidMemoryIndexed32SImm4:
6216 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6217 case Match_InvalidMemoryIndexed1SImm6:
6218 return Error(Loc, "index must be an integer in range [-32, 31].");
6219 case Match_InvalidMemoryIndexedSImm8:
6220 return Error(Loc, "index must be an integer in range [-128, 127].");
6221 case Match_InvalidMemoryIndexedSImm9:
6222 return Error(Loc, "index must be an integer in range [-256, 255].");
6223 case Match_InvalidMemoryIndexed16SImm9:
6224 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6225 case Match_InvalidMemoryIndexed8SImm10:
6226 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6227 case Match_InvalidMemoryIndexed4SImm7:
6228 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6229 case Match_InvalidMemoryIndexed8SImm7:
6230 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6231 case Match_InvalidMemoryIndexed16SImm7:
6232 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6233 case Match_InvalidMemoryIndexed8UImm5:
6234 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6235 case Match_InvalidMemoryIndexed8UImm3:
6236 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6237 case Match_InvalidMemoryIndexed4UImm5:
6238 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6239 case Match_InvalidMemoryIndexed2UImm5:
6240 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6241 case Match_InvalidMemoryIndexed8UImm6:
6242 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6243 case Match_InvalidMemoryIndexed16UImm6:
6244 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6245 case Match_InvalidMemoryIndexed4UImm6:
6246 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6247 case Match_InvalidMemoryIndexed2UImm6:
6248 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6249 case Match_InvalidMemoryIndexed1UImm6:
6250 return Error(Loc, "index must be in range [0, 63].");
6251 case Match_InvalidMemoryWExtend8:
6252 return Error(Loc,
6253 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6254 case Match_InvalidMemoryWExtend16:
6255 return Error(Loc,
6256 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6257 case Match_InvalidMemoryWExtend32:
6258 return Error(Loc,
6259 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6260 case Match_InvalidMemoryWExtend64:
6261 return Error(Loc,
6262 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6263 case Match_InvalidMemoryWExtend128:
6264 return Error(Loc,
6265 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6266 case Match_InvalidMemoryXExtend8:
6267 return Error(Loc,
6268 "expected 'lsl' or 'sxtx' with optional shift of #0");
6269 case Match_InvalidMemoryXExtend16:
6270 return Error(Loc,
6271 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6272 case Match_InvalidMemoryXExtend32:
6273 return Error(Loc,
6274 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6275 case Match_InvalidMemoryXExtend64:
6276 return Error(Loc,
6277 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6278 case Match_InvalidMemoryXExtend128:
6279 return Error(Loc,
6280 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6281 case Match_InvalidMemoryIndexed1:
6282 return Error(Loc, "index must be an integer in range [0, 4095].");
6283 case Match_InvalidMemoryIndexed2:
6284 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6285 case Match_InvalidMemoryIndexed4:
6286 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6287 case Match_InvalidMemoryIndexed8:
6288 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6289 case Match_InvalidMemoryIndexed16:
6290 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6291 case Match_InvalidImm0_0:
6292 return Error(Loc, "immediate must be 0.");
6293 case Match_InvalidImm0_1:
6294 return Error(Loc, "immediate must be an integer in range [0, 1].");
6295 case Match_InvalidImm0_3:
6296 return Error(Loc, "immediate must be an integer in range [0, 3].");
6297 case Match_InvalidImm0_7:
6298 return Error(Loc, "immediate must be an integer in range [0, 7].");
6299 case Match_InvalidImm0_15:
6300 return Error(Loc, "immediate must be an integer in range [0, 15].");
6301 case Match_InvalidImm0_31:
6302 return Error(Loc, "immediate must be an integer in range [0, 31].");
6303 case Match_InvalidImm0_63:
6304 return Error(Loc, "immediate must be an integer in range [0, 63].");
6305 case Match_InvalidImm0_127:
6306 return Error(Loc, "immediate must be an integer in range [0, 127].");
6307 case Match_InvalidImm0_255:
6308 return Error(Loc, "immediate must be an integer in range [0, 255].");
6309 case Match_InvalidImm0_65535:
6310 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6311 case Match_InvalidImm1_8:
6312 return Error(Loc, "immediate must be an integer in range [1, 8].");
6313 case Match_InvalidImm1_16:
6314 return Error(Loc, "immediate must be an integer in range [1, 16].");
6315 case Match_InvalidImm1_32:
6316 return Error(Loc, "immediate must be an integer in range [1, 32].");
6317 case Match_InvalidImm1_64:
6318 return Error(Loc, "immediate must be an integer in range [1, 64].");
6319 case Match_InvalidImmM1_62:
6320 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6321 case Match_InvalidMemoryIndexedRange2UImm0:
6322 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6323 case Match_InvalidMemoryIndexedRange2UImm1:
6324 return Error(Loc, "vector select offset must be an immediate range of the "
6325 "form <immf>:<imml>, where the first "
6326 "immediate is a multiple of 2 in the range [0, 2], and "
6327 "the second immediate is immf + 1.");
6328 case Match_InvalidMemoryIndexedRange2UImm2:
6329 case Match_InvalidMemoryIndexedRange2UImm3:
6330 return Error(
6331 Loc,
6332 "vector select offset must be an immediate range of the form "
6333 "<immf>:<imml>, "
6334 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6335 "[0, 14] "
6336 "depending on the instruction, and the second immediate is immf + 1.");
6337 case Match_InvalidMemoryIndexedRange4UImm0:
6338 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6339 case Match_InvalidMemoryIndexedRange4UImm1:
6340 case Match_InvalidMemoryIndexedRange4UImm2:
6341 return Error(
6342 Loc,
6343 "vector select offset must be an immediate range of the form "
6344 "<immf>:<imml>, "
6345 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6346 "[0, 12] "
6347 "depending on the instruction, and the second immediate is immf + 3.");
6348 case Match_InvalidSVEAddSubImm8:
6349 return Error(Loc, "immediate must be an integer in range [0, 255]"
6350 " with a shift amount of 0");
6351 case Match_InvalidSVEAddSubImm16:
6352 case Match_InvalidSVEAddSubImm32:
6353 case Match_InvalidSVEAddSubImm64:
6354 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6355 "multiple of 256 in range [256, 65280]");
6356 case Match_InvalidSVECpyImm8:
6357 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6358 " with a shift amount of 0");
6359 case Match_InvalidSVECpyImm16:
6360 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6361 "multiple of 256 in range [-32768, 65280]");
6362 case Match_InvalidSVECpyImm32:
6363 case Match_InvalidSVECpyImm64:
6364 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6365 "multiple of 256 in range [-32768, 32512]");
6366 case Match_InvalidIndexRange0_0:
6367 return Error(Loc, "expected lane specifier '[0]'");
6368 case Match_InvalidIndexRange1_1:
6369 return Error(Loc, "expected lane specifier '[1]'");
6370 case Match_InvalidIndexRange0_15:
6371 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6372 case Match_InvalidIndexRange0_7:
6373 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6374 case Match_InvalidIndexRange0_3:
6375 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6376 case Match_InvalidIndexRange0_1:
6377 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6378 case Match_InvalidSVEIndexRange0_63:
6379 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6380 case Match_InvalidSVEIndexRange0_31:
6381 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6382 case Match_InvalidSVEIndexRange0_15:
6383 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6384 case Match_InvalidSVEIndexRange0_7:
6385 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6386 case Match_InvalidSVEIndexRange0_3:
6387 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6388 case Match_InvalidLabel:
6389 return Error(Loc, "expected label or encodable integer pc offset");
6390 case Match_MRS:
6391 return Error(Loc, "expected readable system register");
6392 case Match_MSR:
6393 case Match_InvalidSVCR:
6394 return Error(Loc, "expected writable system register or pstate");
6395 case Match_InvalidComplexRotationEven:
6396 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6397 case Match_InvalidComplexRotationOdd:
6398 return Error(Loc, "complex rotation must be 90 or 270.");
6399 case Match_MnemonicFail: {
6400 std::string Suggestion = AArch64MnemonicSpellCheck(
6401 ((AArch64Operand &)*Operands[0]).getToken(),
6402 ComputeAvailableFeatures(STI->getFeatureBits()));
6403 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6404 }
6405 case Match_InvalidGPR64shifted8:
6406 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6407 case Match_InvalidGPR64shifted16:
6408 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6409 case Match_InvalidGPR64shifted32:
6410 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6411 case Match_InvalidGPR64shifted64:
6412 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6413 case Match_InvalidGPR64shifted128:
6414 return Error(
6415 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6416 case Match_InvalidGPR64NoXZRshifted8:
6417 return Error(Loc, "register must be x0..x30 without shift");
6418 case Match_InvalidGPR64NoXZRshifted16:
6419 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6420 case Match_InvalidGPR64NoXZRshifted32:
6421 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6422 case Match_InvalidGPR64NoXZRshifted64:
6423 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6424 case Match_InvalidGPR64NoXZRshifted128:
6425 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6426 case Match_InvalidZPR32UXTW8:
6427 case Match_InvalidZPR32SXTW8:
6428 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6429 case Match_InvalidZPR32UXTW16:
6430 case Match_InvalidZPR32SXTW16:
6431 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6432 case Match_InvalidZPR32UXTW32:
6433 case Match_InvalidZPR32SXTW32:
6434 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6435 case Match_InvalidZPR32UXTW64:
6436 case Match_InvalidZPR32SXTW64:
6437 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6438 case Match_InvalidZPR64UXTW8:
6439 case Match_InvalidZPR64SXTW8:
6440 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6441 case Match_InvalidZPR64UXTW16:
6442 case Match_InvalidZPR64SXTW16:
6443 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6444 case Match_InvalidZPR64UXTW32:
6445 case Match_InvalidZPR64SXTW32:
6446 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6447 case Match_InvalidZPR64UXTW64:
6448 case Match_InvalidZPR64SXTW64:
6449 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6450 case Match_InvalidZPR32LSL8:
6451 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6452 case Match_InvalidZPR32LSL16:
6453 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6454 case Match_InvalidZPR32LSL32:
6455 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6456 case Match_InvalidZPR32LSL64:
6457 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6458 case Match_InvalidZPR64LSL8:
6459 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6460 case Match_InvalidZPR64LSL16:
6461 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6462 case Match_InvalidZPR64LSL32:
6463 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6464 case Match_InvalidZPR64LSL64:
6465 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6466 case Match_InvalidZPR0:
6467 return Error(Loc, "expected register without element width suffix");
6468 case Match_InvalidZPR8:
6469 case Match_InvalidZPR16:
6470 case Match_InvalidZPR32:
6471 case Match_InvalidZPR64:
6472 case Match_InvalidZPR128:
6473 return Error(Loc, "invalid element width");
6474 case Match_InvalidZPR_3b8:
6475 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6476 case Match_InvalidZPR_3b16:
6477 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6478 case Match_InvalidZPR_3b32:
6479 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6480 case Match_InvalidZPR_4b8:
6481 return Error(Loc,
6482 "Invalid restricted vector register, expected z0.b..z15.b");
6483 case Match_InvalidZPR_4b16:
6484 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6485 case Match_InvalidZPR_4b32:
6486 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6487 case Match_InvalidZPR_4b64:
6488 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6489 case Match_InvalidZPRMul2_Lo8:
6490 return Error(Loc, "Invalid restricted vector register, expected even "
6491 "register in z0.b..z14.b");
6492 case Match_InvalidZPRMul2_Hi8:
6493 return Error(Loc, "Invalid restricted vector register, expected even "
6494 "register in z16.b..z30.b");
6495 case Match_InvalidZPRMul2_Lo16:
6496 return Error(Loc, "Invalid restricted vector register, expected even "
6497 "register in z0.h..z14.h");
6498 case Match_InvalidZPRMul2_Hi16:
6499 return Error(Loc, "Invalid restricted vector register, expected even "
6500 "register in z16.h..z30.h");
6501 case Match_InvalidZPRMul2_Lo32:
6502 return Error(Loc, "Invalid restricted vector register, expected even "
6503 "register in z0.s..z14.s");
6504 case Match_InvalidZPRMul2_Hi32:
6505 return Error(Loc, "Invalid restricted vector register, expected even "
6506 "register in z16.s..z30.s");
6507 case Match_InvalidZPRMul2_Lo64:
6508 return Error(Loc, "Invalid restricted vector register, expected even "
6509 "register in z0.d..z14.d");
6510 case Match_InvalidZPRMul2_Hi64:
6511 return Error(Loc, "Invalid restricted vector register, expected even "
6512 "register in z16.d..z30.d");
6513 case Match_InvalidZPR_K0:
6514 return Error(Loc, "invalid restricted vector register, expected register "
6515 "in z20..z23 or z28..z31");
6516 case Match_InvalidSVEPattern:
6517 return Error(Loc, "invalid predicate pattern");
6518 case Match_InvalidSVEPPRorPNRAnyReg:
6519 case Match_InvalidSVEPPRorPNRBReg:
6520 case Match_InvalidSVEPredicateAnyReg:
6521 case Match_InvalidSVEPredicateBReg:
6522 case Match_InvalidSVEPredicateHReg:
6523 case Match_InvalidSVEPredicateSReg:
6524 case Match_InvalidSVEPredicateDReg:
6525 return Error(Loc, "invalid predicate register.");
6526 case Match_InvalidSVEPredicate3bAnyReg:
6527 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6528 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6529 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6530 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6531 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6532 return Error(Loc, "Invalid predicate register, expected PN in range "
6533 "pn8..pn15 with element suffix.");
6534 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6535 return Error(Loc, "invalid restricted predicate-as-counter register "
6536 "expected pn8..pn15");
6537 case Match_InvalidSVEPNPredicateBReg:
6538 case Match_InvalidSVEPNPredicateHReg:
6539 case Match_InvalidSVEPNPredicateSReg:
6540 case Match_InvalidSVEPNPredicateDReg:
6541 return Error(Loc, "Invalid predicate register, expected PN in range "
6542 "pn0..pn15 with element suffix.");
6543 case Match_InvalidSVEVecLenSpecifier:
6544 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6545 case Match_InvalidSVEPredicateListMul2x8:
6546 case Match_InvalidSVEPredicateListMul2x16:
6547 case Match_InvalidSVEPredicateListMul2x32:
6548 case Match_InvalidSVEPredicateListMul2x64:
6549 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6550 "predicate registers, where the first vector is a multiple of 2 "
6551 "and with correct element type");
6552 case Match_InvalidSVEExactFPImmOperandHalfOne:
6553 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6554 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6555 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6556 case Match_InvalidSVEExactFPImmOperandZeroOne:
6557 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6558 case Match_InvalidMatrixTileVectorH8:
6559 case Match_InvalidMatrixTileVectorV8:
6560 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6561 case Match_InvalidMatrixTileVectorH16:
6562 case Match_InvalidMatrixTileVectorV16:
6563 return Error(Loc,
6564 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6565 case Match_InvalidMatrixTileVectorH32:
6566 case Match_InvalidMatrixTileVectorV32:
6567 return Error(Loc,
6568 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6569 case Match_InvalidMatrixTileVectorH64:
6570 case Match_InvalidMatrixTileVectorV64:
6571 return Error(Loc,
6572 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6573 case Match_InvalidMatrixTileVectorH128:
6574 case Match_InvalidMatrixTileVectorV128:
6575 return Error(Loc,
6576 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6577 case Match_InvalidMatrixTile16:
6578 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6579 case Match_InvalidMatrixTile32:
6580 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6581 case Match_InvalidMatrixTile64:
6582 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6583 case Match_InvalidMatrix:
6584 return Error(Loc, "invalid matrix operand, expected za");
6585 case Match_InvalidMatrix8:
6586 return Error(Loc, "invalid matrix operand, expected suffix .b");
6587 case Match_InvalidMatrix16:
6588 return Error(Loc, "invalid matrix operand, expected suffix .h");
6589 case Match_InvalidMatrix32:
6590 return Error(Loc, "invalid matrix operand, expected suffix .s");
6591 case Match_InvalidMatrix64:
6592 return Error(Loc, "invalid matrix operand, expected suffix .d");
6593 case Match_InvalidMatrixIndexGPR32_12_15:
6594 return Error(Loc, "operand must be a register in range [w12, w15]");
6595 case Match_InvalidMatrixIndexGPR32_8_11:
6596 return Error(Loc, "operand must be a register in range [w8, w11]");
6597 case Match_InvalidSVEVectorList2x8Mul2:
6598 case Match_InvalidSVEVectorList2x16Mul2:
6599 case Match_InvalidSVEVectorList2x32Mul2:
6600 case Match_InvalidSVEVectorList2x64Mul2:
6601 case Match_InvalidSVEVectorList2x128Mul2:
6602 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6603 "SVE vectors, where the first vector is a multiple of 2 "
6604 "and with matching element types");
6605 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6606 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6607 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6608 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6609 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6610 "SVE vectors in the range z0-z14, where the first vector "
6611 "is a multiple of 2 "
6612 "and with matching element types");
6613 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6614 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6615 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6616 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6617 return Error(Loc,
6618 "Invalid vector list, expected list with 2 consecutive "
6619 "SVE vectors in the range z16-z30, where the first vector "
6620 "is a multiple of 2 "
6621 "and with matching element types");
6622 case Match_InvalidSVEVectorList4x8Mul4:
6623 case Match_InvalidSVEVectorList4x16Mul4:
6624 case Match_InvalidSVEVectorList4x32Mul4:
6625 case Match_InvalidSVEVectorList4x64Mul4:
6626 case Match_InvalidSVEVectorList4x128Mul4:
6627 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6628 "SVE vectors, where the first vector is a multiple of 4 "
6629 "and with matching element types");
6630 case Match_InvalidLookupTable:
6631 return Error(Loc, "Invalid lookup table, expected zt0");
6632 case Match_InvalidSVEVectorListStrided2x8:
6633 case Match_InvalidSVEVectorListStrided2x16:
6634 case Match_InvalidSVEVectorListStrided2x32:
6635 case Match_InvalidSVEVectorListStrided2x64:
6636 return Error(
6637 Loc,
6638 "Invalid vector list, expected list with each SVE vector in the list "
6639 "8 registers apart, and the first register in the range [z0, z7] or "
6640 "[z16, z23] and with correct element type");
6641 case Match_InvalidSVEVectorListStrided4x8:
6642 case Match_InvalidSVEVectorListStrided4x16:
6643 case Match_InvalidSVEVectorListStrided4x32:
6644 case Match_InvalidSVEVectorListStrided4x64:
6645 return Error(
6646 Loc,
6647 "Invalid vector list, expected list with each SVE vector in the list "
6648 "4 registers apart, and the first register in the range [z0, z3] or "
6649 "[z16, z19] and with correct element type");
6650 case Match_AddSubLSLImm3ShiftLarge:
6651 return Error(Loc,
6652 "expected 'lsl' with optional integer in range [0, 7]");
6653 default:
6654 llvm_unreachable("unexpected error code!");
6655 }
6656}
6657
6658static const char *getSubtargetFeatureName(uint64_t Val);
6659
6660bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6661 OperandVector &Operands,
6662 MCStreamer &Out,
6664 bool MatchingInlineAsm) {
6665 assert(!Operands.empty() && "Unexpected empty operand list!");
6666 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6667 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6668
6669 StringRef Tok = Op.getToken();
6670 unsigned NumOperands = Operands.size();
6671
6672 if (NumOperands == 4 && Tok == "lsl") {
6673 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6674 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6675 if (Op2.isScalarReg() && Op3.isImm()) {
6676 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6677 if (Op3CE) {
6678 uint64_t Op3Val = Op3CE->getValue();
6679 uint64_t NewOp3Val = 0;
6680 uint64_t NewOp4Val = 0;
6681 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6682 Op2.getReg())) {
6683 NewOp3Val = (32 - Op3Val) & 0x1f;
6684 NewOp4Val = 31 - Op3Val;
6685 } else {
6686 NewOp3Val = (64 - Op3Val) & 0x3f;
6687 NewOp4Val = 63 - Op3Val;
6688 }
6689
6690 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6691 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6692
6693 Operands[0] =
6694 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6695 Operands.push_back(AArch64Operand::CreateImm(
6696 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6697 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6698 Op3.getEndLoc(), getContext());
6699 }
6700 }
6701 } else if (NumOperands == 4 && Tok == "bfc") {
6702 // FIXME: Horrible hack to handle BFC->BFM alias.
6703 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6704 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6705 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6706
6707 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6708 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6709 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6710
6711 if (LSBCE && WidthCE) {
6712 uint64_t LSB = LSBCE->getValue();
6713 uint64_t Width = WidthCE->getValue();
6714
6715 uint64_t RegWidth = 0;
6716 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6717 Op1.getReg()))
6718 RegWidth = 64;
6719 else
6720 RegWidth = 32;
6721
6722 if (LSB >= RegWidth)
6723 return Error(LSBOp.getStartLoc(),
6724 "expected integer in range [0, 31]");
6725 if (Width < 1 || Width > RegWidth)
6726 return Error(WidthOp.getStartLoc(),
6727 "expected integer in range [1, 32]");
6728
6729 uint64_t ImmR = 0;
6730 if (RegWidth == 32)
6731 ImmR = (32 - LSB) & 0x1f;
6732 else
6733 ImmR = (64 - LSB) & 0x3f;
6734
6735 uint64_t ImmS = Width - 1;
6736
6737 if (ImmR != 0 && ImmS >= ImmR)
6738 return Error(WidthOp.getStartLoc(),
6739 "requested insert overflows register");
6740
6741 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6742 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6743 Operands[0] =
6744 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6745 Operands[2] = AArch64Operand::CreateReg(
6746 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6747 SMLoc(), SMLoc(), getContext());
6748 Operands[3] = AArch64Operand::CreateImm(
6749 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6750 Operands.emplace_back(
6751 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6752 WidthOp.getEndLoc(), getContext()));
6753 }
6754 }
6755 } else if (NumOperands == 5) {
6756 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6757 // UBFIZ -> UBFM aliases.
6758 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6759 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6760 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6761 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6762
6763 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6764 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6765 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6766
6767 if (Op3CE && Op4CE) {
6768 uint64_t Op3Val = Op3CE->getValue();
6769 uint64_t Op4Val = Op4CE->getValue();
6770
6771 uint64_t RegWidth = 0;
6772 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6773 Op1.getReg()))
6774 RegWidth = 64;
6775 else
6776 RegWidth = 32;
6777
6778 if (Op3Val >= RegWidth)
6779 return Error(Op3.getStartLoc(),
6780 "expected integer in range [0, 31]");
6781 if (Op4Val < 1 || Op4Val > RegWidth)
6782 return Error(Op4.getStartLoc(),
6783 "expected integer in range [1, 32]");
6784
6785 uint64_t NewOp3Val = 0;
6786 if (RegWidth == 32)
6787 NewOp3Val = (32 - Op3Val) & 0x1f;
6788 else
6789 NewOp3Val = (64 - Op3Val) & 0x3f;
6790
6791 uint64_t NewOp4Val = Op4Val - 1;
6792
6793 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6794 return Error(Op4.getStartLoc(),
6795 "requested insert overflows register");
6796
6797 const MCExpr *NewOp3 =
6798 MCConstantExpr::create(NewOp3Val, getContext());
6799 const MCExpr *NewOp4 =
6800 MCConstantExpr::create(NewOp4Val, getContext());
6801 Operands[3] = AArch64Operand::CreateImm(
6802 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6803 Operands[4] = AArch64Operand::CreateImm(
6804 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6805 if (Tok == "bfi")
6806 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6807 getContext());
6808 else if (Tok == "sbfiz")
6809 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6810 getContext());
6811 else if (Tok == "ubfiz")
6812 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6813 getContext());
6814 else
6815 llvm_unreachable("No valid mnemonic for alias?");
6816 }
6817 }
6818
6819 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6820 // UBFX -> UBFM aliases.
6821 } else if (NumOperands == 5 &&
6822 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6823 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6824 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6825 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6826
6827 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6828 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6829 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6830
6831 if (Op3CE && Op4CE) {
6832 uint64_t Op3Val = Op3CE->getValue();
6833 uint64_t Op4Val = Op4CE->getValue();
6834
6835 uint64_t RegWidth = 0;
6836 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6837 Op1.getReg()))
6838 RegWidth = 64;
6839 else
6840 RegWidth = 32;
6841
6842 if (Op3Val >= RegWidth)
6843 return Error(Op3.getStartLoc(),
6844 "expected integer in range [0, 31]");
6845 if (Op4Val < 1 || Op4Val > RegWidth)
6846 return Error(Op4.getStartLoc(),
6847 "expected integer in range [1, 32]");
6848
6849 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6850
6851 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6852 return Error(Op4.getStartLoc(),
6853 "requested extract overflows register");
6854
6855 const MCExpr *NewOp4 =
6856 MCConstantExpr::create(NewOp4Val, getContext());
6857 Operands[4] = AArch64Operand::CreateImm(
6858 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6859 if (Tok == "bfxil")
6860 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6861 getContext());
6862 else if (Tok == "sbfx")
6863 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6864 getContext());
6865 else if (Tok == "ubfx")
6866 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6867 getContext());
6868 else
6869 llvm_unreachable("No valid mnemonic for alias?");
6870 }
6871 }
6872 }
6873 }
6874
6875 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6876 // instruction for FP registers correctly in some rare circumstances. Convert
6877 // it to a safe instruction and warn (because silently changing someone's
6878 // assembly is rude).
6879 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6880 NumOperands == 4 && Tok == "movi") {
6881 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6882 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6883 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6884 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6885 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6886 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6887 if (Suffix.lower() == ".2d" &&
6888 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6889 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6890 " correctly on this CPU, converting to equivalent movi.16b");
6891 // Switch the suffix to .16b.
6892 unsigned Idx = Op1.isToken() ? 1 : 2;
6893 Operands[Idx] =
6894 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6895 }
6896 }
6897 }
6898
6899 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6900 // InstAlias can't quite handle this since the reg classes aren't
6901 // subclasses.
6902 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6903 // The source register can be Wn here, but the matcher expects a
6904 // GPR64. Twiddle it here if necessary.
6905 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6906 if (Op.isScalarReg()) {
6907 MCRegister Reg = getXRegFromWReg(Op.getReg());
6908 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6909 Op.getStartLoc(), Op.getEndLoc(),
6910 getContext());
6911 }
6912 }
6913 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6914 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6915 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6916 if (Op.isScalarReg() &&
6917 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6918 Op.getReg())) {
6919 // The source register can be Wn here, but the matcher expects a
6920 // GPR64. Twiddle it here if necessary.
6921 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6922 if (Op.isScalarReg()) {
6923 MCRegister Reg = getXRegFromWReg(Op.getReg());
6924 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6925 Op.getStartLoc(),
6926 Op.getEndLoc(), getContext());
6927 }
6928 }
6929 }
6930 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6931 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6932 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6933 if (Op.isScalarReg() &&
6934 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6935 Op.getReg())) {
6936 // The source register can be Wn here, but the matcher expects a
6937 // GPR32. Twiddle it here if necessary.
6938 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6939 if (Op.isScalarReg()) {
6940 MCRegister Reg = getWRegFromXReg(Op.getReg());
6941 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6942 Op.getStartLoc(),
6943 Op.getEndLoc(), getContext());
6944 }
6945 }
6946 }
6947
6948 MCInst Inst;
6949 FeatureBitset MissingFeatures;
6950 // First try to match against the secondary set of tables containing the
6951 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6952 unsigned MatchResult =
6953 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6954 MatchingInlineAsm, 1);
6955
6956 // If that fails, try against the alternate table containing long-form NEON:
6957 // "fadd v0.2s, v1.2s, v2.2s"
6958 if (MatchResult != Match_Success) {
6959 // But first, save the short-form match result: we can use it in case the
6960 // long-form match also fails.
6961 auto ShortFormNEONErrorInfo = ErrorInfo;
6962 auto ShortFormNEONMatchResult = MatchResult;
6963 auto ShortFormNEONMissingFeatures = MissingFeatures;
6964
6965 MatchResult =
6966 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6967 MatchingInlineAsm, 0);
6968
6969 // Now, both matches failed, and the long-form match failed on the mnemonic
6970 // suffix token operand. The short-form match failure is probably more
6971 // relevant: use it instead.
6972 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6973 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6974 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6975 MatchResult = ShortFormNEONMatchResult;
6976 ErrorInfo = ShortFormNEONErrorInfo;
6977 MissingFeatures = ShortFormNEONMissingFeatures;
6978 }
6979 }
6980
6981 switch (MatchResult) {
6982 case Match_Success: {
6983 // Perform range checking and other semantic validations
6984 SmallVector<SMLoc, 8> OperandLocs;
6985 NumOperands = Operands.size();
6986 for (unsigned i = 1; i < NumOperands; ++i)
6987 OperandLocs.push_back(Operands[i]->getStartLoc());
6988 if (validateInstruction(Inst, IDLoc, OperandLocs))
6989 return true;
6990
6991 Inst.setLoc(IDLoc);
6992 Out.emitInstruction(Inst, getSTI());
6993 return false;
6994 }
6995 case Match_MissingFeature: {
6996 assert(MissingFeatures.any() && "Unknown missing feature!");
6997 // Special case the error message for the very common case where only
6998 // a single subtarget feature is missing (neon, e.g.).
6999 std::string Msg = "instruction requires:";
7000 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7001 if (MissingFeatures[i]) {
7002 Msg += " ";
7003 Msg += getSubtargetFeatureName(i);
7004 }
7005 }
7006 return Error(IDLoc, Msg);
7007 }
7008 case Match_MnemonicFail:
7009 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
7010 case Match_InvalidOperand: {
7011 SMLoc ErrorLoc = IDLoc;
7012
7013 if (ErrorInfo != ~0ULL) {
7014 if (ErrorInfo >= Operands.size())
7015 return Error(IDLoc, "too few operands for instruction",
7016 SMRange(IDLoc, getTok().getLoc()));
7017
7018 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7019 if (ErrorLoc == SMLoc())
7020 ErrorLoc = IDLoc;
7021 }
7022 // If the match failed on a suffix token operand, tweak the diagnostic
7023 // accordingly.
7024 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7025 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7026 MatchResult = Match_InvalidSuffix;
7027
7028 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7029 }
7030 case Match_InvalidTiedOperand:
7031 case Match_InvalidMemoryIndexed1:
7032 case Match_InvalidMemoryIndexed2:
7033 case Match_InvalidMemoryIndexed4:
7034 case Match_InvalidMemoryIndexed8:
7035 case Match_InvalidMemoryIndexed16:
7036 case Match_InvalidCondCode:
7037 case Match_AddSubLSLImm3ShiftLarge:
7038 case Match_AddSubRegExtendSmall:
7039 case Match_AddSubRegExtendLarge:
7040 case Match_AddSubSecondSource:
7041 case Match_LogicalSecondSource:
7042 case Match_AddSubRegShift32:
7043 case Match_AddSubRegShift64:
7044 case Match_InvalidMovImm32Shift:
7045 case Match_InvalidMovImm64Shift:
7046 case Match_InvalidFPImm:
7047 case Match_InvalidMemoryWExtend8:
7048 case Match_InvalidMemoryWExtend16:
7049 case Match_InvalidMemoryWExtend32:
7050 case Match_InvalidMemoryWExtend64:
7051 case Match_InvalidMemoryWExtend128:
7052 case Match_InvalidMemoryXExtend8:
7053 case Match_InvalidMemoryXExtend16:
7054 case Match_InvalidMemoryXExtend32:
7055 case Match_InvalidMemoryXExtend64:
7056 case Match_InvalidMemoryXExtend128:
7057 case Match_InvalidMemoryIndexed1SImm4:
7058 case Match_InvalidMemoryIndexed2SImm4:
7059 case Match_InvalidMemoryIndexed3SImm4:
7060 case Match_InvalidMemoryIndexed4SImm4:
7061 case Match_InvalidMemoryIndexed1SImm6:
7062 case Match_InvalidMemoryIndexed16SImm4:
7063 case Match_InvalidMemoryIndexed32SImm4:
7064 case Match_InvalidMemoryIndexed4SImm7:
7065 case Match_InvalidMemoryIndexed8SImm7:
7066 case Match_InvalidMemoryIndexed16SImm7:
7067 case Match_InvalidMemoryIndexed8UImm5:
7068 case Match_InvalidMemoryIndexed8UImm3:
7069 case Match_InvalidMemoryIndexed4UImm5:
7070 case Match_InvalidMemoryIndexed2UImm5:
7071 case Match_InvalidMemoryIndexed1UImm6:
7072 case Match_InvalidMemoryIndexed2UImm6:
7073 case Match_InvalidMemoryIndexed4UImm6:
7074 case Match_InvalidMemoryIndexed8UImm6:
7075 case Match_InvalidMemoryIndexed16UImm6:
7076 case Match_InvalidMemoryIndexedSImm6:
7077 case Match_InvalidMemoryIndexedSImm5:
7078 case Match_InvalidMemoryIndexedSImm8:
7079 case Match_InvalidMemoryIndexedSImm9:
7080 case Match_InvalidMemoryIndexed16SImm9:
7081 case Match_InvalidMemoryIndexed8SImm10:
7082 case Match_InvalidImm0_0:
7083 case Match_InvalidImm0_1:
7084 case Match_InvalidImm0_3:
7085 case Match_InvalidImm0_7:
7086 case Match_InvalidImm0_15:
7087 case Match_InvalidImm0_31:
7088 case Match_InvalidImm0_63:
7089 case Match_InvalidImm0_127:
7090 case Match_InvalidImm0_255:
7091 case Match_InvalidImm0_65535:
7092 case Match_InvalidImm1_8:
7093 case Match_InvalidImm1_16:
7094 case Match_InvalidImm1_32:
7095 case Match_InvalidImm1_64:
7096 case Match_InvalidImmM1_62:
7097 case Match_InvalidMemoryIndexedRange2UImm0:
7098 case Match_InvalidMemoryIndexedRange2UImm1:
7099 case Match_InvalidMemoryIndexedRange2UImm2:
7100 case Match_InvalidMemoryIndexedRange2UImm3:
7101 case Match_InvalidMemoryIndexedRange4UImm0:
7102 case Match_InvalidMemoryIndexedRange4UImm1:
7103 case Match_InvalidMemoryIndexedRange4UImm2:
7104 case Match_InvalidSVEAddSubImm8:
7105 case Match_InvalidSVEAddSubImm16:
7106 case Match_InvalidSVEAddSubImm32:
7107 case Match_InvalidSVEAddSubImm64:
7108 case Match_InvalidSVECpyImm8:
7109 case Match_InvalidSVECpyImm16:
7110 case Match_InvalidSVECpyImm32:
7111 case Match_InvalidSVECpyImm64:
7112 case Match_InvalidIndexRange0_0:
7113 case Match_InvalidIndexRange1_1:
7114 case Match_InvalidIndexRange0_15:
7115 case Match_InvalidIndexRange0_7:
7116 case Match_InvalidIndexRange0_3:
7117 case Match_InvalidIndexRange0_1:
7118 case Match_InvalidSVEIndexRange0_63:
7119 case Match_InvalidSVEIndexRange0_31:
7120 case Match_InvalidSVEIndexRange0_15:
7121 case Match_InvalidSVEIndexRange0_7:
7122 case Match_InvalidSVEIndexRange0_3:
7123 case Match_InvalidLabel:
7124 case Match_InvalidComplexRotationEven:
7125 case Match_InvalidComplexRotationOdd:
7126 case Match_InvalidGPR64shifted8:
7127 case Match_InvalidGPR64shifted16:
7128 case Match_InvalidGPR64shifted32:
7129 case Match_InvalidGPR64shifted64:
7130 case Match_InvalidGPR64shifted128:
7131 case Match_InvalidGPR64NoXZRshifted8:
7132 case Match_InvalidGPR64NoXZRshifted16:
7133 case Match_InvalidGPR64NoXZRshifted32:
7134 case Match_InvalidGPR64NoXZRshifted64:
7135 case Match_InvalidGPR64NoXZRshifted128:
7136 case Match_InvalidZPR32UXTW8:
7137 case Match_InvalidZPR32UXTW16:
7138 case Match_InvalidZPR32UXTW32:
7139 case Match_InvalidZPR32UXTW64:
7140 case Match_InvalidZPR32SXTW8:
7141 case Match_InvalidZPR32SXTW16:
7142 case Match_InvalidZPR32SXTW32:
7143 case Match_InvalidZPR32SXTW64:
7144 case Match_InvalidZPR64UXTW8:
7145 case Match_InvalidZPR64SXTW8:
7146 case Match_InvalidZPR64UXTW16:
7147 case Match_InvalidZPR64SXTW16:
7148 case Match_InvalidZPR64UXTW32:
7149 case Match_InvalidZPR64SXTW32:
7150 case Match_InvalidZPR64UXTW64:
7151 case Match_InvalidZPR64SXTW64:
7152 case Match_InvalidZPR32LSL8:
7153 case Match_InvalidZPR32LSL16:
7154 case Match_InvalidZPR32LSL32:
7155 case Match_InvalidZPR32LSL64:
7156 case Match_InvalidZPR64LSL8:
7157 case Match_InvalidZPR64LSL16:
7158 case Match_InvalidZPR64LSL32:
7159 case Match_InvalidZPR64LSL64:
7160 case Match_InvalidZPR0:
7161 case Match_InvalidZPR8:
7162 case Match_InvalidZPR16:
7163 case Match_InvalidZPR32:
7164 case Match_InvalidZPR64:
7165 case Match_InvalidZPR128:
7166 case Match_InvalidZPR_3b8:
7167 case Match_InvalidZPR_3b16:
7168 case Match_InvalidZPR_3b32:
7169 case Match_InvalidZPR_4b8:
7170 case Match_InvalidZPR_4b16:
7171 case Match_InvalidZPR_4b32:
7172 case Match_InvalidZPR_4b64:
7173 case Match_InvalidSVEPPRorPNRAnyReg:
7174 case Match_InvalidSVEPPRorPNRBReg:
7175 case Match_InvalidSVEPredicateAnyReg:
7176 case Match_InvalidSVEPattern:
7177 case Match_InvalidSVEVecLenSpecifier:
7178 case Match_InvalidSVEPredicateBReg:
7179 case Match_InvalidSVEPredicateHReg:
7180 case Match_InvalidSVEPredicateSReg:
7181 case Match_InvalidSVEPredicateDReg:
7182 case Match_InvalidSVEPredicate3bAnyReg:
7183 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7184 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7185 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7186 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7187 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7188 case Match_InvalidSVEPNPredicateBReg:
7189 case Match_InvalidSVEPNPredicateHReg:
7190 case Match_InvalidSVEPNPredicateSReg:
7191 case Match_InvalidSVEPNPredicateDReg:
7192 case Match_InvalidSVEPredicateListMul2x8:
7193 case Match_InvalidSVEPredicateListMul2x16:
7194 case Match_InvalidSVEPredicateListMul2x32:
7195 case Match_InvalidSVEPredicateListMul2x64:
7196 case Match_InvalidSVEExactFPImmOperandHalfOne:
7197 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7198 case Match_InvalidSVEExactFPImmOperandZeroOne:
7199 case Match_InvalidMatrixTile16:
7200 case Match_InvalidMatrixTile32:
7201 case Match_InvalidMatrixTile64:
7202 case Match_InvalidMatrix:
7203 case Match_InvalidMatrix8:
7204 case Match_InvalidMatrix16:
7205 case Match_InvalidMatrix32:
7206 case Match_InvalidMatrix64:
7207 case Match_InvalidMatrixTileVectorH8:
7208 case Match_InvalidMatrixTileVectorH16:
7209 case Match_InvalidMatrixTileVectorH32:
7210 case Match_InvalidMatrixTileVectorH64:
7211 case Match_InvalidMatrixTileVectorH128:
7212 case Match_InvalidMatrixTileVectorV8:
7213 case Match_InvalidMatrixTileVectorV16:
7214 case Match_InvalidMatrixTileVectorV32:
7215 case Match_InvalidMatrixTileVectorV64:
7216 case Match_InvalidMatrixTileVectorV128:
7217 case Match_InvalidSVCR:
7218 case Match_InvalidMatrixIndexGPR32_12_15:
7219 case Match_InvalidMatrixIndexGPR32_8_11:
7220 case Match_InvalidLookupTable:
7221 case Match_InvalidZPRMul2_Lo8:
7222 case Match_InvalidZPRMul2_Hi8:
7223 case Match_InvalidZPRMul2_Lo16:
7224 case Match_InvalidZPRMul2_Hi16:
7225 case Match_InvalidZPRMul2_Lo32:
7226 case Match_InvalidZPRMul2_Hi32:
7227 case Match_InvalidZPRMul2_Lo64:
7228 case Match_InvalidZPRMul2_Hi64:
7229 case Match_InvalidZPR_K0:
7230 case Match_InvalidSVEVectorList2x8Mul2:
7231 case Match_InvalidSVEVectorList2x16Mul2:
7232 case Match_InvalidSVEVectorList2x32Mul2:
7233 case Match_InvalidSVEVectorList2x64Mul2:
7234 case Match_InvalidSVEVectorList2x128Mul2:
7235 case Match_InvalidSVEVectorList4x8Mul4:
7236 case Match_InvalidSVEVectorList4x16Mul4:
7237 case Match_InvalidSVEVectorList4x32Mul4:
7238 case Match_InvalidSVEVectorList4x64Mul4:
7239 case Match_InvalidSVEVectorList4x128Mul4:
7240 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7241 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7242 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7243 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7244 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7245 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7246 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7247 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7248 case Match_InvalidSVEVectorListStrided2x8:
7249 case Match_InvalidSVEVectorListStrided2x16:
7250 case Match_InvalidSVEVectorListStrided2x32:
7251 case Match_InvalidSVEVectorListStrided2x64:
7252 case Match_InvalidSVEVectorListStrided4x8:
7253 case Match_InvalidSVEVectorListStrided4x16:
7254 case Match_InvalidSVEVectorListStrided4x32:
7255 case Match_InvalidSVEVectorListStrided4x64:
7256 case Match_MSR:
7257 case Match_MRS: {
7258 if (ErrorInfo >= Operands.size())
7259 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7260 // Any time we get here, there's nothing fancy to do. Just get the
7261 // operand SMLoc and display the diagnostic.
7262 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7263 if (ErrorLoc == SMLoc())
7264 ErrorLoc = IDLoc;
7265 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7266 }
7267 }
7268
7269 llvm_unreachable("Implement any new match types added!");
7270}
7271
/// ParseDirective parses the arm specific directives
/// Returns false when the directive was recognized and handled here, true to
/// let the generic assembler parser handle it.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;
  bool IsELF = Format == MCContext::IsELF;

  // Directive names are matched case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Directives accepted for every object-file format.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_negate_ra_state_with_pc")
    parseDirectiveCFINegateRAStateWithPC();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Mach-O only supports the linker-optimization-hint directive.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF: Windows ARM64 structured-exception-handling unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The four save_any_reg variants share one parser, selected by the
    // (Paired, Writeback) flag pair.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else if (IDVal == ".seh_allocz")
      parseDirectiveSEHAllocZ(Loc);
    else if (IDVal == ".seh_save_zreg")
      parseDirectiveSEHSaveZReg(Loc);
    else if (IDVal == ".seh_save_preg")
      parseDirectiveSEHSavePReg(Loc);
    else
      return true;
  } else if (IsELF) {
    // ELF: AArch64 build-attribute directives.
    if (IDVal == ".aeabi_subsection")
      parseDirectiveAeabiSubSectionHeader(Loc);
    else if (IDVal == ".aeabi_attribute")
      parseDirectiveAeabiAArch64Attr(Loc);
    else
      return true;
  } else
    return true;
  return false;
}
7390
7391static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7392 SmallVector<StringRef, 4> &RequestedExtensions) {
7393 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7394 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7395
7396 if (!NoCrypto && Crypto) {
7397 // Map 'generic' (and others) to sha2 and aes, because
7398 // that was the traditional meaning of crypto.
7399 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7400 ArchInfo == AArch64::ARMV8_3A) {
7401 RequestedExtensions.push_back("sha2");
7402 RequestedExtensions.push_back("aes");
7403 }
7404 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7405 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7406 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7407 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7408 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7409 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7410 RequestedExtensions.push_back("sm4");
7411 RequestedExtensions.push_back("sha3");
7412 RequestedExtensions.push_back("sha2");
7413 RequestedExtensions.push_back("aes");
7414 }
7415 } else if (NoCrypto) {
7416 // Map 'generic' (and others) to sha2 and aes, because
7417 // that was the traditional meaning of crypto.
7418 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7419 ArchInfo == AArch64::ARMV8_3A) {
7420 RequestedExtensions.push_back("nosha2");
7421 RequestedExtensions.push_back("noaes");
7422 }
7423 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7424 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7425 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7426 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7427 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7428 ArchInfo == AArch64::ARMV9_4A) {
7429 RequestedExtensions.push_back("nosm4");
7430 RequestedExtensions.push_back("nosha3");
7431 RequestedExtensions.push_back("nosha2");
7432 RequestedExtensions.push_back("noaes");
7433 }
7434 }
7435}
7436
7438 return SMLoc::getFromPointer(L.getPointer() + Offset);
7439}
7440
7441/// parseDirectiveArch
7442/// ::= .arch token
7443bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7444 SMLoc CurLoc = getLoc();
7445
7446 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7447 StringRef Arch, ExtensionString;
7448 std::tie(Arch, ExtensionString) = Name.split('+');
7449
7450 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7451 if (!ArchInfo)
7452 return Error(CurLoc, "unknown arch name");
7453
7454 if (parseToken(AsmToken::EndOfStatement))
7455 return true;
7456
7457 // Get the architecture and extension features.
7458 std::vector<StringRef> AArch64Features;
7459 AArch64Features.push_back(ArchInfo->ArchFeature);
7460 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7461
7462 MCSubtargetInfo &STI = copySTI();
7463 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7464 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7465 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7466
7467 SmallVector<StringRef, 4> RequestedExtensions;
7468 if (!ExtensionString.empty())
7469 ExtensionString.split(RequestedExtensions, '+');
7470
7471 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7472 CurLoc = incrementLoc(CurLoc, Arch.size());
7473
7474 for (auto Name : RequestedExtensions) {
7475 // Advance source location past '+'.
7476 CurLoc = incrementLoc(CurLoc, 1);
7477
7478 bool EnableFeature = !Name.consume_front_insensitive("no");
7479
7480 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7481 return Extension.Name == Name;
7482 });
7483
7484 if (It == std::end(ExtensionMap))
7485 return Error(CurLoc, "unsupported architectural extension: " + Name);
7486
7487 if (EnableFeature)
7488 STI.SetFeatureBitsTransitively(It->Features);
7489 else
7490 STI.ClearFeatureBitsTransitively(It->Features);
7491 CurLoc = incrementLoc(CurLoc, Name.size());
7492 }
7493 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7494 setAvailableFeatures(Features);
7495
7496 getTargetStreamer().emitDirectiveArch(Name);
7497 return false;
7498}
7499
7500/// parseDirectiveArchExtension
7501/// ::= .arch_extension [no]feature
7502bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7503 SMLoc ExtLoc = getLoc();
7504
7505 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7506
7507 if (parseEOL())
7508 return true;
7509
7510 bool EnableFeature = true;
7511 StringRef Name = FullName;
7512 if (Name.starts_with_insensitive("no")) {
7513 EnableFeature = false;
7514 Name = Name.substr(2);
7515 }
7516
7517 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7518 return Extension.Name == Name;
7519 });
7520
7521 if (It == std::end(ExtensionMap))
7522 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7523
7524 MCSubtargetInfo &STI = copySTI();
7525 if (EnableFeature)
7526 STI.SetFeatureBitsTransitively(It->Features);
7527 else
7528 STI.ClearFeatureBitsTransitively(It->Features);
7529 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7530 setAvailableFeatures(Features);
7531
7532 getTargetStreamer().emitDirectiveArchExtension(FullName);
7533 return false;
7534}
7535
/// parseDirectiveCPU
/// ::= .cpu id
/// Syntax: <cpu-name>[+ext[+ext...]]; resets the subtarget to the CPU's
/// default features, then applies each +ext / +noext modifier.
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  StringRef CPU, ExtensionString;
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // Diagnose but return false: parsing continues after an unknown CPU.
    Error(CurLoc, "unknown CPU name");
    return false;
  }
  ExpandCryptoAEK(*CpuArch, RequestedExtensions);

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
  CurLoc = incrementLoc(CurLoc, CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    // A case-insensitive "no" prefix disables the extension.
    bool EnableFeature = !Name.consume_front_insensitive("no");

    auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
      return Extension.Name == Name;
    });

    if (It == std::end(ExtensionMap))
      return Error(CurLoc, "unsupported architectural extension: " + Name);

    if (EnableFeature)
      STI.SetFeatureBitsTransitively(It->Features);
    else
      STI.ClearFeatureBitsTransitively(It->Features);
    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
  setAvailableFeatures(Features);
  return false;
}
7586
7587/// parseDirectiveInst
7588/// ::= .inst opcode [, ...]
7589bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7590 if (getLexer().is(AsmToken::EndOfStatement))
7591 return Error(Loc, "expected expression following '.inst' directive");
7592
7593 auto parseOp = [&]() -> bool {
7594 SMLoc L = getLoc();
7595 const MCExpr *Expr = nullptr;
7596 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7597 return true;
7598 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7599 if (check(!Value, L, "expected constant expression"))
7600 return true;
7601 getTargetStreamer().emitInst(Value->getValue());
7602 return false;
7603 };
7604
7605 return parseMany(parseOp);
7606}
7607
7608// parseDirectiveTLSDescCall:
7609// ::= .tlsdesccall symbol
7610bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7611 StringRef Name;
7612 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7613 parseToken(AsmToken::EndOfStatement))
7614 return true;
7615
7616 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7617 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7619
7620 MCInst Inst;
7621 Inst.setOpcode(AArch64::TLSDESCCALL);
7623
7624 getParser().getStreamer().emitInstruction(Inst, getSTI());
7625 return false;
7626}
7627
7628/// ::= .loh <lohName | lohId> label1, ..., labelN
7629/// The number of arguments depends on the loh identifier.
7630bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7632 if (getTok().isNot(AsmToken::Identifier)) {
7633 if (getTok().isNot(AsmToken::Integer))
7634 return TokError("expected an identifier or a number in directive");
7635 // We successfully get a numeric value for the identifier.
7636 // Check if it is valid.
7637 int64_t Id = getTok().getIntVal();
7638 if (Id <= -1U && !isValidMCLOHType(Id))
7639 return TokError("invalid numeric identifier in directive");
7640 Kind = (MCLOHType)Id;
7641 } else {
7642 StringRef Name = getTok().getIdentifier();
7643 // We successfully parse an identifier.
7644 // Check if it is a recognized one.
7645 int Id = MCLOHNameToId(Name);
7646
7647 if (Id == -1)
7648 return TokError("invalid identifier in directive");
7649 Kind = (MCLOHType)Id;
7650 }
7651 // Consume the identifier.
7652 Lex();
7653 // Get the number of arguments of this LOH.
7654 int NbArgs = MCLOHIdToNbArgs(Kind);
7655
7656 assert(NbArgs != -1 && "Invalid number of arguments");
7657
7659 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7660 StringRef Name;
7661 if (getParser().parseIdentifier(Name))
7662 return TokError("expected identifier in directive");
7663 Args.push_back(getContext().getOrCreateSymbol(Name));
7664
7665 if (Idx + 1 == NbArgs)
7666 break;
7667 if (parseComma())
7668 return true;
7669 }
7670 if (parseEOL())
7671 return true;
7672
7673 getStreamer().emitLOHDirective(Kind, Args);
7674 return false;
7675}
7676
/// parseDirectiveLtorg
/// ::= .ltorg | .pool
/// Flushes the pending literal/constant pool at the current location.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  if (parseEOL())
    return true;
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
7685
7686/// parseDirectiveReq
7687/// ::= name .req registername
7688bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7689 Lex(); // Eat the '.req' token.
7690 SMLoc SRegLoc = getLoc();
7691 RegKind RegisterKind = RegKind::Scalar;
7692 MCRegister RegNum;
7693 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7694
7695 if (!ParseRes.isSuccess()) {
7696 StringRef Kind;
7697 RegisterKind = RegKind::NeonVector;
7698 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7699
7700 if (ParseRes.isFailure())
7701 return true;
7702
7703 if (ParseRes.isSuccess() && !Kind.empty())
7704 return Error(SRegLoc, "vector register without type specifier expected");
7705 }
7706
7707 if (!ParseRes.isSuccess()) {
7708 StringRef Kind;
7709 RegisterKind = RegKind::SVEDataVector;
7710 ParseRes =
7711 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7712
7713 if (ParseRes.isFailure())
7714 return true;
7715
7716 if (ParseRes.isSuccess() && !Kind.empty())
7717 return Error(SRegLoc,
7718 "sve vector register without type specifier expected");
7719 }
7720
7721 if (!ParseRes.isSuccess()) {
7722 StringRef Kind;
7723 RegisterKind = RegKind::SVEPredicateVector;
7724 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7725
7726 if (ParseRes.isFailure())
7727 return true;
7728
7729 if (ParseRes.isSuccess() && !Kind.empty())
7730 return Error(SRegLoc,
7731 "sve predicate register without type specifier expected");
7732 }
7733
7734 if (!ParseRes.isSuccess())
7735 return Error(SRegLoc, "register name or alias expected");
7736
7737 // Shouldn't be anything else.
7738 if (parseEOL())
7739 return true;
7740
7741 auto pair = std::make_pair(RegisterKind, RegNum);
7742 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7743 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7744
7745 return false;
7746}
7747
7748/// parseDirectiveUneq
7749/// ::= .unreq registername
7750bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7751 if (getTok().isNot(AsmToken::Identifier))
7752 return TokError("unexpected input in .unreq directive.");
7753 RegisterReqs.erase(getTok().getIdentifier().lower());
7754 Lex(); // Eat the identifier.
7755 return parseToken(AsmToken::EndOfStatement);
7756}
7757
/// parseDirectiveCFINegateRAState
/// ::= .cfi_negate_ra_state
/// Forwards the CFI return-address-state toggle to the streamer.
bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  if (parseEOL())
    return true;
  getStreamer().emitCFINegateRAState();
  return false;
}
7764
/// parseDirectiveCFINegateRAStateWithPC
/// ::= .cfi_negate_ra_state_with_pc
/// Forwards the PC-qualified return-address-state toggle to the streamer.
bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
  if (parseEOL())
    return true;
  getStreamer().emitCFINegateRAStateWithPC();
  return false;
}
7771
/// parseDirectiveCFIBKeyFrame
/// ::= .cfi_b_key
/// Marks the current frame as signed with the B pointer-authentication key.
bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  if (parseEOL())
    return true;
  getStreamer().emitCFIBKeyFrame();
  return false;
}
7780
/// parseDirectiveCFIMTETaggedFrame
/// ::= .cfi_mte_tagged_frame
/// Marks the current frame as MTE-tagged via the streamer.
bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
  if (parseEOL())
    return true;
  getStreamer().emitCFIMTETaggedFrame();
  return false;
}
7789
7790/// parseDirectiveVariantPCS
7791/// ::= .variant_pcs symbolname
7792bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7793 StringRef Name;
7794 if (getParser().parseIdentifier(Name))
7795 return TokError("expected symbol name");
7796 if (parseEOL())
7797 return true;
7798 getTargetStreamer().emitDirectiveVariantPCS(
7799 getContext().getOrCreateSymbol(Name));
7800 return false;
7801}
7802
7803/// parseDirectiveSEHAllocStack
7804/// ::= .seh_stackalloc
7805bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7806 int64_t Size;
7807 if (parseImmExpr(Size))
7808 return true;
7809 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7810 return false;
7811}
7812
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
/// No operands; emits the end-of-prologue unwind marker.
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7819
7820/// parseDirectiveSEHSaveR19R20X
7821/// ::= .seh_save_r19r20_x
7822bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7823 int64_t Offset;
7824 if (parseImmExpr(Offset))
7825 return true;
7826 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7827 return false;
7828}
7829
7830/// parseDirectiveSEHSaveFPLR
7831/// ::= .seh_save_fplr
7832bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7833 int64_t Offset;
7834 if (parseImmExpr(Offset))
7835 return true;
7836 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7837 return false;
7838}
7839
7840/// parseDirectiveSEHSaveFPLRX
7841/// ::= .seh_save_fplr_x
7842bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7843 int64_t Offset;
7844 if (parseImmExpr(Offset))
7845 return true;
7846 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7847 return false;
7848}
7849
7850/// parseDirectiveSEHSaveReg
7851/// ::= .seh_save_reg
7852bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7853 unsigned Reg;
7854 int64_t Offset;
7855 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7856 parseComma() || parseImmExpr(Offset))
7857 return true;
7858 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7859 return false;
7860}
7861
7862/// parseDirectiveSEHSaveRegX
7863/// ::= .seh_save_reg_x
7864bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7865 unsigned Reg;
7866 int64_t Offset;
7867 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7868 parseComma() || parseImmExpr(Offset))
7869 return true;
7870 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7871 return false;
7872}
7873
7874/// parseDirectiveSEHSaveRegP
7875/// ::= .seh_save_regp
7876bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7877 unsigned Reg;
7878 int64_t Offset;
7879 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7880 parseComma() || parseImmExpr(Offset))
7881 return true;
7882 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7883 return false;
7884}
7885
7886/// parseDirectiveSEHSaveRegPX
7887/// ::= .seh_save_regp_x
7888bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7889 unsigned Reg;
7890 int64_t Offset;
7891 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7892 parseComma() || parseImmExpr(Offset))
7893 return true;
7894 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7895 return false;
7896}
7897
/// parseDirectiveSEHSaveLRPair
/// ::= .seh_save_lrpair
/// Operands: a register in [x0, x19] or lr, a comma, then an immediate
/// stack offset. The register must be an even distance from x19
/// (x19, x21, ...).
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Point the diagnostic at the operand, not the directive keyword.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7913
7914/// parseDirectiveSEHSaveFReg
7915/// ::= .seh_save_freg
7916bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7917 unsigned Reg;
7918 int64_t Offset;
7919 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7920 parseComma() || parseImmExpr(Offset))
7921 return true;
7922 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7923 return false;
7924}
7925
7926/// parseDirectiveSEHSaveFRegX
7927/// ::= .seh_save_freg_x
7928bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7929 unsigned Reg;
7930 int64_t Offset;
7931 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7932 parseComma() || parseImmExpr(Offset))
7933 return true;
7934 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7935 return false;
7936}
7937
7938/// parseDirectiveSEHSaveFRegP
7939/// ::= .seh_save_fregp
7940bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7941 unsigned Reg;
7942 int64_t Offset;
7943 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7944 parseComma() || parseImmExpr(Offset))
7945 return true;
7946 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7947 return false;
7948}
7949
7950/// parseDirectiveSEHSaveFRegPX
7951/// ::= .seh_save_fregp_x
7952bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7953 unsigned Reg;
7954 int64_t Offset;
7955 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7956 parseComma() || parseImmExpr(Offset))
7957 return true;
7958 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7959 return false;
7960}
7961
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
/// No operands; emits the set-frame-pointer unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7968
7969/// parseDirectiveSEHAddFP
7970/// ::= .seh_add_fp
7971bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7972 int64_t Size;
7973 if (parseImmExpr(Size))
7974 return true;
7975 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7976 return false;
7977}
7978
/// parseDirectiveSEHNop
/// ::= .seh_nop
/// No operands; emits the unwind nop opcode.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7985
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
/// No operands; emits the save-next-register-pair unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7992
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
/// No operands; marks the start of an epilogue scope.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7999
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
/// No operands; marks the end of an epilogue scope.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
8006
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
/// No operands; emits the trap-frame unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
8013
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
/// No operands; emits the machine-frame unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
8020
/// parseDirectiveSEHContext
/// ::= .seh_context
/// No operands; emits the context unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
8027
/// parseDirectiveSEHECContext
/// ::= .seh_ec_context
/// No operands; emits the EC (emulation-compatible) context unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
8034
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
/// No operands; emits the clear-unwound-to-call unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
8041
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
/// No operands; emits the pointer-authentication LR-sign unwind opcode.
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
8048
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
/// Operands: a register (x, d, or q class) and an immediate offset. \p Paired
/// and \p Writeback select which of the four directive variants is emitted.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Integer registers. Offset must be non-negative and aligned to 16 for
    // paired/writeback forms, 8 otherwise.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    // Encode as the architectural register number: fp=29, lr=30, xN=N.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // The pair is (reg, reg+1), so the last register cannot start a pair.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit floating-point registers; same alignment rule as integers.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers always require 16-byte aligned offsets.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
8126
8127/// parseDirectiveAllocZ
8128/// ::= .seh_allocz
8129bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8130 int64_t Offset;
8131 if (parseImmExpr(Offset))
8132 return true;
8133 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8134 return false;
8135}
8136
/// parseDirectiveSEHSaveZReg
/// ::= .seh_save_zreg
/// Operands: an SVE data register in z8-z23, a comma, then an immediate
/// stack offset.
bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
  if (!Res.isSuccess())
    return true;
  if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
            "expected register in range z8 to z23"))
    return true;
  if (parseComma() || parseImmExpr(Offset))
    return true;
  // The unwind opcode encodes the register as an index relative to z0.
  getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
  return false;
}
8155
/// parseDirectiveSEHSavePReg
/// ::= .seh_save_preg
/// Operands: an SVE predicate register in p4-p15, a comma, then an immediate
/// stack offset.
bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (!Res.isSuccess())
    return true;
  if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
            "expected register in range p4 to p15"))
    return true;
  if (parseComma() || parseImmExpr(Offset))
    return true;
  // The unwind opcode encodes the register as an index relative to p0.
  getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
  return false;
}
8174
/// parseDirectiveAeabiSubSectionHeader
/// ::= .aeabi_subsection name [, optionality, type]
bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
  // Handle parsing of .aeabi_subsection directives
  // - On first declaration of a subsection, expect exactly three identifiers
  //   after `.aeabi_subsection`: the subsection name and two parameters.
  // - When switching to an existing subsection, it is valid to provide only
  //   the subsection name, or the name together with the two parameters.
  MCAsmParser &Parser = getParser();

  // Consume the name (subsection name)
  StringRef SubsectionName;
  AArch64BuildAttributes::VendorID SubsectionNameID;
  if (Parser.getTok().is(AsmToken::Identifier)) {
    SubsectionName = Parser.getTok().getIdentifier();
    SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
  } else {
    Error(Parser.getTok().getLoc(), "subsection name not found");
    return true;
  }
  Parser.Lex();

  // Look up whether a subsection with this name has already been declared.
  std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
      getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
  // Check whether only the subsection name was provided.
  // If so, the user is trying to switch to a subsection that should have been
  // declared before.
  // NOTE(review): the guarding end-of-statement check line appears to be
  // missing from this rendering -- verify against upstream.
    if (SubsectionExists) {
      // Switch back to the known subsection, reusing its recorded parameters.
      getTargetStreamer().emitAttributesSubsection(
          SubsectionName,
          // NOTE(review): enum-conversion wrapper lines appear to be missing
          // here -- verify against upstream.
              SubsectionExists->IsOptional),
              SubsectionExists->ParameterType));
      return false;
    }
    // If subsection does not exists, report error.
    else {
      Error(Parser.getTok().getLoc(),
            "Could not switch to subsection '" + SubsectionName +
                "' using subsection name, subsection has not been defined");
      return true;
    }
  }

  // Otherwise, expecting 2 more parameters: consume a comma
  // parseComma() return *false* on success, and call Lex(), no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the first parameter (optionality parameter)
  // NOTE(review): the declaration of IsOptional appears to be missing from
  // this rendering -- verify against upstream.
  // options: optional/required
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Optionality = Parser.getTok().getIdentifier();
    IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
    // NOTE(review): the lookup-failure check and its message line appear to
    // be missing here -- verify against upstream.
      Error(Parser.getTok().getLoc(),
      return true;
    }
    // A re-declaration must agree with the previously recorded optionality.
    if (SubsectionExists) {
      if (IsOptional != SubsectionExists->IsOptional) {
        Error(Parser.getTok().getLoc(),
              "optionality mismatch! subsection '" + SubsectionName +
                  "' already exists with optionality defined as '" +
                      SubsectionExists->IsOptional) +
                  "' and not '" +
                  AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
        return true;
      }
    }
  } else {
    Error(Parser.getTok().getLoc(),
          "optionality parameter not found, expected required|optional");
    return true;
  }
  // Check for possible IsOptional unaccepted values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
    if (AArch64BuildAttributes::REQUIRED == IsOptional) {
      Error(Parser.getTok().getLoc(),
            "aeabi_feature_and_bits must be marked as optional");
      return true;
    }
  }
  if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
      Error(Parser.getTok().getLoc(),
            "aeabi_pauthabi must be marked as required");
      return true;
    }
  }
  Parser.Lex();
  // consume a comma
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (type parameter)
  // NOTE(review): the declaration of Type, its lookup, and the not-found
  // check lines appear to be missing here -- verify against upstream.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
      Error(Parser.getTok().getLoc(),
      return true;
    }
    // A re-declaration must agree with the previously recorded type.
    if (SubsectionExists) {
      if (Type != SubsectionExists->ParameterType) {
        Error(Parser.getTok().getLoc(),
              "type mismatch! subsection '" + SubsectionName +
                  "' already exists with type defined as '" +
                      SubsectionExists->ParameterType) +
                  "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
                  "'");
        return true;
      }
    }
  } else {
    Error(Parser.getTok().getLoc(),
          "type parameter not found, expected uleb128|ntbs");
    return true;
  }
  // Check for possible unaccepted 'type' values for known subsections
  if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
      AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
    // NOTE(review): the NTBS-type check line appears to be missing here.
      Error(Parser.getTok().getLoc(),
            SubsectionName + " must be marked as ULEB128");
      return true;
    }
  }
  Parser.Lex();

  // Parsing finished, check for trailing tokens.
  // NOTE(review): the end-of-statement check line appears to be missing here.
    Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
                                    "attributes subsection header directive");
    return true;
  }

  getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);

  return false;
}
8324
/// parseDirectiveAeabiAArch64Attr
/// ::= .aeabi_attribute tag, value
bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
  // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
  // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
  // separated by a comma.
  MCAsmParser &Parser = getParser();

  // Attributes may only be emitted into a previously activated subsection.
  std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
      getTargetStreamer().getActiveAttributesSubsection();
  if (nullptr == ActiveSubsection) {
    Error(Parser.getTok().getLoc(),
          "no active subsection, build attribute can not be added");
    return true;
  }
  StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
  unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;

  // Map the active subsection's name to a known vendor ID, if possible.
  unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
  // NOTE(review): the vendor-name comparison call lines appear to be
  // partially missing in this rendering -- verify against upstream.
          AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
    ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
      ActiveSubsectionName)

  // The tag: either a raw integer, or (for known subsections) a named tag.
  StringRef TagStr = "";
  unsigned Tag;
  if (Parser.getTok().is(AsmToken::Integer)) {
    Tag = getTok().getIntVal();
  } else if (Parser.getTok().is(AsmToken::Identifier)) {
    TagStr = Parser.getTok().getIdentifier();
    switch (ActiveSubsectionID) {
      // NOTE(review): case labels and tag-lookup lines appear to be missing
      // from this switch in this rendering -- verify against upstream.
      // Tag was provided as an unrecognized string instead of an unsigned
      // integer
      Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
                                          "' \nExcept for public subsections, "
                                          "tags have to be an unsigned int.");
      return true;
      break;
        Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
                                            TagStr + "' for subsection '" +
                                            ActiveSubsectionName + "'");
        return true;
      }
      break;
        Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
                                            TagStr + "' for subsection '" +
                                            ActiveSubsectionName + "'");
        return true;
      }
      break;
    }
  } else {
    Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
    return true;
  }
  Parser.Lex();
  // consume a comma
  // parseComma() return *false* on success, and call Lex(), no need to call
  // Lex() again.
  if (Parser.parseComma()) {
    return true;
  }

  // Consume the second parameter (attribute value)
  unsigned ValueInt = unsigned(-1);
  std::string ValueStr = "";
  if (Parser.getTok().is(AsmToken::Integer)) {
    // Integer value is only valid for ULEB128-typed subsections.
    if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
      Error(
          Parser.getTok().getLoc(),
          "active subsection type is NTBS (string), found ULEB128 (unsigned)");
      return true;
    }
    ValueInt = getTok().getIntVal();
  } else if (Parser.getTok().is(AsmToken::Identifier)) {
    // Bare identifier value is only valid for NTBS-typed subsections.
    if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
      Error(
          Parser.getTok().getLoc(),
          "active subsection type is ULEB128 (unsigned), found NTBS (string)");
      return true;
    }
    ValueStr = Parser.getTok().getIdentifier();
  } else if (Parser.getTok().is(AsmToken::String)) {
    // Quoted string value is only valid for NTBS-typed subsections.
    if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
      Error(
          Parser.getTok().getLoc(),
          "active subsection type is ULEB128 (unsigned), found NTBS (string)");
      return true;
    }
    ValueStr = Parser.getTok().getString();
  } else {
    Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
    return true;
  }
  // Check for possible unaccepted values for known tags
  // (AEABI_FEATURE_AND_BITS)
  if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
    if (0 != ValueInt && 1 != ValueInt) {
      Error(Parser.getTok().getLoc(),
            "unknown AArch64 build attributes Value for Tag '" + TagStr +
                "' options are 0|1");
      return true;
    }
  }
  Parser.Lex();

  // Parsing finished. Check for trailing tokens.
  // NOTE(review): the end-of-statement check line appears to be missing here.
    Error(Parser.getTok().getLoc(),
          "unexpected token for AArch64 build attributes tag and value "
          "attribute directive");
    return true;
  }

  // Emit exactly one of the two value forms; the sentinel unsigned(-1) /
  // empty string marks the unused one.
  if (unsigned(-1) != ValueInt) {
    getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
  }
  if ("" != ValueStr) {
    getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
                                      ValueStr);
  }
  return false;
}
8456
8457bool AArch64AsmParser::parseExprWithSpecifier(const MCExpr *&Res, SMLoc &E) {
8458 SMLoc Loc = getLoc();
8459 if (getLexer().getKind() != AsmToken::Identifier)
8460 return TokError("expected '%' relocation specifier");
8461 StringRef Identifier = getParser().getTok().getIdentifier();
8462 auto Spec = AArch64::parsePercentSpecifierName(Identifier);
8463 if (!Spec)
8464 return TokError("invalid relocation specifier");
8465
8466 getParser().Lex(); // Eat the identifier
8467 if (parseToken(AsmToken::LParen, "expected '('"))
8468 return true;
8469
8470 const MCExpr *SubExpr;
8471 if (getParser().parseParenExpression(SubExpr, E))
8472 return true;
8473
8474 Res = MCSpecifierExpr::create(SubExpr, Spec, getContext(), Loc);
8475 return false;
8476}
8477
8478bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8479 SMLoc EndLoc;
8480 if (parseOptionalToken(AsmToken::Percent))
8481 return parseExprWithSpecifier(Res, EndLoc);
8482
8483 if (getParser().parseExpression(Res))
8484 return true;
8485 MCAsmParser &Parser = getParser();
8486 if (!parseOptionalToken(AsmToken::At))
8487 return false;
8488 if (getLexer().getKind() != AsmToken::Identifier)
8489 return Error(getLoc(), "expected relocation specifier");
8490
8491 std::string Identifier = Parser.getTok().getIdentifier().lower();
8492 SMLoc Loc = getLoc();
8493 Lex();
8494 if (Identifier == "auth")
8495 return parseAuthExpr(Res, EndLoc);
8496
8497 auto Spec = AArch64::S_None;
8498 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8499 if (Identifier == "got")
8500 Spec = AArch64::S_MACHO_GOT;
8501 }
8502 if (Spec == AArch64::S_None)
8503 return Error(Loc, "invalid relocation specifier");
8504 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
8505 Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
8506 SRE->getLoc());
8507 else
8508 return Error(Loc, "@ specifier only allowed after a symbol");
8509
8510 for (;;) {
8511 std::optional<MCBinaryExpr::Opcode> Opcode;
8512 if (parseOptionalToken(AsmToken::Plus))
8513 Opcode = MCBinaryExpr::Add;
8514 else if (parseOptionalToken(AsmToken::Minus))
8515 Opcode = MCBinaryExpr::Sub;
8516 else
8517 break;
8518 const MCExpr *Term;
8519 if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
8520 return true;
8521 Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
8522 }
8523 return false;
8524}
8525
8526/// parseAuthExpr
8527/// ::= _sym@AUTH(ib,123[,addr])
8528/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8529/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8530bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8531 MCAsmParser &Parser = getParser();
8532 MCContext &Ctx = getContext();
8533 AsmToken Tok = Parser.getTok();
8534
8535 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8536 if (parseToken(AsmToken::LParen, "expected '('"))
8537 return true;
8538
8539 if (Parser.getTok().isNot(AsmToken::Identifier))
8540 return TokError("expected key name");
8541
8542 StringRef KeyStr = Parser.getTok().getIdentifier();
8543 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8544 if (!KeyIDOrNone)
8545 return TokError("invalid key '" + KeyStr + "'");
8546 Parser.Lex();
8547
8548 if (parseToken(AsmToken::Comma, "expected ','"))
8549 return true;
8550
8551 if (Parser.getTok().isNot(AsmToken::Integer))
8552 return TokError("expected integer discriminator");
8553 int64_t Discriminator = Parser.getTok().getIntVal();
8554
8555 if (!isUInt<16>(Discriminator))
8556 return TokError("integer discriminator " + Twine(Discriminator) +
8557 " out of range [0, 0xFFFF]");
8558 Parser.Lex();
8559
8560 bool UseAddressDiversity = false;
8561 if (Parser.getTok().is(AsmToken::Comma)) {
8562 Parser.Lex();
8563 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8564 Parser.getTok().getIdentifier() != "addr")
8565 return TokError("expected 'addr'");
8566 UseAddressDiversity = true;
8567 Parser.Lex();
8568 }
8569
8570 EndLoc = Parser.getTok().getEndLoc();
8571 if (parseToken(AsmToken::RParen, "expected ')'"))
8572 return true;
8573
8574 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8575 UseAddressDiversity, Ctx, Res->getLoc());
8576 return false;
8577}
8578
8579bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8580 AArch64::Specifier &ELFSpec,
8581 AArch64::Specifier &DarwinSpec,
8582 int64_t &Addend) {
8583 ELFSpec = AArch64::S_INVALID;
8584 DarwinSpec = AArch64::S_None;
8585 Addend = 0;
8586
8587 if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
8588 ELFSpec = AE->getSpecifier();
8589 Expr = AE->getSubExpr();
8590 }
8591
8592 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8593 if (SE) {
8594 // It's a simple symbol reference with no addend.
8595 DarwinSpec = AArch64::Specifier(SE->getKind());
8596 return true;
8597 }
8598
8599 // Check that it looks like a symbol + an addend
8600 MCValue Res;
8601 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
8602 if (!Relocatable || Res.getSubSym())
8603 return false;
8604
8605 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8606 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8607 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8608 return false;
8609
8610 if (Res.getAddSym())
8611 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8612 Addend = Res.getConstant();
8613
8614 // It's some symbol reference + a constant addend, but really
8615 // shouldn't use both Darwin and ELF syntax.
8616 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8617}
8618
8619/// Force static initialization.
8620extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8628
8629#define GET_REGISTER_MATCHER
8630#define GET_SUBTARGET_FEATURE_NAME
8631#define GET_MATCHER_IMPLEMENTATION
8632#define GET_MNEMONIC_SPELL_CHECKER
8633#include "AArch64GenAsmMatcher.inc"
8634
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Validates an operand against target-specific match classes that the
// generated matcher cannot check directly: the literal "za" token and
// fixed-value literal immediates embedded in InstAlias asm strings.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Returns Match_Success iff Op is a constant immediate == ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;

  // If the kind is a token for a literal immediate, check if our asm operand
  // matches. This is for InstAliases which have a fixed-value immediate in
  // the asm string, such as hints which are parsed into a specific
  // instruction definition.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
  MATCH_HASH(0)
  MATCH_HASH(1)
  MATCH_HASH(2)
  MATCH_HASH(3)
  MATCH_HASH(4)
  MATCH_HASH(6)
  MATCH_HASH(7)
  MATCH_HASH(8)
  MATCH_HASH(10)
  MATCH_HASH(12)
  MATCH_HASH(14)
  MATCH_HASH(16)
  MATCH_HASH(24)
  MATCH_HASH(25)
  MATCH_HASH(26)
  MATCH_HASH(27)
  MATCH_HASH(28)
  MATCH_HASH(29)
  MATCH_HASH(30)
  MATCH_HASH(31)
  MATCH_HASH(32)
  MATCH_HASH(40)
  MATCH_HASH(48)
  MATCH_HASH(64)
#undef MATCH_HASH
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
  // NOTE(review): the MATCH_HASH_MINUS(...) invocation lines appear to be
  // missing in this rendering -- verify against upstream.
#undef MATCH_HASH_MINUS
  }
}
8705
8706ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8707
8708 SMLoc S = getLoc();
8709
8710 if (getTok().isNot(AsmToken::Identifier))
8711 return Error(S, "expected register");
8712
8713 MCRegister FirstReg;
8714 ParseStatus Res = tryParseScalarRegister(FirstReg);
8715 if (!Res.isSuccess())
8716 return Error(S, "expected first even register of a consecutive same-size "
8717 "even/odd register pair");
8718
8719 const MCRegisterClass &WRegClass =
8720 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8721 const MCRegisterClass &XRegClass =
8722 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8723
8724 bool isXReg = XRegClass.contains(FirstReg),
8725 isWReg = WRegClass.contains(FirstReg);
8726 if (!isXReg && !isWReg)
8727 return Error(S, "expected first even register of a consecutive same-size "
8728 "even/odd register pair");
8729
8730 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8731 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8732
8733 if (FirstEncoding & 0x1)
8734 return Error(S, "expected first even register of a consecutive same-size "
8735 "even/odd register pair");
8736
8737 if (getTok().isNot(AsmToken::Comma))
8738 return Error(getLoc(), "expected comma");
8739 // Eat the comma
8740 Lex();
8741
8742 SMLoc E = getLoc();
8743 MCRegister SecondReg;
8744 Res = tryParseScalarRegister(SecondReg);
8745 if (!Res.isSuccess())
8746 return Error(E, "expected second odd register of a consecutive same-size "
8747 "even/odd register pair");
8748
8749 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8750 (isXReg && !XRegClass.contains(SecondReg)) ||
8751 (isWReg && !WRegClass.contains(SecondReg)))
8752 return Error(E, "expected second odd register of a consecutive same-size "
8753 "even/odd register pair");
8754
8755 MCRegister Pair;
8756 if (isXReg) {
8757 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8758 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8759 } else {
8760 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8761 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8762 }
8763
8764 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8765 getLoc(), getContext()));
8766
8767 return ParseStatus::Success;
8768}
8769
/// Parse an SVE data vector register (e.g. "z0.d"), optionally requiring a
/// type suffix (ParseSuffix) and optionally accepting a trailing
/// shift/extend modifier (ParseShiftExtend).
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;

  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (!Res.isSuccess())
    return Res;

  // When a suffix is mandatory, a bare register name is not a match.
  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // An optional vector index (e.g. "[3]") may still follow.
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  // NOTE(review): the declaration of ExtOpnd (a temporary operand vector the
  // shift/extend is parsed into) appears to be missing in this rendering --
  // verify against upstream.
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the vector-register operand.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
8820
8821ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8822 MCAsmParser &Parser = getParser();
8823
8824 SMLoc SS = getLoc();
8825 const AsmToken &TokE = getTok();
8826 bool IsHash = TokE.is(AsmToken::Hash);
8827
8828 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8829 return ParseStatus::NoMatch;
8830
8831 int64_t Pattern;
8832 if (IsHash) {
8833 Lex(); // Eat hash
8834
8835 // Parse the immediate operand.
8836 const MCExpr *ImmVal;
8837 SS = getLoc();
8838 if (Parser.parseExpression(ImmVal))
8839 return ParseStatus::Failure;
8840
8841 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8842 if (!MCE)
8843 return TokError("invalid operand for instruction");
8844
8845 Pattern = MCE->getValue();
8846 } else {
8847 // Parse the pattern
8848 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8849 if (!Pat)
8850 return ParseStatus::NoMatch;
8851
8852 Lex();
8853 Pattern = Pat->Encoding;
8854 assert(Pattern >= 0 && Pattern < 32);
8855 }
8856
8857 Operands.push_back(
8858 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8859 SS, getLoc(), getContext()));
8860
8861 return ParseStatus::Success;
8862}
8863
8864ParseStatus
8865AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8866 int64_t Pattern;
8867 SMLoc SS = getLoc();
8868 const AsmToken &TokE = getTok();
8869 // Parse the pattern
8870 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8871 TokE.getString());
8872 if (!Pat)
8873 return ParseStatus::NoMatch;
8874
8875 Lex();
8876 Pattern = Pat->Encoding;
8877 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8878
8879 Operands.push_back(
8880 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8881 SS, getLoc(), getContext()));
8882
8883 return ParseStatus::Success;
8884}
8885
8886ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8887 SMLoc SS = getLoc();
8888
8889 MCRegister XReg;
8890 if (!tryParseScalarRegister(XReg).isSuccess())
8891 return ParseStatus::NoMatch;
8892
8893 MCContext &ctx = getContext();
8894 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8895 MCRegister X8Reg = RI->getMatchingSuperReg(
8896 XReg, AArch64::x8sub_0,
8897 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8898 if (!X8Reg)
8899 return Error(SS,
8900 "expected an even-numbered x-register in the range [x0,x22]");
8901
8902 Operands.push_back(
8903 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8904 return ParseStatus::Success;
8905}
8906
8907ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8908 SMLoc S = getLoc();
8909
8910 if (getTok().isNot(AsmToken::Integer))
8911 return ParseStatus::NoMatch;
8912
8913 if (getLexer().peekTok().isNot(AsmToken::Colon))
8914 return ParseStatus::NoMatch;
8915
8916 const MCExpr *ImmF;
8917 if (getParser().parseExpression(ImmF))
8918 return ParseStatus::NoMatch;
8919
8920 if (getTok().isNot(AsmToken::Colon))
8921 return ParseStatus::NoMatch;
8922
8923 Lex(); // Eat ':'
8924 if (getTok().isNot(AsmToken::Integer))
8925 return ParseStatus::NoMatch;
8926
8927 SMLoc E = getTok().getLoc();
8928 const MCExpr *ImmL;
8929 if (getParser().parseExpression(ImmL))
8930 return ParseStatus::NoMatch;
8931
8932 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8933 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8934
8935 Operands.push_back(
8936 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8937 return ParseStatus::Success;
8938}
8939
/// Parse an immediate, adjust it by the compile-time constant Adj (+1 or -1),
/// and push it when the adjusted value lies in [0, 63]; otherwise push an
/// out-of-range sentinel so the matcher reports a range error.
template <int Adj>
ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Optional '#' prefix and optional leading minus sign.
  parseOptionalToken(AsmToken::Hash);
  bool IsNegative = parseOptionalToken(AsmToken::Minus);

  if (getTok().isNot(AsmToken::Integer))
    return ParseStatus::NoMatch;

  const MCExpr *Ex;
  if (getParser().parseExpression(Ex))
    return ParseStatus::NoMatch;

  // NOTE(review): dyn_cast can return null for a non-constant expression;
  // this presumably relies on the Integer-token check above guaranteeing a
  // constant -- verify against upstream.
  int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
  if (IsNegative)
    Imm = -Imm;

  // We want an adjusted immediate in the range [0, 63]. If we don't have one,
  // return a value, which is certain to trigger a error message about invalid
  // immediate range instead of a non-descriptive invalid operand error.
  static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
  if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
    Imm = -2;
  else
    Imm += Adj;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(
  // NOTE(review): the argument line (constant-expression creation with S, E,
  // and the context) appears to be missing in this rendering -- verify
  // against upstream.

  return ParseStatus::Success;
}
static bool isGPR64(unsigned Reg, unsigned SubReg, const MachineRegisterInfo *MRI)
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg)
LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (elements, element-width) if Suffix is a valid vector kind.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
static AArch64CC::CondCode parseCondCode(ArrayRef< MachineOperand > Cond)
static SDValue getCondCode(SelectionDAG &DAG, AArch64CC::CondCode CC)
Like SelectionDAG::getCondCode(), but for AArch64 condition codes.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
@ Default
Value * getPointer(Value *Ptr)
static LVOptions Options
Definition LVOptions.cpp:25
Live Register Matrix
loop data Loop Data Prefetch
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
APInt bitcastToAPInt() const
Definition APFloat.h:1408
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1577
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition AsmLexer.h:121
void UnLex(AsmToken const &Token)
Definition AsmLexer.h:106
LLVM_ABI SMLoc getLoc() const
Definition AsmLexer.cpp:31
int64_t getIntVal() const
Definition MCAsmMacro.h:108
bool isNot(TokenKind K) const
Definition MCAsmMacro.h:76
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition MCAsmMacro.h:103
bool is(TokenKind K) const
Definition MCAsmMacro.h:75
LLVM_ABI SMLoc getEndLoc() const
Definition AsmLexer.cpp:33
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition MCAsmMacro.h:92
Base class for user error types.
Definition Error.h:354
Container class for subtarget features.
constexpr size_t size() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
void printExpr(raw_ostream &, const MCExpr &) const
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
AsmLexer & getLexer()
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
static LLVM_ABI const MCBinaryExpr * create(Opcode Op, const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:201
@ Sub
Subtraction.
Definition MCExpr.h:324
@ Add
Addition.
Definition MCExpr.h:302
int64_t getValue() const
Definition MCExpr.h:171
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
const MCRegisterInfo * getRegisterInfo() const
Definition MCContext.h:414
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
SMLoc getLoc() const
Definition MCExpr.h:86
unsigned getNumOperands() const
Definition MCInst.h:212
void setLoc(SMLoc loc)
Definition MCInst.h:207
unsigned getOpcode() const
Definition MCInst.h:202
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isImm() const
Definition MCInst.h:66
bool isReg() const
Definition MCInst.h:65
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
const MCExpr * getExpr() const
Definition MCInst.h:118
bool isExpr() const
Definition MCInst.h:69
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual MCRegister getReg() const =0
MCRegister getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
const char * getName(MCRegister RegNo) const
Return the human-readable symbolic target-specific name for the specified physical register.
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
constexpr unsigned id() const
Definition MCRegister.h:82
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:221
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:332
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
const FeatureBitset & ClearFeatureBitsTransitively(const FeatureBitset &FB)
const FeatureBitset & SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
VariantKind getKind() const
Definition MCExpr.h:232
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
uint32_t getSpecifier() const
Definition MCValue.h:46
const MCSymbol * getSubSym() const
Definition MCValue.h:51
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
constexpr unsigned id() const
Definition Register.h:100
Represents a location in source code.
Definition SMLoc.h:22
static SMLoc getFromPointer(const char *Ptr)
Definition SMLoc.h:35
constexpr const char * getPointer() const
Definition SMLoc.h:33
void insert_range(Range &&R)
Definition SmallSet.h:196
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition SmallSet.h:229
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
void erase(iterator I)
Definition StringMap.h:427
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition StringMap.h:321
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
static constexpr size_t npos
Definition StringRef.h:57
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:490
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:140
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition StringRef.h:629
LLVM_ABI std::string upper() const
Convert the given ASCII string to uppercase.
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition StringRef.h:609
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition StringRef.h:844
LLVM_ABI std::string lower() const
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition StringRef.h:169
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition Triple.h:816
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define INT64_MIN
Definition DataTypes.h:74
#define INT64_MAX
Definition DataTypes.h:71
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SubsectionType getTypeID(StringRef Type)
StringRef getVendorName(unsigned const Vendor)
StringRef getOptionalStr(unsigned Optional)
VendorID
AArch64 build attributes vendors IDs (a.k.a subsection name)
SubsectionOptional getOptionalID(StringRef Optional)
FeatureAndBitsTags getFeatureAndBitsTagsID(StringRef FeatureAndBitsTag)
VendorID getVendorID(StringRef const Vendor)
PauthABITags getPauthABITagsID(StringRef PauthABITag)
StringRef getTypeStr(unsigned Type)
static CondCode getInvertedCondCode(CondCode Code)
const PHint * lookupPHintByName(StringRef)
uint32_t parseGenericRegister(StringRef Name)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static bool isSVEAddSubImm(int64_t Imm)
Returns true if Imm is valid for ADD/SUB.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
static bool isSVECpyImm(int64_t Imm)
Returns true if Imm is valid for CPY/DUP.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Specifier parsePercentSpecifierName(StringRef)
LLVM_ABI const ArchInfo * parseArch(StringRef Arch)
LLVM_ABI const ArchInfo * getArchForCpu(StringRef CPU)
LLVM_ABI bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Entry
Definition COFF.h:862
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
constexpr double e
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
Context & getContext() const
Definition BasicBlock.h:99
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition Error.h:1113
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
static int MCLOHNameToId(StringRef Name)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
static bool isMem(const MachineInstr &MI, unsigned Op)
LLVM_ABI std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
std::string utostr(uint64_t X, bool isNeg=false)
static bool isValidMCLOHType(unsigned Kind)
Op::Description Desc
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
static int MCLOHIdToNbArgs(MCLOHType Kind)
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
static MCRegister getXRegFromWReg(MCRegister Reg)
MCLOHType
Linker Optimization Hint Type.
FunctionAddr VTableAddr Next
Definition InstrProf.h:141
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
#define N
const FeatureBitset Features
const char * Name
AArch64::ExtensionBitset DefaultExts
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name