AArch64AsmParser.cpp (LLVM 20.0.0git)
//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    MCRegister getDstReg() const { return Dst; }
    MCRegister getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    MCRegister Dst;
    MCRegister Pg;
  } NextPrefix;
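
  // Illustrative example (not in the original source): for a predicated
  // prefix pair such as
  //   movprfx z0.d, p0/m, z1.d
  //   add     z0.d, p0/m, z0.d, z2.d
  // CreateFromInst records Dst = Z0, Pg = P0 and the 64-bit element size,
  // which the instruction validation can later check against the prefixed
  // ADD.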

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFINegateRAStateWithPC();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  ParseStatus
  tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);
  template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
  ParseStatus tryParsePHintInstOperand(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;
  bool IsWindowsArm64EC;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");
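
    // Illustrative example (not in the original source): with these aliases
    // in place, ".xword 0x1122334455667788" is handled by the generic
    // ".8byte" directive parser and emits the same eight bytes.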

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
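
  // Illustrative example (not in the original source): in
  //   ldr x0, [x1, w2, sxtw #3]
  // the "sxtw #3" is parsed into the same operand as w2, so the matcher can
  // diagnose a wrong scale (say "sxtw #2" on an 8-byte load) instead of
  // silently failing to match.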

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
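
  // Illustrative example (not in the original source): with Scale == 8,
  // "#32760" is accepted (32760 / 8 == 4095 < 0x1000), while "#32768" is
  // rejected (32768 / 8 == 4096) and "#4" is rejected (not a multiple of
  // 8), leaving the latter to the unscaled LDUR/STUR forms.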

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
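
  // Illustrative example (not in the original source): for T = int32_t,
  // Upper is 0xffffffff00000000. A constant such as 0xfffffffffffffff0 (a
  // sign-extended 32-bit value) passes the top-bits check, and its low half
  // 0xfffffff0 -- a rotated run of 28 ones -- is a valid 32-bit logical
  // immediate.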

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
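
  // Illustrative example (not in the original source): getShiftedVal<12>()
  // folds a plain "#0x1000" to {1, 12} and "#0x123" to {0x123, 0}, while an
  // explicitly shifted "#1, lsl #12" yields {1, 12} directly.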

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
             (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
             ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the
    // fixup/relocation code deal with it.
    return true;
  }
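
  // Illustrative example (not in the original source): "add x0, x1, #0x123000"
  // is accepted because the constant folds to {0x123, 12}; "#0x123001" is
  // rejected since it fits neither uimm12 nor uimm12 << 12.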

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For an element width of 8 bits, a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed or unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
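
  // Illustrative example (not in the original source): for a .h copy,
  // "#-32768" is accepted via the shifted form (-128 << 8), whereas for a
  // .b copy the IsByte check rules the shifted form out, leaving only the
  // unshifted -128..255 range.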

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) &&
            Val <= (((1 << (N - 1)) - 1) << 2));
  }
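
  // Illustrative example (not in the original source): with N = 19 (the
  // width used by conditional branches), a concrete target must be 4-byte
  // aligned and lie in [-(1 << 20), (1 << 20) - 4], i.e. roughly +/-1 MiB.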

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }
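
  // Illustrative example (not in the original source): "mov x0, #0x20000"
  // matches the MOVZ alias with Shift = 16, since 0x20000 == 0x2 << 16.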

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
    case AArch64::ZPRMul2_LoRegClassID:
    case AArch64::ZPRMul2_HiRegClassID:
    case AArch64::ZPR_KRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicateTy::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
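
  // Illustrative example (not in the original source): for a gather such as
  //   ld1w { z0.s }, p0/z, [x0, z1.s, sxtw #2]
  // the expected amount is Log2_32(32 / 8) == 2; writing "sxtw #1" instead
  // yields a NearMatch and a targeted diagnostic rather than a generic
  // "invalid operand" error.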

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind &&
           (!IsConsecutive || (VectorList.Stride == 1));
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned RegClass>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isPHint() const { return Kind == k_PHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) &&
           getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
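
  // Illustrative example (not in the original source): with Width = 64,
  // "ldr x0, [x1, #-9]" cannot use the scaled form (-9 is not a non-negative
  // multiple of 8) but fits simm9, so it falls back to LDUR; "#8" is
  // excluded here because it is already a legal scaled offset.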

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
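
  // Illustrative example (not in the original source): ADRP encodes a
  // 21-bit page delta, so a concrete offset must be a 4 KiB multiple within
  // roughly +/-4 GiB of the current page.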

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = (1LL << (21 - 1)) - 1;
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative offsets are allowed for concrete immediates, as the signing
    // instruction should be at a lower address than the use.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0b11)
      return false;
    return (Val <= 0) && (Val > -(1 << 18));
  }
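
  // Illustrative example (not in the original source): an offset of -4 (one
  // instruction back) is accepted, while +4 is rejected because the signing
  // instruction must precede the instruction that references it.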
1764
1765 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1766 // Add as immediates when possible. Null MCExpr = 0.
1767 if (!Expr)
1769 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1770 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1771 else
1773 }
1774
1775 void addRegOperands(MCInst &Inst, unsigned N) const {
1776 assert(N == 1 && "Invalid number of operands!");
1778 }
1779
1780 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!");
1782 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1783 }
1784
1785 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1786 assert(N == 1 && "Invalid number of operands!");
1787 assert(
1788 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1789
1790 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1791 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1792 RI->getEncodingValue(getReg()));
1793
1795 }
1796
1797 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1798 assert(N == 1 && "Invalid number of operands!");
1799 assert(
1800 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1801
1802 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1803 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1804 RI->getEncodingValue(getReg()));
1805
1807 }
1808
1809 template <int Width>
1810 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1811 unsigned Base;
1812 switch (Width) {
1813 case 8: Base = AArch64::B0; break;
1814 case 16: Base = AArch64::H0; break;
1815 case 32: Base = AArch64::S0; break;
1816 case 64: Base = AArch64::D0; break;
1817 case 128: Base = AArch64::Q0; break;
1818 default:
1819 llvm_unreachable("Unsupported width");
1820 }
1821 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1822 }
1823
1824 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1825 assert(N == 1 && "Invalid number of operands!");
1826 unsigned Reg = getReg();
1827 // Normalise to PPR
1828 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1829 Reg = Reg - AArch64::PN0 + AArch64::P0;
1830 Inst.addOperand(MCOperand::createReg(Reg));
1831 }
1832
1833 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1834 assert(N == 1 && "Invalid number of operands!");
1835 Inst.addOperand(
1836 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1837 }
1838
1839 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841 assert(
1842 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1843 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1844 }
1845
1846 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1847 assert(N == 1 && "Invalid number of operands!");
1848 assert(
1849 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1850 Inst.addOperand(MCOperand::createReg(getReg()));
1851 }
1852
1853 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1854 assert(N == 1 && "Invalid number of operands!");
1855 Inst.addOperand(MCOperand::createReg(getReg()));
1856 }
1857
1858 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1859 assert(N == 1 && "Invalid number of operands!");
1860 Inst.addOperand(MCOperand::createReg(getReg()));
1861 }
1862
1863 enum VecListIndexType {
1864 VecListIdx_DReg = 0,
1865 VecListIdx_QReg = 1,
1866 VecListIdx_ZReg = 2,
1867 VecListIdx_PReg = 3,
1868 };
1869
1870 template <VecListIndexType RegTy, unsigned NumRegs,
1871 bool IsConsecutive = false>
1872 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1873 assert(N == 1 && "Invalid number of operands!");
1874 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1875 "Expected consecutive registers");
1876 static const unsigned FirstRegs[][5] = {
1877 /* DReg */ { AArch64::Q0,
1878 AArch64::D0, AArch64::D0_D1,
1879 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1880 /* QReg */ { AArch64::Q0,
1881 AArch64::Q0, AArch64::Q0_Q1,
1882 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1883 /* ZReg */ { AArch64::Z0,
1884 AArch64::Z0, AArch64::Z0_Z1,
1885 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1886 /* PReg */ { AArch64::P0,
1887 AArch64::P0, AArch64::P0_P1 }
1888 };
1889
1890 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1891 " NumRegs must be <= 4 for ZRegs");
1892
1893 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1894 " NumRegs must be <= 2 for PRegs");
1895
1896 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1897 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1898 FirstRegs[(unsigned)RegTy][0]));
1899 }
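// Illustrative example: the NEON list { v0.8b, v1.8b } is a two-register
// VecListIdx_DReg list. FirstRegs[0][2] is D0_D1, the list start is Q0
// (NEON list starts use Q-register numbering), so the operand added is
// D0_D1 + (Q0 - Q0), i.e. the D0_D1 tuple.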
1900
1901 template <unsigned NumRegs>
1902 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1903 assert(N == 1 && "Invalid number of operands!");
1904 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1905
1906 switch (NumRegs) {
1907 case 2:
1908 if (getVectorListStart() < AArch64::Z16) {
1909 assert((getVectorListStart() < AArch64::Z8) &&
1910 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1911 Inst.addOperand(MCOperand::createReg(
1912 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1913 } else {
1914 assert((getVectorListStart() < AArch64::Z24) &&
1915 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1916 Inst.addOperand(MCOperand::createReg(
1917 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1918 }
1919 break;
1920 case 4:
1921 if (getVectorListStart() < AArch64::Z16) {
1922 assert((getVectorListStart() < AArch64::Z4) &&
1923 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1924 Inst.addOperand(MCOperand::createReg(
1925 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1926 } else {
1927 assert((getVectorListStart() < AArch64::Z20) &&
1928 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1929 Inst.addOperand(MCOperand::createReg(
1930 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1931 }
1932 break;
1933 default:
1934 llvm_unreachable("Unsupported number of registers for strided vec list");
1935 }
1936 }
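// For example, the strided pair { z3.s, z11.s } starts at Z3 (below Z8),
// so the operand added is Z0_Z8 + (Z3 - Z0), i.e. the Z3_Z11 tuple.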
1937
1938 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1939 assert(N == 1 && "Invalid number of operands!");
1940 unsigned RegMask = getMatrixTileListRegMask();
1941 assert(RegMask <= 0xFF && "Invalid mask!");
1942 Inst.addOperand(MCOperand::createImm(RegMask));
1943 }
1944
1945 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1946 assert(N == 1 && "Invalid number of operands!");
1947 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1948 }
1949
1950 template <unsigned ImmIs0, unsigned ImmIs1>
1951 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1952 assert(N == 1 && "Invalid number of operands!");
1953 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1954 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1955 }
1956
1957 void addImmOperands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 // If this is a pageoff symrefexpr with an addend, adjust the addend
1960 // to be only the page-offset portion. Otherwise, just add the expr
1961 // as-is.
1962 addExpr(Inst, getImm());
1963 }
1964
1965 template <int Shift>
1966 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1967 assert(N == 2 && "Invalid number of operands!");
1968 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1969 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1970 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1971 } else if (isShiftedImm()) {
1972 addExpr(Inst, getShiftedImmVal());
1973 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1974 } else {
1975 addExpr(Inst, getImm());
1976 Inst.addOperand(MCOperand::createImm(0));
1977 }
1978 }
1979
1980 template <int Shift>
1981 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1982 assert(N == 2 && "Invalid number of operands!");
1983 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1984 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1985 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1986 } else
1987 llvm_unreachable("Not a shifted negative immediate");
1988 }
1989
1990 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1991 assert(N == 1 && "Invalid number of operands!");
1992 Inst.addOperand(MCOperand::createImm(getCondCode()));
1993 }
1994
1995 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1996 assert(N == 1 && "Invalid number of operands!");
1997 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1998 if (!MCE)
1999 addExpr(Inst, getImm());
2000 else
2001 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2002 }
2003
2004 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2005 addImmOperands(Inst, N);
2006 }
2007
2008 template<int Scale>
2009 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2010 assert(N == 1 && "Invalid number of operands!");
2011 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2012
2013 if (!MCE) {
2014 Inst.addOperand(MCOperand::createExpr(getImm()));
2015 return;
2016 }
2017 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2018 }
2019
2020 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2021 assert(N == 1 && "Invalid number of operands!");
2022 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2023 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2024 }
2025
2026 template <int Scale>
2027 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2028 assert(N == 1 && "Invalid number of operands!");
2029 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2030 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2031 }
2032
2033 template <int Scale>
2034 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2035 assert(N == 1 && "Invalid number of operands!");
2036 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2037 }
2038
2039 template <typename T>
2040 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2041 assert(N == 1 && "Invalid number of operands!");
2042 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2043 std::make_unsigned_t<T> Val = MCE->getValue();
2044 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2045 Inst.addOperand(MCOperand::createImm(encoding));
2046 }
2047
2048 template <typename T>
2049 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!");
2051 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2052 std::make_unsigned_t<T> Val = ~MCE->getValue();
2053 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2054 Inst.addOperand(MCOperand::createImm(encoding));
2055 }
2056
2057 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2058 assert(N == 1 && "Invalid number of operands!");
2059 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2060 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2061 Inst.addOperand(MCOperand::createImm(encoding));
2062 }
2063
2064 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2065 // Branch operands don't encode the low bits, so shift them off
2066 // here. If it's a label, however, just put it on directly as there's
2067 // not enough information now to do anything.
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2070 if (!MCE) {
2071 addExpr(Inst, getImm());
2072 return;
2073 }
2074 assert(MCE && "Invalid constant immediate operand!");
2075 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2076 }
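// For instance, a resolved 26-bit branch displacement of 8 bytes is encoded
// as the immediate 8 >> 2 == 2; only constants fold this way, a symbolic
// label is emitted as-is and left to the fixup to resolve.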
2077
2078 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2079 // PC-relative operands don't encode the low bits, so shift them off
2080 // here. If it's a label, however, just put it on directly as there's
2081 // not enough information now to do anything.
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2084 if (!MCE) {
2085 addExpr(Inst, getImm());
2086 return;
2087 }
2088 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2089 }
2090
2091 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2092 // Branch operands don't encode the low bits, so shift them off
2093 // here. If it's a label, however, just put it on directly as there's
2094 // not enough information now to do anything.
2095 assert(N == 1 && "Invalid number of operands!");
2096 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2097 if (!MCE) {
2098 addExpr(Inst, getImm());
2099 return;
2100 }
2101 assert(MCE && "Invalid constant immediate operand!");
2102 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2103 }
2104
2105 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2120 // Branch operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 assert(MCE && "Invalid constant immediate operand!");
2130 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2131 }
2132
2133 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2134 assert(N == 1 && "Invalid number of operands!");
2135 Inst.addOperand(MCOperand::createImm(
2136 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2137 }
2138
2139 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(MCOperand::createImm(getBarrier()));
2142 }
2143
2144 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146 Inst.addOperand(MCOperand::createImm(getBarrier()));
2147 }
2148
2149 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2150 assert(N == 1 && "Invalid number of operands!");
2151
2152 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2153 }
2154
2155 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2156 assert(N == 1 && "Invalid number of operands!");
2157
2158 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2159 }
2160
2161 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2162 assert(N == 1 && "Invalid number of operands!");
2163
2164 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2165 }
2166
2167 void addSVCROperands(MCInst &Inst, unsigned N) const {
2168 assert(N == 1 && "Invalid number of operands!");
2169
2170 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2171 }
2172
2173 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175
2176 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2177 }
2178
2179 void addSysCROperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(MCOperand::createImm(getSysCR()));
2182 }
2183
2184 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2187 }
2188
2189 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2192 }
2193
2194 void addPHintOperands(MCInst &Inst, unsigned N) const {
2195 assert(N == 1 && "Invalid number of operands!");
2196 Inst.addOperand(MCOperand::createImm(getPHint()));
2197 }
2198
2199 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2200 assert(N == 1 && "Invalid number of operands!");
2201 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2202 }
2203
2204 void addShifterOperands(MCInst &Inst, unsigned N) const {
2205 assert(N == 1 && "Invalid number of operands!");
2206 unsigned Imm =
2207 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2208 Inst.addOperand(MCOperand::createImm(Imm));
2209 }
2210
2211 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2212 assert(N == 1 && "Invalid number of operands!");
2213 unsigned Imm = getShiftExtendAmount();
2214 Inst.addOperand(MCOperand::createImm(Imm));
2215 }
2216
2217 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2218 assert(N == 1 && "Invalid number of operands!");
2219
2220 if (!isScalarReg())
2221 return;
2222
2223 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2224 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2225 .getRegister(RI->getEncodingValue(getReg()));
2226 if (Reg != AArch64::XZR)
2227 llvm_unreachable("wrong register");
2228
2229 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2230 }
2231
2232 void addExtendOperands(MCInst &Inst, unsigned N) const {
2233 assert(N == 1 && "Invalid number of operands!");
2234 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2235 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2236 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2237 Inst.addOperand(MCOperand::createImm(Imm));
2238 }
2239
2240 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2243 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2244 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2245 Inst.addOperand(MCOperand::createImm(Imm));
2246 }
2247
2248 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2249 assert(N == 2 && "Invalid number of operands!");
2250 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2251 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2252 Inst.addOperand(MCOperand::createImm(IsSigned));
2253 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2254 }
2255
2256 // For 8-bit load/store instructions with a register offset, both the
2257 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2258 // they're disambiguated by whether the shift was explicit or implicit rather
2259 // than its size.
2260 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2261 assert(N == 2 && "Invalid number of operands!");
2262 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2263 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2264 Inst.addOperand(MCOperand::createImm(IsSigned));
2265 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2266 }
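// For example, "ldrb w0, [x1, x2]" and "ldrb w0, [x1, x2, lsl #0]" both
// shift by zero; only the explicit "lsl #0" sets hasShiftExtendAmount()
// and therefore selects the "DoShift" instruction variant.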
2267
2268 template<int Shift>
2269 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2270 assert(N == 1 && "Invalid number of operands!");
2271
2272 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2273 if (CE) {
2274 uint64_t Value = CE->getValue();
2275 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2276 } else {
2277 addExpr(Inst, getImm());
2278 }
2279 }
2280
2281 template<int Shift>
2282 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284
2285 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2286 uint64_t Value = CE->getValue();
2287 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2288 }
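// Worked example: for "mov x0, #0x12340000" the MOVZ alias with Shift == 16
// encodes (0x12340000 >> 16) & 0xffff == 0x1234, while the MOVN alias
// instead encodes the complemented chunk (~Value >> Shift) & 0xffff.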
2289
2290 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2293 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2294 }
2295
2296 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2297 assert(N == 1 && "Invalid number of operands!");
2298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2299 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2300 }
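// For example, the "even" rotations #0/#90/#180/#270 (as used by FCMLA)
// encode as 0..3 via rot / 90, while the "odd" rotations #90/#270 (as used
// by FCADD) encode as 0..1 via (rot - 90) / 180.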
2301
2302 void print(raw_ostream &OS) const override;
2303
2304 static std::unique_ptr<AArch64Operand>
2305 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2306 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2307 Op->Tok.Data = Str.data();
2308 Op->Tok.Length = Str.size();
2309 Op->Tok.IsSuffix = IsSuffix;
2310 Op->StartLoc = S;
2311 Op->EndLoc = S;
2312 return Op;
2313 }
2314
2315 static std::unique_ptr<AArch64Operand>
2316 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2317 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2318 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2319 unsigned ShiftAmount = 0,
2320 unsigned HasExplicitAmount = false) {
2321 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2322 Op->Reg.RegNum = RegNum;
2323 Op->Reg.Kind = Kind;
2324 Op->Reg.ElementWidth = 0;
2325 Op->Reg.EqualityTy = EqTy;
2326 Op->Reg.ShiftExtend.Type = ExtTy;
2327 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2328 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2329 Op->StartLoc = S;
2330 Op->EndLoc = E;
2331 return Op;
2332 }
2333
2334 static std::unique_ptr<AArch64Operand>
2335 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2336 SMLoc S, SMLoc E, MCContext &Ctx,
2337 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2338 unsigned ShiftAmount = 0,
2339 unsigned HasExplicitAmount = false) {
2340 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2341 Kind == RegKind::SVEPredicateVector ||
2342 Kind == RegKind::SVEPredicateAsCounter) &&
2343 "Invalid vector kind");
2344 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2345 HasExplicitAmount);
2346 Op->Reg.ElementWidth = ElementWidth;
2347 return Op;
2348 }
2349
2350 static std::unique_ptr<AArch64Operand>
2351 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2352 unsigned NumElements, unsigned ElementWidth,
2353 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2354 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2355 Op->VectorList.RegNum = RegNum;
2356 Op->VectorList.Count = Count;
2357 Op->VectorList.Stride = Stride;
2358 Op->VectorList.NumElements = NumElements;
2359 Op->VectorList.ElementWidth = ElementWidth;
2360 Op->VectorList.RegisterKind = RegisterKind;
2361 Op->StartLoc = S;
2362 Op->EndLoc = E;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2368 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2369 Op->VectorIndex.Val = Idx;
2370 Op->StartLoc = S;
2371 Op->EndLoc = E;
2372 return Op;
2373 }
2374
2375 static std::unique_ptr<AArch64Operand>
2376 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2377 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2378 Op->MatrixTileList.RegMask = RegMask;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
2384 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2385 const unsigned ElementWidth) {
2386 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2387 RegMap = {
2388 {{0, AArch64::ZAB0},
2389 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2390 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2391 {{8, AArch64::ZAB0},
2392 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2393 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2394 {{16, AArch64::ZAH0},
2395 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2396 {{16, AArch64::ZAH1},
2397 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2398 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2399 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2400 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2401 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2402 };
2403
2404 if (ElementWidth == 64)
2405 OutRegs.insert(Reg);
2406 else {
2407 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2408 assert(!Regs.empty() && "Invalid tile or element width!");
2409 for (auto OutReg : Regs)
2410 OutRegs.insert(OutReg);
2411 }
2412 }
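// For example, the 32-bit tile ZAS1 overlaps the 64-bit tiles ZAD1 and
// ZAD5 in the map above, so ComputeRegsForAlias(AArch64::ZAS1, OutRegs, 32)
// inserts both into OutRegs.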
2413
2414 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2415 SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2417 Op->Imm.Val = Val;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2424 unsigned ShiftAmount,
2425 SMLoc S, SMLoc E,
2426 MCContext &Ctx) {
2427 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2428 Op->ShiftedImm.Val = Val;
2429 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2430 Op->StartLoc = S;
2431 Op->EndLoc = E;
2432 return Op;
2433 }
2434
2435 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2436 unsigned Last, SMLoc S,
2437 SMLoc E,
2438 MCContext &Ctx) {
2439 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2440 Op->ImmRange.First = First;
2441 Op->ImmRange.Last = Last;
2442 Op->EndLoc = E;
2443 return Op;
2444 }
2445
2446 static std::unique_ptr<AArch64Operand>
2447 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2448 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2449 Op->CondCode.Code = Code;
2450 Op->StartLoc = S;
2451 Op->EndLoc = E;
2452 return Op;
2453 }
2454
2455 static std::unique_ptr<AArch64Operand>
2456 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2457 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2458 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2459 Op->FPImm.IsExact = IsExact;
2460 Op->StartLoc = S;
2461 Op->EndLoc = S;
2462 return Op;
2463 }
2464
2465 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2466 StringRef Str,
2467 SMLoc S,
2468 MCContext &Ctx,
2469 bool HasnXSModifier) {
2470 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2471 Op->Barrier.Val = Val;
2472 Op->Barrier.Data = Str.data();
2473 Op->Barrier.Length = Str.size();
2474 Op->Barrier.HasnXSModifier = HasnXSModifier;
2475 Op->StartLoc = S;
2476 Op->EndLoc = S;
2477 return Op;
2478 }
2479
2480 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2481 uint32_t MRSReg,
2482 uint32_t MSRReg,
2483 uint32_t PStateField,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2486 Op->SysReg.Data = Str.data();
2487 Op->SysReg.Length = Str.size();
2488 Op->SysReg.MRSReg = MRSReg;
2489 Op->SysReg.MSRReg = MSRReg;
2490 Op->SysReg.PStateField = PStateField;
2491 Op->StartLoc = S;
2492 Op->EndLoc = S;
2493 return Op;
2494 }
2495
2496 static std::unique_ptr<AArch64Operand>
2497 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2498 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2499 Op->PHint.Val = Val;
2500 Op->PHint.Data = Str.data();
2501 Op->PHint.Length = Str.size();
2502 Op->StartLoc = S;
2503 Op->EndLoc = S;
2504 return Op;
2505 }
2506
2507 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2508 SMLoc E, MCContext &Ctx) {
2509 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2510 Op->SysCRImm.Val = Val;
2511 Op->StartLoc = S;
2512 Op->EndLoc = E;
2513 return Op;
2514 }
2515
2516 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2517 StringRef Str,
2518 SMLoc S,
2519 MCContext &Ctx) {
2520 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2521 Op->Prefetch.Val = Val;
2522 Op->Barrier.Data = Str.data();
2523 Op->Barrier.Length = Str.size();
2524 Op->StartLoc = S;
2525 Op->EndLoc = S;
2526 return Op;
2527 }
2528
2529 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2530 StringRef Str,
2531 SMLoc S,
2532 MCContext &Ctx) {
2533 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2534 Op->PSBHint.Val = Val;
2535 Op->PSBHint.Data = Str.data();
2536 Op->PSBHint.Length = Str.size();
2537 Op->StartLoc = S;
2538 Op->EndLoc = S;
2539 return Op;
2540 }
2541
2542 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2543 StringRef Str,
2544 SMLoc S,
2545 MCContext &Ctx) {
2546 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2547 Op->BTIHint.Val = Val | 32;
2548 Op->BTIHint.Data = Str.data();
2549 Op->BTIHint.Length = Str.size();
2550 Op->StartLoc = S;
2551 Op->EndLoc = S;
2552 return Op;
2553 }
2554
2555 static std::unique_ptr<AArch64Operand>
2556 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2557 SMLoc S, SMLoc E, MCContext &Ctx) {
2558 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2559 Op->MatrixReg.RegNum = RegNum;
2560 Op->MatrixReg.ElementWidth = ElementWidth;
2561 Op->MatrixReg.Kind = Kind;
2562 Op->StartLoc = S;
2563 Op->EndLoc = E;
2564 return Op;
2565 }
2566
2567 static std::unique_ptr<AArch64Operand>
2568 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2569 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2570 Op->SVCR.PStateField = PStateField;
2571 Op->SVCR.Data = Str.data();
2572 Op->SVCR.Length = Str.size();
2573 Op->StartLoc = S;
2574 Op->EndLoc = S;
2575 return Op;
2576 }
2577
2578 static std::unique_ptr<AArch64Operand>
2579 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2580 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2581 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2582 Op->ShiftExtend.Type = ShOp;
2583 Op->ShiftExtend.Amount = Val;
2584 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2585 Op->StartLoc = S;
2586 Op->EndLoc = E;
2587 return Op;
2588 }
2589};
2590
2591} // end anonymous namespace.
2592
2593void AArch64Operand::print(raw_ostream &OS) const {
2594 switch (Kind) {
2595 case k_FPImm:
2596 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2597 if (!getFPImmIsExact())
2598 OS << " (inexact)";
2599 OS << ">";
2600 break;
2601 case k_Barrier: {
2602 StringRef Name = getBarrierName();
2603 if (!Name.empty())
2604 OS << "<barrier " << Name << ">";
2605 else
2606 OS << "<barrier invalid #" << getBarrier() << ">";
2607 break;
2608 }
2609 case k_Immediate:
2610 OS << *getImm();
2611 break;
2612 case k_ShiftedImm: {
2613 unsigned Shift = getShiftedImmShift();
2614 OS << "<shiftedimm ";
2615 OS << *getShiftedImmVal();
2616 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2617 break;
2618 }
2619 case k_ImmRange: {
2620 OS << "<immrange ";
2621 OS << getFirstImmVal();
2622 OS << ":" << getLastImmVal() << ">";
2623 break;
2624 }
2625 case k_CondCode:
2626 OS << "<condcode " << getCondCode() << ">";
2627 break;
2628 case k_VectorList: {
2629 OS << "<vectorlist ";
2630 unsigned Reg = getVectorListStart();
2631 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2632 OS << Reg + i * getVectorListStride() << " ";
2633 OS << ">";
2634 break;
2635 }
2636 case k_VectorIndex:
2637 OS << "<vectorindex " << getVectorIndex() << ">";
2638 break;
2639 case k_SysReg:
2640 OS << "<sysreg: " << getSysReg() << '>';
2641 break;
2642 case k_Token:
2643 OS << "'" << getToken() << "'";
2644 break;
2645 case k_SysCR:
2646 OS << "c" << getSysCR();
2647 break;
2648 case k_Prefetch: {
2649 StringRef Name = getPrefetchName();
2650 if (!Name.empty())
2651 OS << "<prfop " << Name << ">";
2652 else
2653 OS << "<prfop invalid #" << getPrefetch() << ">";
2654 break;
2655 }
2656 case k_PSBHint:
2657 OS << getPSBHintName();
2658 break;
2659 case k_PHint:
2660 OS << getPHintName();
2661 break;
2662 case k_BTIHint:
2663 OS << getBTIHintName();
2664 break;
2665 case k_MatrixRegister:
2666 OS << "<matrix " << getMatrixReg() << ">";
2667 break;
2668 case k_MatrixTileList: {
2669 OS << "<matrixlist ";
2670 unsigned RegMask = getMatrixTileListRegMask();
2671 unsigned MaxBits = 8;
2672 for (unsigned I = MaxBits; I > 0; --I)
2673 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2674 OS << '>';
2675 break;
2676 }
2677 case k_SVCR: {
2678 OS << getSVCR();
2679 break;
2680 }
2681 case k_Register:
2682 OS << "<register " << getReg() << ">";
2683 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2684 break;
2685 [[fallthrough]];
2686 case k_ShiftExtend:
2687 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2688 << getShiftExtendAmount();
2689 if (!hasShiftExtendAmount())
2690 OS << "<imp>";
2691 OS << '>';
2692 break;
2693 }
2694}
2695
2696/// @name Auto-generated Match Functions
2697/// {
2698
2699 static unsigned MatchRegisterName(StringRef Name);
2700
2701/// }
2702
2703 static unsigned MatchNeonVectorRegName(StringRef Name) {
2704 return StringSwitch<unsigned>(Name.lower())
2705 .Case("v0", AArch64::Q0)
2706 .Case("v1", AArch64::Q1)
2707 .Case("v2", AArch64::Q2)
2708 .Case("v3", AArch64::Q3)
2709 .Case("v4", AArch64::Q4)
2710 .Case("v5", AArch64::Q5)
2711 .Case("v6", AArch64::Q6)
2712 .Case("v7", AArch64::Q7)
2713 .Case("v8", AArch64::Q8)
2714 .Case("v9", AArch64::Q9)
2715 .Case("v10", AArch64::Q10)
2716 .Case("v11", AArch64::Q11)
2717 .Case("v12", AArch64::Q12)
2718 .Case("v13", AArch64::Q13)
2719 .Case("v14", AArch64::Q14)
2720 .Case("v15", AArch64::Q15)
2721 .Case("v16", AArch64::Q16)
2722 .Case("v17", AArch64::Q17)
2723 .Case("v18", AArch64::Q18)
2724 .Case("v19", AArch64::Q19)
2725 .Case("v20", AArch64::Q20)
2726 .Case("v21", AArch64::Q21)
2727 .Case("v22", AArch64::Q22)
2728 .Case("v23", AArch64::Q23)
2729 .Case("v24", AArch64::Q24)
2730 .Case("v25", AArch64::Q25)
2731 .Case("v26", AArch64::Q26)
2732 .Case("v27", AArch64::Q27)
2733 .Case("v28", AArch64::Q28)
2734 .Case("v29", AArch64::Q29)
2735 .Case("v30", AArch64::Q30)
2736 .Case("v31", AArch64::Q31)
2737 .Default(0);
2738}
2739
2740/// Returns an optional pair of (#elements, element-width) if Suffix
2741/// is a valid vector kind. Where the number of elements in a vector
2742/// or the vector width is implicit or explicitly unknown (but still a
2743/// valid suffix kind), 0 is used.
2744static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2745 RegKind VectorKind) {
2746 std::pair<int, int> Res = {-1, -1};
2747
2748 switch (VectorKind) {
2749 case RegKind::NeonVector:
2750 Res = StringSwitch<std::pair<int, int>>(Suffix)
2751 .Case("", {0, 0})
2752 .Case(".1d", {1, 64})
2753 .Case(".1q", {1, 128})
2754 // '.2h' needed for fp16 scalar pairwise reductions
2755 .Case(".2h", {2, 16})
2756 .Case(".2b", {2, 8})
2757 .Case(".2s", {2, 32})
2758 .Case(".2d", {2, 64})
2759 // '.4b' is another special case for the ARMv8.2a dot product
2760 // operand
2761 .Case(".4b", {4, 8})
2762 .Case(".4h", {4, 16})
2763 .Case(".4s", {4, 32})
2764 .Case(".8b", {8, 8})
2765 .Case(".8h", {8, 16})
2766 .Case(".16b", {16, 8})
2767 // Accept the width neutral ones, too, for verbose syntax. If
2768 // those aren't used in the right places, the token operand won't
2769 // match so all will work out.
2770 .Case(".b", {0, 8})
2771 .Case(".h", {0, 16})
2772 .Case(".s", {0, 32})
2773 .Case(".d", {0, 64})
2774 .Default({-1, -1});
2775 break;
2776 case RegKind::SVEPredicateAsCounter:
2777 case RegKind::SVEPredicateVector:
2778 case RegKind::SVEDataVector:
2779 case RegKind::Matrix:
2780 Res = StringSwitch<std::pair<int, int>>(Suffix)
2781 .Case("", {0, 0})
2782 .Case(".b", {0, 8})
2783 .Case(".h", {0, 16})
2784 .Case(".s", {0, 32})
2785 .Case(".d", {0, 64})
2786 .Case(".q", {0, 128})
2787 .Default({-1, -1});
2788 break;
2789 default:
2790 llvm_unreachable("Unsupported RegKind");
2791 }
2792
2793 if (Res == std::make_pair(-1, -1))
2794 return std::nullopt;
2795
2796 return std::optional<std::pair<int, int>>(Res);
2797}
2798
2799static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2800 return parseVectorKind(Suffix, VectorKind).has_value();
2801}
2802
2803 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2804 return StringSwitch<unsigned>(Name.lower())
2805 .Case("z0", AArch64::Z0)
2806 .Case("z1", AArch64::Z1)
2807 .Case("z2", AArch64::Z2)
2808 .Case("z3", AArch64::Z3)
2809 .Case("z4", AArch64::Z4)
2810 .Case("z5", AArch64::Z5)
2811 .Case("z6", AArch64::Z6)
2812 .Case("z7", AArch64::Z7)
2813 .Case("z8", AArch64::Z8)
2814 .Case("z9", AArch64::Z9)
2815 .Case("z10", AArch64::Z10)
2816 .Case("z11", AArch64::Z11)
2817 .Case("z12", AArch64::Z12)
2818 .Case("z13", AArch64::Z13)
2819 .Case("z14", AArch64::Z14)
2820 .Case("z15", AArch64::Z15)
2821 .Case("z16", AArch64::Z16)
2822 .Case("z17", AArch64::Z17)
2823 .Case("z18", AArch64::Z18)
2824 .Case("z19", AArch64::Z19)
2825 .Case("z20", AArch64::Z20)
2826 .Case("z21", AArch64::Z21)
2827 .Case("z22", AArch64::Z22)
2828 .Case("z23", AArch64::Z23)
2829 .Case("z24", AArch64::Z24)
2830 .Case("z25", AArch64::Z25)
2831 .Case("z26", AArch64::Z26)
2832 .Case("z27", AArch64::Z27)
2833 .Case("z28", AArch64::Z28)
2834 .Case("z29", AArch64::Z29)
2835 .Case("z30", AArch64::Z30)
2836 .Case("z31", AArch64::Z31)
2837 .Default(0);
2838}
2839
2840 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2841 return StringSwitch<unsigned>(Name.lower())
2842 .Case("p0", AArch64::P0)
2843 .Case("p1", AArch64::P1)
2844 .Case("p2", AArch64::P2)
2845 .Case("p3", AArch64::P3)
2846 .Case("p4", AArch64::P4)
2847 .Case("p5", AArch64::P5)
2848 .Case("p6", AArch64::P6)
2849 .Case("p7", AArch64::P7)
2850 .Case("p8", AArch64::P8)
2851 .Case("p9", AArch64::P9)
2852 .Case("p10", AArch64::P10)
2853 .Case("p11", AArch64::P11)
2854 .Case("p12", AArch64::P12)
2855 .Case("p13", AArch64::P13)
2856 .Case("p14", AArch64::P14)
2857 .Case("p15", AArch64::P15)
2858 .Default(0);
2859}
2860
2861 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2862 return StringSwitch<unsigned>(Name.lower())
2863 .Case("pn0", AArch64::PN0)
2864 .Case("pn1", AArch64::PN1)
2865 .Case("pn2", AArch64::PN2)
2866 .Case("pn3", AArch64::PN3)
2867 .Case("pn4", AArch64::PN4)
2868 .Case("pn5", AArch64::PN5)
2869 .Case("pn6", AArch64::PN6)
2870 .Case("pn7", AArch64::PN7)
2871 .Case("pn8", AArch64::PN8)
2872 .Case("pn9", AArch64::PN9)
2873 .Case("pn10", AArch64::PN10)
2874 .Case("pn11", AArch64::PN11)
2875 .Case("pn12", AArch64::PN12)
2876 .Case("pn13", AArch64::PN13)
2877 .Case("pn14", AArch64::PN14)
2878 .Case("pn15", AArch64::PN15)
2879 .Default(0);
2880}
2881
2882 static unsigned matchMatrixTileListRegName(StringRef Name) {
2883 return StringSwitch<unsigned>(Name.lower())
2884 .Case("za0.d", AArch64::ZAD0)
2885 .Case("za1.d", AArch64::ZAD1)
2886 .Case("za2.d", AArch64::ZAD2)
2887 .Case("za3.d", AArch64::ZAD3)
2888 .Case("za4.d", AArch64::ZAD4)
2889 .Case("za5.d", AArch64::ZAD5)
2890 .Case("za6.d", AArch64::ZAD6)
2891 .Case("za7.d", AArch64::ZAD7)
2892 .Case("za0.s", AArch64::ZAS0)
2893 .Case("za1.s", AArch64::ZAS1)
2894 .Case("za2.s", AArch64::ZAS2)
2895 .Case("za3.s", AArch64::ZAS3)
2896 .Case("za0.h", AArch64::ZAH0)
2897 .Case("za1.h", AArch64::ZAH1)
2898 .Case("za0.b", AArch64::ZAB0)
2899 .Default(0);
2900}
2901
2902 static unsigned matchMatrixRegName(StringRef Name) {
2903 return StringSwitch<unsigned>(Name.lower())
2904 .Case("za", AArch64::ZA)
2905 .Case("za0.q", AArch64::ZAQ0)
2906 .Case("za1.q", AArch64::ZAQ1)
2907 .Case("za2.q", AArch64::ZAQ2)
2908 .Case("za3.q", AArch64::ZAQ3)
2909 .Case("za4.q", AArch64::ZAQ4)
2910 .Case("za5.q", AArch64::ZAQ5)
2911 .Case("za6.q", AArch64::ZAQ6)
2912 .Case("za7.q", AArch64::ZAQ7)
2913 .Case("za8.q", AArch64::ZAQ8)
2914 .Case("za9.q", AArch64::ZAQ9)
2915 .Case("za10.q", AArch64::ZAQ10)
2916 .Case("za11.q", AArch64::ZAQ11)
2917 .Case("za12.q", AArch64::ZAQ12)
2918 .Case("za13.q", AArch64::ZAQ13)
2919 .Case("za14.q", AArch64::ZAQ14)
2920 .Case("za15.q", AArch64::ZAQ15)
2921 .Case("za0.d", AArch64::ZAD0)
2922 .Case("za1.d", AArch64::ZAD1)
2923 .Case("za2.d", AArch64::ZAD2)
2924 .Case("za3.d", AArch64::ZAD3)
2925 .Case("za4.d", AArch64::ZAD4)
2926 .Case("za5.d", AArch64::ZAD5)
2927 .Case("za6.d", AArch64::ZAD6)
2928 .Case("za7.d", AArch64::ZAD7)
2929 .Case("za0.s", AArch64::ZAS0)
2930 .Case("za1.s", AArch64::ZAS1)
2931 .Case("za2.s", AArch64::ZAS2)
2932 .Case("za3.s", AArch64::ZAS3)
2933 .Case("za0.h", AArch64::ZAH0)
2934 .Case("za1.h", AArch64::ZAH1)
2935 .Case("za0.b", AArch64::ZAB0)
2936 .Case("za0h.q", AArch64::ZAQ0)
2937 .Case("za1h.q", AArch64::ZAQ1)
2938 .Case("za2h.q", AArch64::ZAQ2)
2939 .Case("za3h.q", AArch64::ZAQ3)
2940 .Case("za4h.q", AArch64::ZAQ4)
2941 .Case("za5h.q", AArch64::ZAQ5)
2942 .Case("za6h.q", AArch64::ZAQ6)
2943 .Case("za7h.q", AArch64::ZAQ7)
2944 .Case("za8h.q", AArch64::ZAQ8)
2945 .Case("za9h.q", AArch64::ZAQ9)
2946 .Case("za10h.q", AArch64::ZAQ10)
2947 .Case("za11h.q", AArch64::ZAQ11)
2948 .Case("za12h.q", AArch64::ZAQ12)
2949 .Case("za13h.q", AArch64::ZAQ13)
2950 .Case("za14h.q", AArch64::ZAQ14)
2951 .Case("za15h.q", AArch64::ZAQ15)
2952 .Case("za0h.d", AArch64::ZAD0)
2953 .Case("za1h.d", AArch64::ZAD1)
2954 .Case("za2h.d", AArch64::ZAD2)
2955 .Case("za3h.d", AArch64::ZAD3)
2956 .Case("za4h.d", AArch64::ZAD4)
2957 .Case("za5h.d", AArch64::ZAD5)
2958 .Case("za6h.d", AArch64::ZAD6)
2959 .Case("za7h.d", AArch64::ZAD7)
2960 .Case("za0h.s", AArch64::ZAS0)
2961 .Case("za1h.s", AArch64::ZAS1)
2962 .Case("za2h.s", AArch64::ZAS2)
2963 .Case("za3h.s", AArch64::ZAS3)
2964 .Case("za0h.h", AArch64::ZAH0)
2965 .Case("za1h.h", AArch64::ZAH1)
2966 .Case("za0h.b", AArch64::ZAB0)
2967 .Case("za0v.q", AArch64::ZAQ0)
2968 .Case("za1v.q", AArch64::ZAQ1)
2969 .Case("za2v.q", AArch64::ZAQ2)
2970 .Case("za3v.q", AArch64::ZAQ3)
2971 .Case("za4v.q", AArch64::ZAQ4)
2972 .Case("za5v.q", AArch64::ZAQ5)
2973 .Case("za6v.q", AArch64::ZAQ6)
2974 .Case("za7v.q", AArch64::ZAQ7)
2975 .Case("za8v.q", AArch64::ZAQ8)
2976 .Case("za9v.q", AArch64::ZAQ9)
2977 .Case("za10v.q", AArch64::ZAQ10)
2978 .Case("za11v.q", AArch64::ZAQ11)
2979 .Case("za12v.q", AArch64::ZAQ12)
2980 .Case("za13v.q", AArch64::ZAQ13)
2981 .Case("za14v.q", AArch64::ZAQ14)
2982 .Case("za15v.q", AArch64::ZAQ15)
2983 .Case("za0v.d", AArch64::ZAD0)
2984 .Case("za1v.d", AArch64::ZAD1)
2985 .Case("za2v.d", AArch64::ZAD2)
2986 .Case("za3v.d", AArch64::ZAD3)
2987 .Case("za4v.d", AArch64::ZAD4)
2988 .Case("za5v.d", AArch64::ZAD5)
2989 .Case("za6v.d", AArch64::ZAD6)
2990 .Case("za7v.d", AArch64::ZAD7)
2991 .Case("za0v.s", AArch64::ZAS0)
2992 .Case("za1v.s", AArch64::ZAS1)
2993 .Case("za2v.s", AArch64::ZAS2)
2994 .Case("za3v.s", AArch64::ZAS3)
2995 .Case("za0v.h", AArch64::ZAH0)
2996 .Case("za1v.h", AArch64::ZAH1)
2997 .Case("za0v.b", AArch64::ZAB0)
2998 .Default(0);
2999}
3000
3001bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3002 SMLoc &EndLoc) {
3003 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3004}
3005
3006ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3007 SMLoc &EndLoc) {
3008 StartLoc = getLoc();
3009 ParseStatus Res = tryParseScalarRegister(Reg);
3010 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 return Res;
3012}
3013
3014// Matches a register name or register alias previously defined by '.req'
3015unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3016 RegKind Kind) {
3017 unsigned RegNum = 0;
3018 if ((RegNum = matchSVEDataVectorRegName(Name)))
3019 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3020
3021 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3022 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3023
3024 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3025 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3026
3027 if ((RegNum = MatchNeonVectorRegName(Name)))
3028 return Kind == RegKind::NeonVector ? RegNum : 0;
3029
3030 if ((RegNum = matchMatrixRegName(Name)))
3031 return Kind == RegKind::Matrix ? RegNum : 0;
3032
3033 if (Name.equals_insensitive("zt0"))
3034 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3035
3036 // The parsed register must be of RegKind Scalar
3037 if ((RegNum = MatchRegisterName(Name)))
3038 return (Kind == RegKind::Scalar) ? RegNum : 0;
3039
3040 if (!RegNum) {
3041 // Handle a few common aliases of registers.
3042 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3043 .Case("fp", AArch64::FP)
3044 .Case("lr", AArch64::LR)
3045 .Case("x31", AArch64::XZR)
3046 .Case("w31", AArch64::WZR)
3047 .Default(0))
3048 return Kind == RegKind::Scalar ? RegNum : 0;
3049
3050 // Check for aliases registered via .req. Canonicalize to lower case.
3051 // That's more consistent since register names are case insensitive, and
3052 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3053 auto Entry = RegisterReqs.find(Name.lower());
3054 if (Entry == RegisterReqs.end())
3055 return 0;
3056
3057 // set RegNum if the match is the right kind of register
3058 if (Kind == Entry->getValue().first)
3059 RegNum = Entry->getValue().second;
3060 }
3061 return RegNum;
3062}
3063
3064unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3065 switch (K) {
3066 case RegKind::Scalar:
3067 case RegKind::NeonVector:
3068 case RegKind::SVEDataVector:
3069 return 32;
3070 case RegKind::Matrix:
3071 case RegKind::SVEPredicateVector:
3072 case RegKind::SVEPredicateAsCounter:
3073 return 16;
3074 case RegKind::LookupTable:
3075 return 1;
3076 }
3077 llvm_unreachable("Unsupported RegKind");
3078}
3079
3080/// tryParseScalarRegister - Try to parse a register name. The token must be an
3081/// Identifier when called, and if it is a register name the token is eaten and
3082/// the register is added to the operand list.
3083ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3084 const AsmToken &Tok = getTok();
3085 if (Tok.isNot(AsmToken::Identifier))
3086 return ParseStatus::NoMatch;
3087
3088 std::string lowerCase = Tok.getString().lower();
3089 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3090 if (Reg == 0)
3091 return ParseStatus::NoMatch;
3092
3093 RegNum = Reg;
3094 Lex(); // Eat identifier token.
3095 return ParseStatus::Success;
3096}
3097
3098/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3099ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3100 SMLoc S = getLoc();
3101
3102 if (getTok().isNot(AsmToken::Identifier))
3103 return Error(S, "Expected cN operand where 0 <= N <= 15");
3104
3105 StringRef Tok = getTok().getIdentifier();
3106 if (Tok[0] != 'c' && Tok[0] != 'C')
3107 return Error(S, "Expected cN operand where 0 <= N <= 15");
3108
3109 uint32_t CRNum;
3110 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3111 if (BadNum || CRNum > 15)
3112 return Error(S, "Expected cN operand where 0 <= N <= 15");
3113
3114 Lex(); // Eat identifier token.
3115 Operands.push_back(
3116 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3117 return ParseStatus::Success;
3118}
3119
3120// Either an identifier for named values or a 6-bit immediate.
3121ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3122 SMLoc S = getLoc();
3123 const AsmToken &Tok = getTok();
3124
3125 unsigned MaxVal = 63;
3126
3127 // Immediate case, with optional leading hash:
3128 if (parseOptionalToken(AsmToken::Hash) ||
3129 Tok.is(AsmToken::Integer)) {
3130 const MCExpr *ImmVal;
3131 if (getParser().parseExpression(ImmVal))
3132 return ParseStatus::Failure;
3133
3134 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3135 if (!MCE)
3136 return TokError("immediate value expected for prefetch operand");
3137 unsigned prfop = MCE->getValue();
3138 if (prfop > MaxVal)
3139 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3140 "] expected");
3141
3142 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3143 Operands.push_back(AArch64Operand::CreatePrefetch(
3144 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3145 return ParseStatus::Success;
3146 }
3147
3148 if (Tok.isNot(AsmToken::Identifier))
3149 return TokError("prefetch hint expected");
3150
3151 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3152 if (!RPRFM)
3153 return TokError("prefetch hint expected");
3154
3155 Operands.push_back(AArch64Operand::CreatePrefetch(
3156 RPRFM->Encoding, Tok.getString(), S, getContext()));
3157 Lex(); // Eat identifier token.
3158 return ParseStatus::Success;
3159}
3160
3161/// tryParsePrefetch - Try to parse a prefetch operand.
3162template <bool IsSVEPrefetch>
3163ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3164 SMLoc S = getLoc();
3165 const AsmToken &Tok = getTok();
3166
3167 auto LookupByName = [](StringRef N) {
3168 if (IsSVEPrefetch) {
3169 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3170 return std::optional<unsigned>(Res->Encoding);
3171 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3172 return std::optional<unsigned>(Res->Encoding);
3173 return std::optional<unsigned>();
3174 };
3175
3176 auto LookupByEncoding = [](unsigned E) {
3177 if (IsSVEPrefetch) {
3178 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3179 return std::optional<StringRef>(Res->Name);
3180 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3181 return std::optional<StringRef>(Res->Name);
3182 return std::optional<StringRef>();
3183 };
3184 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3185
3186 // Either an identifier for named values or a 5-bit immediate.
3187 // Eat optional hash.
3188 if (parseOptionalToken(AsmToken::Hash) ||
3189 Tok.is(AsmToken::Integer)) {
3190 const MCExpr *ImmVal;
3191 if (getParser().parseExpression(ImmVal))
3192 return ParseStatus::Failure;
3193
3194 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3195 if (!MCE)
3196 return TokError("immediate value expected for prefetch operand");
3197 unsigned prfop = MCE->getValue();
3198 if (prfop > MaxVal)
3199 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3200 "] expected");
3201
3202 auto PRFM = LookupByEncoding(MCE->getValue());
3203 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3204 S, getContext()));
3205 return ParseStatus::Success;
3206 }
3207
3208 if (Tok.isNot(AsmToken::Identifier))
3209 return TokError("prefetch hint expected");
3210
3211 auto PRFM = LookupByName(Tok.getString());
3212 if (!PRFM)
3213 return TokError("prefetch hint expected");
3214
3215 Operands.push_back(AArch64Operand::CreatePrefetch(
3216 *PRFM, Tok.getString(), S, getContext()));
3217 Lex(); // Eat identifier token.
3218 return ParseStatus::Success;
3219}
3220
3221/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3222ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3223 SMLoc S = getLoc();
3224 const AsmToken &Tok = getTok();
3225 if (Tok.isNot(AsmToken::Identifier))
3226 return TokError("invalid operand for instruction");
3227
3228 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3229 if (!PSB)
3230 return TokError("invalid operand for instruction");
3231
3232 Operands.push_back(AArch64Operand::CreatePSBHint(
3233 PSB->Encoding, Tok.getString(), S, getContext()));
3234 Lex(); // Eat identifier token.
3235 return ParseStatus::Success;
3236}
3237
3238ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3239 SMLoc StartLoc = getLoc();
3240
3241 MCRegister RegNum;
3242
3243 // The case where xzr, xzr is not present is handled by an InstAlias.
3244
3245 auto RegTok = getTok(); // in case we need to backtrack
3246 if (!tryParseScalarRegister(RegNum).isSuccess())
3247 return ParseStatus::NoMatch;
3248
3249 if (RegNum != AArch64::XZR) {
3250 getLexer().UnLex(RegTok);
3251 return ParseStatus::NoMatch;
3252 }
3253
3254 if (parseComma())
3255 return ParseStatus::Failure;
3256
3257 if (!tryParseScalarRegister(RegNum).isSuccess())
3258 return TokError("expected register operand");
3259
3260 if (RegNum != AArch64::XZR)
3261 return TokError("xzr must be followed by xzr");
3262
3263 // We need to push something, since we claim this is an operand in .td.
3264 // See also AArch64AsmParser::parseKeywordOperand.
3265 Operands.push_back(AArch64Operand::CreateReg(
3266 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3267
3268 return ParseStatus::Success;
3269}
3270
3271/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3272ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3273 SMLoc S = getLoc();
3274 const AsmToken &Tok = getTok();
3275 if (Tok.isNot(AsmToken::Identifier))
3276 return TokError("invalid operand for instruction");
3277
3278 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3279 if (!BTI)
3280 return TokError("invalid operand for instruction");
3281
3282 Operands.push_back(AArch64Operand::CreateBTIHint(
3283 BTI->Encoding, Tok.getString(), S, getContext()));
3284 Lex(); // Eat identifier token.
3285 return ParseStatus::Success;
3286}
3287
3288/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3289/// instruction.
3290ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3291 SMLoc S = getLoc();
3292 const MCExpr *Expr = nullptr;
3293
3294 if (getTok().is(AsmToken::Hash)) {
3295 Lex(); // Eat hash token.
3296 }
3297
3298 if (parseSymbolicImmVal(Expr))
3299 return ParseStatus::Failure;
3300
3301 AArch64MCExpr::VariantKind ELFRefKind;
3302 MCSymbolRefExpr::VariantKind DarwinRefKind;
3303 int64_t Addend;
3304 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3305 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3306 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3307 // No modifier was specified at all; this is the syntax for an ELF basic
3308 // ADRP relocation (unfortunately).
3309 Expr =
3310 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3311 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3312 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3313 Addend != 0) {
3314 return Error(S, "gotpage label reference not allowed an addend");
3315 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3316 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3317 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3318 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3319 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3320 ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
3321 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3322 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3323 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE &&
3324 ELFRefKind != AArch64MCExpr::VK_TLSDESC_AUTH_PAGE) {
3325 // The operand must be an @page or @gotpage qualified symbolref.
3326 return Error(S, "page or gotpage label reference expected");
3327 }
3328 }
3329
3330 // We have either a label reference possibly with addend or an immediate. The
3331 // addend is a raw value here. The linker will adjust it to only reference the
3332 // page.
3333 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3334 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3335
3336 return ParseStatus::Success;
3337}
3338
3339/// tryParseAdrLabel - Parse and validate a source label for the ADR
3340/// instruction.
3341ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3342 SMLoc S = getLoc();
3343 const MCExpr *Expr = nullptr;
3344
3345 // Leave anything with a bracket to the default for SVE
3346 if (getTok().is(AsmToken::LBrac))
3347 return ParseStatus::NoMatch;
3348
3349 if (getTok().is(AsmToken::Hash))
3350 Lex(); // Eat hash token.
3351
3352 if (parseSymbolicImmVal(Expr))
3353 return ParseStatus::Failure;
3354
3355 AArch64MCExpr::VariantKind ELFRefKind;
3356 MCSymbolRefExpr::VariantKind DarwinRefKind;
3357 int64_t Addend;
3358 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3359 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3360 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3361 // No modifier was specified at all; this is the syntax for an ELF basic
3362 // ADR relocation (unfortunately).
3363 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3364 } else if (ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE) {
3365 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3366 // adr. It's not actually GOT entry page address but the GOT address
3367 // itself - we just share the same variant kind with :got_auth: operator
3368 // applied for adrp.
3369 // TODO: can we somehow get current TargetMachine object to call
3370 // getCodeModel() on it to ensure we are using tiny code model?
3371 return Error(S, "unexpected adr label");
3372 }
3373 }
3374
3375 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3376 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseFPImm - A floating point immediate expression operand.
3381template <bool AddFPZeroAsLiteral>
3382ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384
3385 bool Hash = parseOptionalToken(AsmToken::Hash);
3386
3387 // Handle negation, as that still comes through as a separate token.
3388 bool isNegative = parseOptionalToken(AsmToken::Minus);
3389
3390 const AsmToken &Tok = getTok();
3391 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3392 if (!Hash)
3393 return ParseStatus::NoMatch;
3394 return TokError("invalid floating point immediate");
3395 }
3396
3397 // Parse hexadecimal representation.
3398 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3399 if (Tok.getIntVal() > 255 || isNegative)
3400 return TokError("encoded floating point value out of range");
3401
3402 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3403 Operands.push_back(
3404 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3405 } else {
3406 // Parse FP representation.
3407 APFloat RealVal(APFloat::IEEEdouble());
3408 auto StatusOrErr =
3409 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3410 if (errorToBool(StatusOrErr.takeError()))
3411 return TokError("invalid floating point representation");
3412
3413 if (isNegative)
3414 RealVal.changeSign();
3415
3416 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3417 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3418 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3419 } else
3420 Operands.push_back(AArch64Operand::CreateFPImm(
3421 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3422 }
3423
3424 Lex(); // Eat the token.
3425
3426 return ParseStatus::Success;
3427}
3428
3429/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3430/// a shift suffix, for example '#1, lsl #12'.
3431 ParseStatus
3432 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3433 SMLoc S = getLoc();
3434
3435 if (getTok().is(AsmToken::Hash))
3436 Lex(); // Eat '#'
3437 else if (getTok().isNot(AsmToken::Integer))
3438 // The operand should start with '#' or be an integer; otherwise there is no match.
3439 return ParseStatus::NoMatch;
3440
3441 if (getTok().is(AsmToken::Integer) &&
3442 getLexer().peekTok().is(AsmToken::Colon))
3443 return tryParseImmRange(Operands);
3444
3445 const MCExpr *Imm = nullptr;
3446 if (parseSymbolicImmVal(Imm))
3447 return ParseStatus::Failure;
3448 else if (getTok().isNot(AsmToken::Comma)) {
3449 Operands.push_back(
3450 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3451 return ParseStatus::Success;
3452 }
3453
3454 // Eat ','
3455 Lex();
3456 StringRef VecGroup;
3457 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3458 Operands.push_back(
3459 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3460 Operands.push_back(
3461 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3462 return ParseStatus::Success;
3463 }
3464
3465 // The optional operand must be "lsl #N" where N is non-negative.
3466 if (!getTok().is(AsmToken::Identifier) ||
3467 !getTok().getIdentifier().equals_insensitive("lsl"))
3468 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3469
3470 // Eat 'lsl'
3471 Lex();
3472
3473 parseOptionalToken(AsmToken::Hash);
3474
3475 if (getTok().isNot(AsmToken::Integer))
3476 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3477
3478 int64_t ShiftAmount = getTok().getIntVal();
3479
3480 if (ShiftAmount < 0)
3481 return Error(getLoc(), "positive shift amount required");
3482 Lex(); // Eat the number
3483
3484 // Just in case the optional lsl #0 is used for immediates other than zero.
3485 if (ShiftAmount == 0 && Imm != nullptr) {
3486 Operands.push_back(
3487 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3488 return ParseStatus::Success;
3489 }
3490
3491 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3492 getLoc(), getContext()));
3493 return ParseStatus::Success;
3494}
3495
3496 /// parseCondCodeString - Parse a Condition Code string, optionally returning
3497 /// a suggestion to help with common typos.
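/// For example (illustrative): "eq", "ne" and "hs" (alias "cs") are the base
/// codes; with SVE, aliases such as "none" (EQ) or "any" (NE) are accepted too.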
3498 AArch64CC::CondCode
3499 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3500 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3501 .Case("eq", AArch64CC::EQ)
3502 .Case("ne", AArch64CC::NE)
3503 .Case("cs", AArch64CC::HS)
3504 .Case("hs", AArch64CC::HS)
3505 .Case("cc", AArch64CC::LO)
3506 .Case("lo", AArch64CC::LO)
3507 .Case("mi", AArch64CC::MI)
3508 .Case("pl", AArch64CC::PL)
3509 .Case("vs", AArch64CC::VS)
3510 .Case("vc", AArch64CC::VC)
3511 .Case("hi", AArch64CC::HI)
3512 .Case("ls", AArch64CC::LS)
3513 .Case("ge", AArch64CC::GE)
3514 .Case("lt", AArch64CC::LT)
3515 .Case("gt", AArch64CC::GT)
3516 .Case("le", AArch64CC::LE)
3517 .Case("al", AArch64CC::AL)
3518 .Case("nv", AArch64CC::NV)
3520
3521 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3523 .Case("none", AArch64CC::EQ)
3524 .Case("any", AArch64CC::NE)
3525 .Case("nlast", AArch64CC::HS)
3526 .Case("last", AArch64CC::LO)
3527 .Case("first", AArch64CC::MI)
3528 .Case("nfrst", AArch64CC::PL)
3529 .Case("pmore", AArch64CC::HI)
3530 .Case("plast", AArch64CC::LS)
3531 .Case("tcont", AArch64CC::GE)
3532 .Case("tstop", AArch64CC::LT)
3534
3535 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3536 Suggestion = "nfrst";
3537 }
3538 return CC;
3539}
3540
3541/// parseCondCode - Parse a Condition Code operand.
3542bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3543 bool invertCondCode) {
3544 SMLoc S = getLoc();
3545 const AsmToken &Tok = getTok();
3546 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3547
3548 StringRef Cond = Tok.getString();
3549 std::string Suggestion;
3550 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3551 if (CC == AArch64CC::Invalid) {
3552 std::string Msg = "invalid condition code";
3553 if (!Suggestion.empty())
3554 Msg += ", did you mean " + Suggestion + "?";
3555 return TokError(Msg);
3556 }
3557 Lex(); // Eat identifier token.
3558
3559 if (invertCondCode) {
3560 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3561 return TokError("condition codes AL and NV are invalid for this instruction");
3563 }
3564
3565 Operands.push_back(
3566 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3567 return false;
3568}
3569
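// For illustration, the names looked up here are SVCR operands such as
// "svcrsm", "svcrza" and "svcrsmza", e.g. "msr svcrsm, #1" (assuming the
// standard SME MSR spelling).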
3570ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3571 const AsmToken &Tok = getTok();
3572 SMLoc S = getLoc();
3573
3574 if (Tok.isNot(AsmToken::Identifier))
3575 return TokError("invalid operand for instruction");
3576
3577 unsigned PStateImm = -1;
3578 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3579 if (!SVCR)
3580 return ParseStatus::NoMatch;
3581 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3582 PStateImm = SVCR->Encoding;
3583
3584 Operands.push_back(
3585 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3586 Lex(); // Eat identifier token.
3587 return ParseStatus::Success;
3588}
3589
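// For illustration, this accepts "za" or "za.b" for the whole array, and
// tile/slice names such as "za0.s", "za0h.s" (row) or "za1v.d" (column),
// matching the 'h'/'v' Row/Col mapping below.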
3590ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3591 const AsmToken &Tok = getTok();
3592 SMLoc S = getLoc();
3593
3594 StringRef Name = Tok.getString();
3595
3596 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3597 Lex(); // eat "za[.(b|h|s|d)]"
3598 unsigned ElementWidth = 0;
3599 auto DotPosition = Name.find('.');
3600 if (DotPosition != StringRef::npos) {
3601 const auto &KindRes =
3602 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3603 if (!KindRes)
3604 return TokError(
3605 "Expected the register to be followed by element width suffix");
3606 ElementWidth = KindRes->second;
3607 }
3608 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3609 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3610 getContext()));
3611 if (getLexer().is(AsmToken::LBrac)) {
3612 // There's no comma after the matrix operand, so we can parse the next
3613 // operand immediately.
3614 if (parseOperand(Operands, false, false))
3615 return ParseStatus::NoMatch;
3616 }
3617 return ParseStatus::Success;
3618 }
3619
3620 // Try to parse matrix register.
3621 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3622 if (!Reg)
3623 return ParseStatus::NoMatch;
3624
3625 size_t DotPosition = Name.find('.');
3626 assert(DotPosition != StringRef::npos && "Unexpected register");
3627
3628 StringRef Head = Name.take_front(DotPosition);
3629 StringRef Tail = Name.drop_front(DotPosition);
3630 StringRef RowOrColumn = Head.take_back();
3631
3632 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3633 .Case("h", MatrixKind::Row)
3634 .Case("v", MatrixKind::Col)
3635 .Default(MatrixKind::Tile);
3636
3637 // Next, parse the element width suffix.
3638 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3639 if (!KindRes)
3640 return TokError(
3641 "Expected the register to be followed by element width suffix");
3642 unsigned ElementWidth = KindRes->second;
3643
3644 Lex();
3645
3646 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3647 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3648
3649 if (getLexer().is(AsmToken::LBrac)) {
3650 // There's no comma after the matrix operand, so we can parse the next
3651 // operand immediately.
3652 if (parseOperand(Operands, false, false))
3653 return ParseStatus::NoMatch;
3654 }
3655 return ParseStatus::Success;
3656}
3657
3658 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
3659 /// extend argument. Parse them if present.
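/// For example (illustrative): the "lsl #2" in "add x0, x1, x2, lsl #2", or
/// the "uxtw" in "ldr w0, [x1, w2, uxtw]", where the extend's #0 is implicit.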
3660 ParseStatus
3661 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3662 const AsmToken &Tok = getTok();
3663 std::string LowerID = Tok.getString().lower();
3664 AArch64_AM::ShiftExtendType ShOp =
3665 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3666 .Case("lsl", AArch64_AM::LSL)
3667 .Case("lsr", AArch64_AM::LSR)
3668 .Case("asr", AArch64_AM::ASR)
3669 .Case("ror", AArch64_AM::ROR)
3670 .Case("msl", AArch64_AM::MSL)
3671 .Case("uxtb", AArch64_AM::UXTB)
3672 .Case("uxth", AArch64_AM::UXTH)
3673 .Case("uxtw", AArch64_AM::UXTW)
3674 .Case("uxtx", AArch64_AM::UXTX)
3675 .Case("sxtb", AArch64_AM::SXTB)
3676 .Case("sxth", AArch64_AM::SXTH)
3677 .Case("sxtw", AArch64_AM::SXTW)
3678 .Case("sxtx", AArch64_AM::SXTX)
3680
3682 return ParseStatus::NoMatch;
3683
3684 SMLoc S = Tok.getLoc();
3685 Lex();
3686
3687 bool Hash = parseOptionalToken(AsmToken::Hash);
3688
3689 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3690 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3691 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3692 ShOp == AArch64_AM::MSL) {
3693 // We expect a number here.
3694 return TokError("expected #imm after shift specifier");
3695 }
3696
3697 // "extend" type operations don't need an immediate, #0 is implicit.
3698 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3699 Operands.push_back(
3700 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3701 return ParseStatus::Success;
3702 }
3703
3704 // Make sure we actually have a number, an identifier or a parenthesized
3705 // expression.
3706 SMLoc E = getLoc();
3707 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3708 !getTok().is(AsmToken::Identifier))
3709 return Error(E, "expected integer shift amount");
3710
3711 const MCExpr *ImmVal;
3712 if (getParser().parseExpression(ImmVal))
3713 return ParseStatus::Failure;
3714
3715 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3716 if (!MCE)
3717 return Error(E, "expected constant '#imm' after shift specifier");
3718
3719 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3720 Operands.push_back(AArch64Operand::CreateShiftExtend(
3721 ShOp, MCE->getValue(), true, S, E, getContext()));
3722 return ParseStatus::Success;
3723}
3724
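// For illustration, these names are the extension spellings accepted by
// directives such as ".arch_extension" and ".cpu", e.g. ".arch_extension mte"
// or ".cpu generic+crc" (directive examples are illustrative).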
3725static const struct Extension {
3726 const char *Name;
3727 const FeatureBitset Features;
3728 } ExtensionMap[] = {
3729 {"crc", {AArch64::FeatureCRC}},
3730 {"sm4", {AArch64::FeatureSM4}},
3731 {"sha3", {AArch64::FeatureSHA3}},
3732 {"sha2", {AArch64::FeatureSHA2}},
3733 {"aes", {AArch64::FeatureAES}},
3734 {"crypto", {AArch64::FeatureCrypto}},
3735 {"fp", {AArch64::FeatureFPARMv8}},
3736 {"simd", {AArch64::FeatureNEON}},
3737 {"ras", {AArch64::FeatureRAS}},
3738 {"rasv2", {AArch64::FeatureRASv2}},
3739 {"lse", {AArch64::FeatureLSE}},
3740 {"predres", {AArch64::FeaturePredRes}},
3741 {"predres2", {AArch64::FeatureSPECRES2}},
3742 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3743 {"mte", {AArch64::FeatureMTE}},
3744 {"memtag", {AArch64::FeatureMTE}},
3745 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3746 {"pan", {AArch64::FeaturePAN}},
3747 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3748 {"ccpp", {AArch64::FeatureCCPP}},
3749 {"rcpc", {AArch64::FeatureRCPC}},
3750 {"rng", {AArch64::FeatureRandGen}},
3751 {"sve", {AArch64::FeatureSVE}},
3752 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3753 {"sve2", {AArch64::FeatureSVE2}},
3754 {"sve-aes", {AArch64::FeatureSVEAES}},
3755 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3756 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3757 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3758 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3759 {"sve2p1", {AArch64::FeatureSVE2p1}},
3760 {"ls64", {AArch64::FeatureLS64}},
3761 {"xs", {AArch64::FeatureXS}},
3762 {"pauth", {AArch64::FeaturePAuth}},
3763 {"flagm", {AArch64::FeatureFlagM}},
3764 {"rme", {AArch64::FeatureRME}},
3765 {"sme", {AArch64::FeatureSME}},
3766 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3767 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3768 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3769 {"sme2", {AArch64::FeatureSME2}},
3770 {"sme2p1", {AArch64::FeatureSME2p1}},
3771 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3772 {"hbc", {AArch64::FeatureHBC}},
3773 {"mops", {AArch64::FeatureMOPS}},
3774 {"mec", {AArch64::FeatureMEC}},
3775 {"the", {AArch64::FeatureTHE}},
3776 {"d128", {AArch64::FeatureD128}},
3777 {"lse128", {AArch64::FeatureLSE128}},
3778 {"ite", {AArch64::FeatureITE}},
3779 {"cssc", {AArch64::FeatureCSSC}},
3780 {"rcpc3", {AArch64::FeatureRCPC3}},
3781 {"gcs", {AArch64::FeatureGCS}},
3782 {"bf16", {AArch64::FeatureBF16}},
3783 {"compnum", {AArch64::FeatureComplxNum}},
3784 {"dotprod", {AArch64::FeatureDotProd}},
3785 {"f32mm", {AArch64::FeatureMatMulFP32}},
3786 {"f64mm", {AArch64::FeatureMatMulFP64}},
3787 {"fp16", {AArch64::FeatureFullFP16}},
3788 {"fp16fml", {AArch64::FeatureFP16FML}},
3789 {"i8mm", {AArch64::FeatureMatMulInt8}},
3790 {"lor", {AArch64::FeatureLOR}},
3791 {"profile", {AArch64::FeatureSPE}},
3792 // "rdma" is the name documented by binutils for the feature, but
3793 // binutils also accepts incomplete prefixes of features, so "rdm"
3794 // works too. Support both spellings here.
3795 {"rdm", {AArch64::FeatureRDM}},
3796 {"rdma", {AArch64::FeatureRDM}},
3797 {"sb", {AArch64::FeatureSB}},
3798 {"ssbs", {AArch64::FeatureSSBS}},
3799 {"tme", {AArch64::FeatureTME}},
3800 {"fp8", {AArch64::FeatureFP8}},
3801 {"faminmax", {AArch64::FeatureFAMINMAX}},
3802 {"fp8fma", {AArch64::FeatureFP8FMA}},
3803 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3804 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3805 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3806 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3807 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3808 {"lut", {AArch64::FeatureLUT}},
3809 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3810 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3811 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3812 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3813 {"cpa", {AArch64::FeatureCPA}},
3814 {"tlbiw", {AArch64::FeatureTLBIW}},
3815 {"pops", {AArch64::FeaturePoPS}},
3816 {"cmpbr", {AArch64::FeatureCMPBR}},
3817 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3818 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3819 {"fprcvt", {AArch64::FeatureFPRCVT}},
3820 {"lsfe", {AArch64::FeatureLSFE}},
3821 {"sme2p2", {AArch64::FeatureSME2p2}},
3822 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3823 {"sve2p2", {AArch64::FeatureSVE2p2}},
3824 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3825 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3826 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3827 {"lsui", {AArch64::FeatureLSUI}},
3828 {"occmo", {AArch64::FeatureOCCMO}},
3829 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3831
3832static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3833 if (FBS[AArch64::HasV8_0aOps])
3834 Str += "ARMv8a";
3835 if (FBS[AArch64::HasV8_1aOps])
3836 Str += "ARMv8.1a";
3837 else if (FBS[AArch64::HasV8_2aOps])
3838 Str += "ARMv8.2a";
3839 else if (FBS[AArch64::HasV8_3aOps])
3840 Str += "ARMv8.3a";
3841 else if (FBS[AArch64::HasV8_4aOps])
3842 Str += "ARMv8.4a";
3843 else if (FBS[AArch64::HasV8_5aOps])
3844 Str += "ARMv8.5a";
3845 else if (FBS[AArch64::HasV8_6aOps])
3846 Str += "ARMv8.6a";
3847 else if (FBS[AArch64::HasV8_7aOps])
3848 Str += "ARMv8.7a";
3849 else if (FBS[AArch64::HasV8_8aOps])
3850 Str += "ARMv8.8a";
3851 else if (FBS[AArch64::HasV8_9aOps])
3852 Str += "ARMv8.9a";
3853 else if (FBS[AArch64::HasV9_0aOps])
3854 Str += "ARMv9-a";
3855 else if (FBS[AArch64::HasV9_1aOps])
3856 Str += "ARMv9.1a";
3857 else if (FBS[AArch64::HasV9_2aOps])
3858 Str += "ARMv9.2a";
3859 else if (FBS[AArch64::HasV9_3aOps])
3860 Str += "ARMv9.3a";
3861 else if (FBS[AArch64::HasV9_4aOps])
3862 Str += "ARMv9.4a";
3863 else if (FBS[AArch64::HasV9_5aOps])
3864 Str += "ARMv9.5a";
3865 else if (FBS[AArch64::HasV9_6aOps])
3866 Str += "ARMv9.6a";
3867 else if (FBS[AArch64::HasV8_0rOps])
3868 Str += "ARMv8r";
3869 else {
3870 SmallVector<std::string, 2> ExtMatches;
3871 for (const auto& Ext : ExtensionMap) {
3872 // Use & in case multiple features are enabled
3873 if ((FBS & Ext.Features) != FeatureBitset())
3874 ExtMatches.push_back(Ext.Name);
3875 }
3876 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3877 }
3878}
3879
3880void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3881 SMLoc S) {
3882 const uint16_t Op2 = Encoding & 7;
3883 const uint16_t Cm = (Encoding & 0x78) >> 3;
3884 const uint16_t Cn = (Encoding & 0x780) >> 7;
3885 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3886
3887 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3888
3889 Operands.push_back(
3890 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3891 Operands.push_back(
3892 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3893 Operands.push_back(
3894 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3895 Expr = MCConstantExpr::create(Op2, getContext());
3896 Operands.push_back(
3897 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3898}
3899
3900/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3901/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
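/// For example (illustrative): "ic ialluis", "dc zva, x0", "at s1e1r, x0" and
/// "tlbi vmalle1" are all rewritten into SYS operands by createSysAlias above.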
3902 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3903 OperandVector &Operands) {
3904 if (Name.contains('.'))
3905 return TokError("invalid operand");
3906
3907 Mnemonic = Name;
3908 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3909
3910 const AsmToken &Tok = getTok();
3911 StringRef Op = Tok.getString();
3912 SMLoc S = Tok.getLoc();
3913
3914 if (Mnemonic == "ic") {
3915 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3916 if (!IC)
3917 return TokError("invalid operand for IC instruction");
3918 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3919 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3921 return TokError(Str);
3922 }
3923 createSysAlias(IC->Encoding, Operands, S);
3924 } else if (Mnemonic == "dc") {
3925 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3926 if (!DC)
3927 return TokError("invalid operand for DC instruction");
3928 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3929 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3931 return TokError(Str);
3932 }
3933 createSysAlias(DC->Encoding, Operands, S);
3934 } else if (Mnemonic == "at") {
3935 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3936 if (!AT)
3937 return TokError("invalid operand for AT instruction");
3938 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3939 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3941 return TokError(Str);
3942 }
3943 createSysAlias(AT->Encoding, Operands, S);
3944 } else if (Mnemonic == "tlbi") {
3945 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3946 if (!TLBI)
3947 return TokError("invalid operand for TLBI instruction");
3948 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3949 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3951 return TokError(Str);
3952 }
3953 createSysAlias(TLBI->Encoding, Operands, S);
3954 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3955
3956 if (Op.lower() != "rctx")
3957 return TokError("invalid operand for prediction restriction instruction");
3958
3959 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3960 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3961 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3962
3963 if (Mnemonic == "cosp" && !hasSpecres2)
3964 return TokError("COSP requires: predres2");
3965 if (!hasPredres)
3966 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3967
3968 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3969 : Mnemonic == "dvp" ? 0b101
3970 : Mnemonic == "cosp" ? 0b110
3971 : Mnemonic == "cpp" ? 0b111
3972 : 0;
3973 assert(PRCTX_Op2 &&
3974 "Invalid mnemonic for prediction restriction instruction");
3975 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3976 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3977
3978 createSysAlias(Encoding, Operands, S);
3979 }
3980
3981 Lex(); // Eat operand.
3982
3983 bool ExpectRegister = !Op.contains_insensitive("all");
3984 bool HasRegister = false;
3985
3986 // Check for the optional register operand.
3987 if (parseOptionalToken(AsmToken::Comma)) {
3988 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3989 return TokError("expected register operand");
3990 HasRegister = true;
3991 }
3992
3993 if (ExpectRegister && !HasRegister)
3994 return TokError("specified " + Mnemonic + " op requires a register");
3995 else if (!ExpectRegister && HasRegister)
3996 return TokError("specified " + Mnemonic + " op does not use a register");
3997
3998 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3999 return true;
4000
4001 return false;
4002}
4003
4004/// parseSyspAlias - The TLBIP instructions are simple aliases for
4005/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
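/// For example (illustrative): "tlbip vae1, x0, x1" names a register pair,
/// and an "nXS" suffix on the operand ("tlbip vae1nxs, ...") sets bit 7 of
/// the encoding, as handled below.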
4006 bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4007 OperandVector &Operands) {
4008 if (Name.contains('.'))
4009 return TokError("invalid operand");
4010
4011 Mnemonic = Name;
4012 Operands.push_back(
4013 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4014
4015 const AsmToken &Tok = getTok();
4016 StringRef Op = Tok.getString();
4017 SMLoc S = Tok.getLoc();
4018
4019 if (Mnemonic == "tlbip") {
4020 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4021 if (HasnXSQualifier) {
4022 Op = Op.drop_back(3);
4023 }
4024 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4025 if (!TLBIorig)
4026 return TokError("invalid operand for TLBIP instruction");
4027 const AArch64TLBI::TLBI TLBI(
4028 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4029 TLBIorig->NeedsReg,
4030 HasnXSQualifier
4031 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4032 : TLBIorig->FeaturesRequired);
4033 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4034 std::string Name =
4035 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4036 std::string Str("TLBIP " + Name + " requires: ");
4038 return TokError(Str);
4039 }
4040 createSysAlias(TLBI.Encoding, Operands, S);
4041 }
4042
4043 Lex(); // Eat operand.
4044
4045 if (parseComma())
4046 return true;
4047
4048 if (Tok.isNot(AsmToken::Identifier))
4049 return TokError("expected register identifier");
4050 auto Result = tryParseSyspXzrPair(Operands);
4051 if (Result.isNoMatch())
4052 Result = tryParseGPRSeqPair(Operands);
4053 if (!Result.isSuccess())
4054 return TokError("specified " + Mnemonic +
4055 " op requires a pair of registers");
4056
4057 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4058 return true;
4059
4060 return false;
4061}
4062
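// For illustration: this parses barrier operands such as "dsb sy", "dsb #4",
// "isb" ("sy" is its only named option) and "tsb csync"; "dsb" immediates
// above 15 are left for the nXS variant below.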
4063ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4064 MCAsmParser &Parser = getParser();
4065 const AsmToken &Tok = getTok();
4066
4067 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4068 return TokError("'csync' operand expected");
4069 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4070 // Immediate operand.
4071 const MCExpr *ImmVal;
4072 SMLoc ExprLoc = getLoc();
4073 AsmToken IntTok = Tok;
4074 if (getParser().parseExpression(ImmVal))
4075 return ParseStatus::Failure;
4076 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4077 if (!MCE)
4078 return Error(ExprLoc, "immediate value expected for barrier operand");
4079 int64_t Value = MCE->getValue();
4080 if (Mnemonic == "dsb" && Value > 15) {
4081 // This case is a no-match here, but it might be matched by the nXS
4082 // variant. We deliberately don't unlex the optional '#', as it is not
4083 // needed to characterize an integer immediate.
4084 Parser.getLexer().UnLex(IntTok);
4085 return ParseStatus::NoMatch;
4086 }
4087 if (Value < 0 || Value > 15)
4088 return Error(ExprLoc, "barrier operand out of range");
4089 auto DB = AArch64DB::lookupDBByEncoding(Value);
4090 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4091 ExprLoc, getContext(),
4092 false /*hasnXSModifier*/));
4093 return ParseStatus::Success;
4094 }
4095
4096 if (Tok.isNot(AsmToken::Identifier))
4097 return TokError("invalid operand for instruction");
4098
4099 StringRef Operand = Tok.getString();
4100 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4101 auto DB = AArch64DB::lookupDBByName(Operand);
4102 // The only valid named option for ISB is 'sy'
4103 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4104 return TokError("'sy' or #imm operand expected");
4105 // The only valid named option for TSB is 'csync'
4106 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4107 return TokError("'csync' operand expected");
4108 if (!DB && !TSB) {
4109 if (Mnemonic == "dsb") {
4110 // This case is a no match here, but it might be matched by the nXS
4111 // variant.
4112 return ParseStatus::NoMatch;
4113 }
4114 return TokError("invalid barrier option name");
4115 }
4116
4117 Operands.push_back(AArch64Operand::CreateBarrier(
4118 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4119 getContext(), false /*hasnXSModifier*/));
4120 Lex(); // Consume the option
4121
4122 return ParseStatus::Success;
4123}
4124
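// For illustration: this parses the v8.7-A nXS barriers, e.g. "dsb synxs" or
// "dsb #16" (the operand name "synxs" is illustrative; only the immediates
// 16, 20, 24 and 28 are accepted, as checked below).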
4125 ParseStatus
4126 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4127 const AsmToken &Tok = getTok();
4128
4129 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4130 if (Mnemonic != "dsb")
4131 return ParseStatus::Failure;
4132
4133 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4134 // Immediate operand.
4135 const MCExpr *ImmVal;
4136 SMLoc ExprLoc = getLoc();
4137 if (getParser().parseExpression(ImmVal))
4138 return ParseStatus::Failure;
4139 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4140 if (!MCE)
4141 return Error(ExprLoc, "immediate value expected for barrier operand");
4142 int64_t Value = MCE->getValue();
4143 // v8.7-A DSB in the nXS variant accepts only the following immediate
4144 // values: 16, 20, 24, 28.
4145 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4146 return Error(ExprLoc, "barrier operand out of range");
4147 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4148 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4149 ExprLoc, getContext(),
4150 true /*hasnXSModifier*/));
4151 return ParseStatus::Success;
4152 }
4153
4154 if (Tok.isNot(AsmToken::Identifier))
4155 return TokError("invalid operand for instruction");
4156
4157 StringRef Operand = Tok.getString();
4158 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4159
4160 if (!DB)
4161 return TokError("invalid barrier option name");
4162
4163 Operands.push_back(
4164 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4165 getContext(), true /*hasnXSModifier*/));
4166 Lex(); // Consume the option
4167
4168 return ParseStatus::Success;
4169}
4170
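// For illustration: this accepts named system registers, e.g.
// "mrs x0, midr_el1", as well as the generic encoded form
// "msr s3_0_c4_c2_0, x0" handled by parseGenericRegister.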
4171ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4172 const AsmToken &Tok = getTok();
4173
4174 if (Tok.isNot(AsmToken::Identifier))
4175 return ParseStatus::NoMatch;
4176
4177 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4178 return ParseStatus::NoMatch;
4179
4180 int MRSReg, MSRReg;
4181 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4182 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4183 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4184 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4185 } else
4186 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4187
4188 unsigned PStateImm = -1;
4189 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4190 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4191 PStateImm = PState15->Encoding;
4192 if (!PState15) {
4193 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4194 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4195 PStateImm = PState1->Encoding;
4196 }
4197
4198 Operands.push_back(
4199 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4200 PStateImm, getContext()));
4201 Lex(); // Eat identifier
4202
4203 return ParseStatus::Success;
4204}
4205
4206 ParseStatus
4207 AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4208 SMLoc S = getLoc();
4209 const AsmToken &Tok = getTok();
4210 if (Tok.isNot(AsmToken::Identifier))
4211 return TokError("invalid operand for instruction");
4212
4213 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4214 if (!PH)
4215 return TokError("invalid operand for instruction");
4216
4217 Operands.push_back(AArch64Operand::CreatePHintInst(
4218 PH->Encoding, Tok.getString(), S, getContext()));
4219 Lex(); // Eat identifier token.
4220 return ParseStatus::Success;
4221}
4222
4223/// tryParseNeonVectorRegister - Parse a vector register operand.
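/// For example (illustrative): "v0.8b" or "v2.4s", optionally followed by a
/// lane index such as "v2.s[1]", which tryParseVectorIndex picks up below.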
4224bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4225 if (getTok().isNot(AsmToken::Identifier))
4226 return true;
4227
4228 SMLoc S = getLoc();
4229 // Check for a vector register specifier first.
4230 StringRef Kind;
4231 MCRegister Reg;
4232 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4233 if (!Res.isSuccess())
4234 return true;
4235
4236 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4237 if (!KindRes)
4238 return true;
4239
4240 unsigned ElementWidth = KindRes->second;
4241 Operands.push_back(
4242 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4243 S, getLoc(), getContext()));
4244
4245 // If there was an explicit qualifier, that goes on as a literal text
4246 // operand.
4247 if (!Kind.empty())
4248 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4249
4250 return tryParseVectorIndex(Operands).isFailure();
4251}
4252
4253ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4254 SMLoc SIdx = getLoc();
4255 if (parseOptionalToken(AsmToken::LBrac)) {
4256 const MCExpr *ImmVal;
4257 if (getParser().parseExpression(ImmVal))
4258 return ParseStatus::NoMatch;
4259 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4260 if (!MCE)
4261 return TokError("immediate value expected for vector index");
4262
4263 SMLoc E = getLoc();
4264
4265 if (parseToken(AsmToken::RBrac, "']' expected"))
4266 return ParseStatus::Failure;
4267
4268 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4269 E, getContext()));
4270 return ParseStatus::Success;
4271 }
4272
4273 return ParseStatus::NoMatch;
4274}
4275
4276// tryParseVectorRegister - Try to parse a vector register name with
4277// optional kind specifier. If it is a register specifier, eat the token
4278// and return it.
4279ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4280 StringRef &Kind,
4281 RegKind MatchKind) {
4282 const AsmToken &Tok = getTok();
4283
4284 if (Tok.isNot(AsmToken::Identifier))
4285 return ParseStatus::NoMatch;
4286
4287 StringRef Name = Tok.getString();
4288 // If there is a kind specifier, it's separated from the register name by
4289 // a '.'.
4290 size_t Start = 0, Next = Name.find('.');
4291 StringRef Head = Name.slice(Start, Next);
4292 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4293
4294 if (RegNum) {
4295 if (Next != StringRef::npos) {
4296 Kind = Name.substr(Next);
4297 if (!isValidVectorKind(Kind, MatchKind))
4298 return TokError("invalid vector kind qualifier");
4299 }
4300 Lex(); // Eat the register token.
4301
4302 Reg = RegNum;
4303 return ParseStatus::Success;
4304 }
4305
4306 return ParseStatus::NoMatch;
4307}
4308
4309 ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4310 OperandVector &Operands) {
4311 ParseStatus Status =
4312 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4313 if (!Status.isSuccess())
4314 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4315 return Status;
4316}
4317
4318/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
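/// For example (illustrative): "p0.b" (predicate), "pn8.b" (predicate as
/// counter), or a qualified form such as "p0/z" or "p0/m", where the slash
/// and the "z"/"m" become separate tokens below.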
4319template <RegKind RK>
4320 ParseStatus
4321 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4322 // Check for a SVE predicate register specifier first.
4323 const SMLoc S = getLoc();
4324 StringRef Kind;
4325 MCRegister RegNum;
4326 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4327 if (!Res.isSuccess())
4328 return Res;
4329
4330 const auto &KindRes = parseVectorKind(Kind, RK);
4331 if (!KindRes)
4332 return ParseStatus::NoMatch;
4333
4334 unsigned ElementWidth = KindRes->second;
4335 Operands.push_back(AArch64Operand::CreateVectorReg(
4336 RegNum, RK, ElementWidth, S,
4337 getLoc(), getContext()));
4338
4339 if (getLexer().is(AsmToken::LBrac)) {
4340 if (RK == RegKind::SVEPredicateAsCounter) {
4341 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4342 if (ResIndex.isSuccess())
4343 return ParseStatus::Success;
4344 } else {
4345 // Indexed predicate: there's no comma, so try to parse the next operand
4346 // immediately.
4347 if (parseOperand(Operands, false, false))
4348 return ParseStatus::NoMatch;
4349 }
4350 }
4351
4352 // Not all predicates are followed by a '/m' or '/z'.
4353 if (getTok().isNot(AsmToken::Slash))
4354 return ParseStatus::Success;
4355
4357 // But when they are, they shouldn't have an element type suffix.
4357 if (!Kind.empty())
4358 return Error(S, "not expecting size suffix");
4359
4360 // Add a literal slash as an operand.
4361 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4362
4363 Lex(); // Eat the slash.
4364
4365 // Zeroing or merging?
4366 auto Pred = getTok().getString().lower();
4367 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4368 return Error(getLoc(), "expecting 'z' predication");
4369
4370 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4371 return Error(getLoc(), "expecting 'm' or 'z' predication");
4372
4373 // Add zero/merge token.
4374 const char *ZM = Pred == "z" ? "z" : "m";
4375 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4376
4377 Lex(); // Eat zero/merge token.
4378 return ParseStatus::Success;
4379}
4380
4381/// parseRegister - Parse a register operand.
4382bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4383 // Try for a Neon vector register.
4384 if (!tryParseNeonVectorRegister(Operands))
4385 return false;
4386
4387 if (tryParseZTOperand(Operands).isSuccess())
4388 return false;
4389
4390 // Otherwise try for a scalar register.
4391 if (tryParseGPROperand<false>(Operands).isSuccess())
4392 return false;
4393
4394 return true;
4395}
4396
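// For illustration: this handles relocation specifiers such as the ":lo12:"
// in "add x0, x0, :lo12:sym", wrapping the parsed expression in the
// corresponding AArch64MCExpr variant kind.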
4397bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4398 bool HasELFModifier = false;
4399 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4400
4401 if (parseOptionalToken(AsmToken::Colon)) {
4402 HasELFModifier = true;
4403
4404 if (getTok().isNot(AsmToken::Identifier))
4405 return TokError("expect relocation specifier in operand after ':'");
4406
4407 std::string LowerCase = getTok().getIdentifier().lower();
4408 RefKind =
4409 StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4410 .Case("lo12", AArch64MCExpr::VK_LO12)
4411 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4412 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4413 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4414 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4415 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4416 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4417 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4418 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4419 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4420 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4421 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4422 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4423 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4424 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4425 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4426 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4427 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4428 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4429 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4430 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4431 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4432 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4433 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4434 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4435 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4436 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4437 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4438 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4439 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4440 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4441 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4442 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4443 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4444 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4445 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4446 .Case("tlsdesc_auth_lo12", AArch64MCExpr::VK_TLSDESC_AUTH_LO12)
4448 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4449 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4451 .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
4453 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4454 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4455 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4458 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4459 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4461
4462 if (RefKind == AArch64MCExpr::VK_INVALID)
4463 return TokError("expect relocation specifier in operand after ':'");
4464
4465 Lex(); // Eat identifier
4466
4467 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4468 return true;
4469 }
4470
4471 if (getParser().parseExpression(ImmVal))
4472 return true;
4473
4474 if (HasELFModifier)
4475 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4476
4477 return false;
4478}
4479
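// For illustration: this accepts "{}", the "{za}" alias, and explicit tile
// lists such as "{za0.s, za1.s}", which are folded into a register mask.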
4480ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4481 if (getTok().isNot(AsmToken::LCurly))
4482 return ParseStatus::NoMatch;
4483
4484 auto ParseMatrixTile = [this](unsigned &Reg,
4485 unsigned &ElementWidth) -> ParseStatus {
4486 StringRef Name = getTok().getString();
4487 size_t DotPosition = Name.find('.');
4488 if (DotPosition == StringRef::npos)
4489 return ParseStatus::NoMatch;
4490
4491 unsigned RegNum = matchMatrixTileListRegName(Name);
4492 if (!RegNum)
4493 return ParseStatus::NoMatch;
4494
4495 StringRef Tail = Name.drop_front(DotPosition);
4496 const std::optional<std::pair<int, int>> &KindRes =
4497 parseVectorKind(Tail, RegKind::Matrix);
4498 if (!KindRes)
4499 return TokError(
4500 "Expected the register to be followed by element width suffix");
4501 ElementWidth = KindRes->second;
4502 Reg = RegNum;
4503 Lex(); // Eat the register.
4504 return ParseStatus::Success;
4505 };
4506
4507 SMLoc S = getLoc();
4508 auto LCurly = getTok();
4509 Lex(); // Eat left bracket token.
4510
4511 // Empty matrix list
4512 if (parseOptionalToken(AsmToken::RCurly)) {
4513 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4514 /*RegMask=*/0, S, getLoc(), getContext()));
4515 return ParseStatus::Success;
4516 }
4517
4518 // Try to parse the {za} alias early.
4519 if (getTok().getString().equals_insensitive("za")) {
4520 Lex(); // Eat 'za'
4521
4522 if (parseToken(AsmToken::RCurly, "'}' expected"))
4523 return ParseStatus::Failure;
4524
4525 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4526 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4527 return ParseStatus::Success;
4528 }
4529
4530 SMLoc TileLoc = getLoc();
4531
4532 unsigned FirstReg, ElementWidth;
4533 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4534 if (!ParseRes.isSuccess()) {
4535 getLexer().UnLex(LCurly);
4536 return ParseRes;
4537 }
4538
4539 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4540
4541 unsigned PrevReg = FirstReg;
4542
4543 SmallSet<unsigned, 8> DRegs;
4544 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4545
4546 SmallSet<unsigned, 8> SeenRegs;
4547 SeenRegs.insert(FirstReg);
4548
4549 while (parseOptionalToken(AsmToken::Comma)) {
4550 TileLoc = getLoc();
4551 unsigned Reg, NextElementWidth;
4552 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4553 if (!ParseRes.isSuccess())
4554 return ParseRes;
4555
4556 // Element size must match on all regs in the list.
4557 if (ElementWidth != NextElementWidth)
4558 return Error(TileLoc, "mismatched register size suffix");
4559
4560 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4561 Warning(TileLoc, "tile list not in ascending order");
4562
4563 if (SeenRegs.contains(Reg))
4564 Warning(TileLoc, "duplicate tile in list");
4565 else {
4566 SeenRegs.insert(Reg);
4567 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4568 }
4569
4570 PrevReg = Reg;
4571 }
4572
4573 if (parseToken(AsmToken::RCurly, "'}' expected"))
4574 return ParseStatus::Failure;
4575
4576 unsigned RegMask = 0;
4577 for (auto Reg : DRegs)
4578 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4579 RI->getEncodingValue(AArch64::ZAD0));
4580 Operands.push_back(
4581 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4582
4583 return ParseStatus::Success;
4584}
4585
4586template <RegKind VectorKind>
4587ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4588 bool ExpectMatch) {
4589 MCAsmParser &Parser = getParser();
4590 if (!getTok().is(AsmToken::LCurly))
4591 return ParseStatus::NoMatch;
4592
4593 // Wrapper around parse function
4594 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4595 bool NoMatchIsError) -> ParseStatus {
4596 auto RegTok = getTok();
4597 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4598 if (ParseRes.isSuccess()) {
4599 if (parseVectorKind(Kind, VectorKind))
4600 return ParseRes;
4601 llvm_unreachable("Expected a valid vector kind");
4602 }
4603
4604 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4605 RegTok.getString().equals_insensitive("zt0"))
4606 return ParseStatus::NoMatch;
4607
4608 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4609 (ParseRes.isNoMatch() && NoMatchIsError &&
4610 !RegTok.getString().starts_with_insensitive("za")))
4611 return Error(Loc, "vector register expected");
4612
4613 return ParseStatus::NoMatch;
4614 };
4615
4616 int NumRegs = getNumRegsForRegKind(VectorKind);
4617 SMLoc S = getLoc();
4618 auto LCurly = getTok();
4619 Lex(); // Eat left bracket token.
4620
4621 StringRef Kind;
4622 MCRegister FirstReg;
4623 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4624
4625 // Put back the original left bracket if there was no match, so that
4626 // different types of list-operands can be matched (e.g. SVE, Neon).
4627 if (ParseRes.isNoMatch())
4628 Parser.getLexer().UnLex(LCurly);
4629
4630 if (!ParseRes.isSuccess())
4631 return ParseRes;
4632
4633 int64_t PrevReg = FirstReg;
4634 unsigned Count = 1;
4635
4636 int Stride = 1;
4637 if (parseOptionalToken(AsmToken::Minus)) {
4638 SMLoc Loc = getLoc();
4639 StringRef NextKind;
4640
4641 MCRegister Reg;
4642 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4643 if (!ParseRes.isSuccess())
4644 return ParseRes;
4645
4646 // Any kind suffixes must match on all regs in the list.
4647 if (Kind != NextKind)
4648 return Error(Loc, "mismatched register size suffix");
4649
4650 unsigned Space =
4651 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4652
4653 if (Space == 0 || Space > 3)
4654 return Error(Loc, "invalid number of vectors");
4655
4656 Count += Space;
4657 }
4658 else {
4659 bool HasCalculatedStride = false;
4660 while (parseOptionalToken(AsmToken::Comma)) {
4661 SMLoc Loc = getLoc();
4662 StringRef NextKind;
4663 MCRegister Reg;
4664 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4665 if (!ParseRes.isSuccess())
4666 return ParseRes;
4667
4668 // Any kind suffixes must match on all regs in the list.
4669 if (Kind != NextKind)
4670 return Error(Loc, "mismatched register size suffix");
4671
4672 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4673 unsigned PrevRegVal =
4674 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4675 if (!HasCalculatedStride) {
4676 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4677 : (RegVal + NumRegs - PrevRegVal);
4678 HasCalculatedStride = true;
4679 }
4680
4681 // Registers must be incremental (wrapping around at the last register).
4682 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4683 return Error(Loc, "registers must have the same sequential stride");
4684
4685 PrevReg = Reg;
4686 ++Count;
4687 }
4688 }
4689
4690 if (parseToken(AsmToken::RCurly, "'}' expected"))
4691 return ParseStatus::Failure;
4692
4693 if (Count > 4)
4694 return Error(S, "invalid number of vectors");
4695
4696 unsigned NumElements = 0;
4697 unsigned ElementWidth = 0;
4698 if (!Kind.empty()) {
4699 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4700 std::tie(NumElements, ElementWidth) = *VK;
4701 }
4702
4703 Operands.push_back(AArch64Operand::CreateVectorList(
4704 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4705 getLoc(), getContext()));
4706
4707 return ParseStatus::Success;
4708}
4709
4710/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
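/// For example (illustrative): "{ v0.8b, v1.8b }", optionally followed by a
/// lane index as in "ld2 { v0.s, v1.s }[1], [x0]".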
4711bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4712 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4713 if (!ParseRes.isSuccess())
4714 return true;
4715
4716 return tryParseVectorIndex(Operands).isFailure();
4717}
4718
4719ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4720 SMLoc StartLoc = getLoc();
4721
4722 MCRegister RegNum;
4723 ParseStatus Res = tryParseScalarRegister(RegNum);
4724 if (!Res.isSuccess())
4725 return Res;
4726
4727 if (!parseOptionalToken(AsmToken::Comma)) {
4728 Operands.push_back(AArch64Operand::CreateReg(
4729 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4730 return ParseStatus::Success;
4731 }
4732
4733 parseOptionalToken(AsmToken::Hash);
4734
4735 if (getTok().isNot(AsmToken::Integer))
4736 return Error(getLoc(), "index must be absent or #0");
4737
4738 const MCExpr *ImmVal;
4739 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4740 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4741 return Error(getLoc(), "index must be absent or #0");
4742
4743 Operands.push_back(AArch64Operand::CreateReg(
4744 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4745 return ParseStatus::Success;
4746}
4747
4748ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4749 SMLoc StartLoc = getLoc();
4750 const AsmToken &Tok = getTok();
4751 std::string Name = Tok.getString().lower();
4752
4753 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4754
4755 if (RegNum == 0)
4756 return ParseStatus::NoMatch;
4757
4758 Operands.push_back(AArch64Operand::CreateReg(
4759 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4760 Lex(); // Eat register.
4761
4762 // Check if the register is followed by an index.
4763 if (parseOptionalToken(AsmToken::LBrac)) {
4764 Operands.push_back(
4765 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4766 const MCExpr *ImmVal;
4767 if (getParser().parseExpression(ImmVal))
4768 return ParseStatus::NoMatch;
4769 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4770 if (!MCE)
4771 return TokError("immediate value expected for vector index");
4772 Operands.push_back(AArch64Operand::CreateImm(
4773 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4774 getLoc(), getContext()));
4775 if (parseOptionalToken(AsmToken::Comma))
4776 if (parseOptionalMulOperand(Operands))
4777 return ParseStatus::Failure;
4778 if (parseToken(AsmToken::RBrac, "']' expected"))
4779 return ParseStatus::Failure;
4780 Operands.push_back(
4781 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4782 }
4783 return ParseStatus::Success;
4784}
4785
4786template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4787ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4788 SMLoc StartLoc = getLoc();
4789
4790 MCRegister RegNum;
4791 ParseStatus Res = tryParseScalarRegister(RegNum);
4792 if (!Res.isSuccess())
4793 return Res;
4794
4795 // No shift/extend is the default.
4796 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4797 Operands.push_back(AArch64Operand::CreateReg(
4798 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4799 return ParseStatus::Success;
4800 }
4801
4802 // Eat the comma
4803 Lex();
4804
4805 // Match the shift/extend.
4806 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4807 Res = tryParseOptionalShiftExtend(ExtOpnd);
4808 if (!Res.isSuccess())
4809 return Res;
4810
4811 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4812 Operands.push_back(AArch64Operand::CreateReg(
4813 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4814 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4815 Ext->hasShiftExtendAmount()));
4816
4817 return ParseStatus::Success;
4818}
4819
4820bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4821 MCAsmParser &Parser = getParser();
4822
4823 // Some SVE instructions have a decoration after the immediate, e.g.
4824 // "mul vl". We parse it here and add the tokens, which must be present in
4825 // the asm string in the tablegen instruction.
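// For example (illustrative): the "#1, mul vl" in
// "ld1b { z0.b }, p0/z, [x0, #1, mul vl]", or the "mul #4" in
// "cntb x0, all, mul #4".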
4826 bool NextIsVL =
4827 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4828 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4829 if (!getTok().getString().equals_insensitive("mul") ||
4830 !(NextIsVL || NextIsHash))
4831 return true;
4832
4833 Operands.push_back(
4834 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4835 Lex(); // Eat the "mul"
4836
4837 if (NextIsVL) {
4838 Operands.push_back(
4839 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4840 Lex(); // Eat the "vl"
4841 return false;
4842 }
4843
4844 if (NextIsHash) {
4845 Lex(); // Eat the #
4846 SMLoc S = getLoc();
4847
4848 // Parse immediate operand.
4849 const MCExpr *ImmVal;
4850 if (!Parser.parseExpression(ImmVal))
4851 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4852 Operands.push_back(AArch64Operand::CreateImm(
4853 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4854 getContext()));
4855 return false;
4856 }
4857 }
4858
4859 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4860}
4861
4862bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4863 StringRef &VecGroup) {
4864 MCAsmParser &Parser = getParser();
4865 auto Tok = Parser.getTok();
4866 if (Tok.isNot(AsmToken::Identifier))
4867 return true;
4868
4870 .Case("vgx2", "vgx2")
4871 .Case("vgx4", "vgx4")
4872 .Default("");
4873
4874 if (VG.empty())
4875 return true;
4876
4877 VecGroup = VG;
4878 Parser.Lex(); // Eat vgx[2|4]
4879 return false;
4880}
4881
4882bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4883 auto Tok = getTok();
4884 if (Tok.isNot(AsmToken::Identifier))
4885 return true;
4886
4887 auto Keyword = Tok.getString();
4888 Keyword = StringSwitch<StringRef>(Keyword.lower())
4889 .Case("sm", "sm")
4890 .Case("za", "za")
4891 .Default(Keyword);
4892 Operands.push_back(
4893 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4894