LLVM 20.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
// The different kinds of register a register name can be classified as by
// this parser (see the tryParse*Register helpers and matchRegisterNameAlias).
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};
76
77enum class MatrixKind { Array, Tile, Row, Col };
78
// How a parsed register may relate to the register the instruction expects:
// the register itself, its super-register, or its sub-register (e.g. for
// GPR64as32 / GPR32as64 operands).
enum RegConstraintEqualityTy { EqualsReg, EqualsSuperReg, EqualsSubReg };
84
/// Target assembly parser for AArch64: classifies registers and operands,
/// handles target directives, and matches parsed instructions to MCInsts.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.

92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 unsigned getDstReg() const { return Dst; }
140 unsigned getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 unsigned Dst;
150 unsigned Pg;
151 } NextPrefix;
152
  // NOTE(review): the declaration of TS is elided in this excerpt.
  AArch64TargetStreamer &getTargetStreamer() {
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the lexer's current token (used for diagnostics).
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Instruction/operand parsing helpers.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  // NOTE(review): the continuation of this declaration is elided here.
  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  // Target directive handlers.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // .seh_* (Windows unwind) directive handlers.
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  // NOTE(review): parts of the next two declarations are elided here.
  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced from the generated matcher.
  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;
  bool IsWindowsArm64EC;

  // NOTE(review): several constructor statements are elided in this excerpt
  // (including the initialization of IsILP32 and the streamer setup).
  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
    if (S.getTargetStreamer() == nullptr)

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  // MCTargetAsmParser overrides. NOTE(review): some continuation lines of
  // these declarations are elided in this excerpt.
  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
                    SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
                     SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
                                      unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  // Decompose Expr into its modifier kind(s) and addend.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
340
341/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
342/// instruction.
343class AArch64Operand : public MCParsedAsmOperand {
344private:
  // Discriminator for the payload union below; exactly one union member is
  // active, selected by this tag (see the copy constructor's switch).
  enum KindTy {
    k_Immediate,      // Imm
    k_ShiftedImm,     // ShiftedImm
    k_ImmRange,       // ImmRange
    k_CondCode,       // CondCode
    k_Register,       // Reg
    k_MatrixRegister, // MatrixReg
    k_MatrixTileList, // MatrixTileList
    k_SVCR,           // SVCR
    k_VectorList,     // VectorList
    k_VectorIndex,    // VectorIndex
    k_Token,          // Tok
    k_SysReg,         // SysReg
    k_SysCR,          // SysCRImm
    k_Prefetch,       // Prefetch
    k_ShiftExtend,    // ShiftExtend
    k_FPImm,          // FPImm
    k_Barrier,        // Barrier
    k_PSBHint,        // PSBHint
    k_BTIHint,        // BTIHint
  } Kind;
366
  // Source range of the operand's tokens.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  // NOTE(review): the shift/extend type field is elided in this excerpt
  // (getShiftExtendType() reads a ShiftExtend.Type member).
  struct ShiftExtendOp {
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // Immediate plus an explicit left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range (First..Last).
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  // NOTE(review): the condition-code field is elided in this excerpt
  // (the copy constructor copies a CondCode member).
  struct CondCodeOp {
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  // Payload storage: the member named by Kind is the active one.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
522
public:
  /// Construct an operand of kind K; the caller is responsible for
  /// initializing the matching payload member.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
525
  /// Copy constructor: copies the kind and source locations, then copies
  /// only the union member that Kind designates as active.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
590
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Payload accessors: each asserts that Kind matches before reading the
  // corresponding union member.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }
630
632 assert(Kind == k_CondCode && "Invalid access!");
633 return CondCode.Code;
634 }
635
  /// Reinterpret the stored 64-bit pattern as an IEEE double APFloat.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors accept both a standalone k_ShiftExtend
  // operand and a k_Register operand carrying an embedded shift/extend.

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
779
  /// True for plain (unshifted) immediate operands.
  bool isImm() const override { return Kind == k_Immediate; }
  /// This parser never produces the generic memory-operand kind.
  bool isMem() const override { return false; }
782
783 bool isUImm6() const {
784 if (!isImm())
785 return false;
786 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
787 if (!MCE)
788 return false;
789 int64_t Val = MCE->getValue();
790 return (Val >= 0 && Val < 64);
791 }
792
  /// Signed immediate of Width bits (unscaled).
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  /// Signed immediate of Bits bits that is a multiple of Scale.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  /// Unsigned immediate of Bits bits, a multiple of Scale. When IsRange is
  /// set, the operand must be an immediate range whose last value equals
  /// first + Offset.
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
807
808 template <int Bits, int Scale, bool IsRange = false>
809 DiagnosticPredicate isImmScaled(bool Signed) const {
810 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
811 (isImmRange() && !IsRange))
812 return DiagnosticPredicateTy::NoMatch;
813
814 int64_t Val;
815 if (isImmRange())
816 Val = getFirstImmVal();
817 else {
818 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
819 if (!MCE)
820 return DiagnosticPredicateTy::NoMatch;
821 Val = MCE->getValue();
822 }
823
824 int64_t MinVal, MaxVal;
825 if (Signed) {
826 int64_t Shift = Bits - 1;
827 MinVal = (int64_t(1) << Shift) * -Scale;
828 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
829 } else {
830 MinVal = 0;
831 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
832 }
833
834 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
835 return DiagnosticPredicateTy::Match;
836
837 return DiagnosticPredicateTy::NearMatch;
838 }
839
840 DiagnosticPredicate isSVEPattern() const {
841 if (!isImm())
842 return DiagnosticPredicateTy::NoMatch;
843 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
844 if (!MCE)
845 return DiagnosticPredicateTy::NoMatch;
846 int64_t Val = MCE->getValue();
847 if (Val >= 0 && Val < 32)
848 return DiagnosticPredicateTy::Match;
849 return DiagnosticPredicateTy::NearMatch;
850 }
851
852 DiagnosticPredicate isSVEVecLenSpecifier() const {
853 if (!isImm())
854 return DiagnosticPredicateTy::NoMatch;
855 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
856 if (!MCE)
857 return DiagnosticPredicateTy::NoMatch;
858 int64_t Val = MCE->getValue();
859 if (Val >= 0 && Val <= 1)
860 return DiagnosticPredicateTy::Match;
861 return DiagnosticPredicateTy::NearMatch;
862 }
863
  // Whether Expr is acceptable as a symbolic uimm12 offset (e.g. a :lo12:
  // or @pageoff modified symbol reference).
  // NOTE(review): the declaration of ELFRefKind (and one modifier in the
  // comparison chain) is elided in this excerpt.
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
899
900 template <int Scale> bool isUImm12Offset() const {
901 if (!isImm())
902 return false;
903
904 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
905 if (!MCE)
906 return isSymbolicUImm12Offset(getImm());
907
908 int64_t Val = MCE->getValue();
909 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
910 }
911
912 template <int N, int M>
913 bool isImmInRange() const {
914 if (!isImm())
915 return false;
916 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
917 if (!MCE)
918 return false;
919 int64_t Val = MCE->getValue();
920 return (Val >= N && Val <= M);
921 }
922
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask covering the bits above T's width.
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the truncated value against the logical-immediate encoding.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
942
  /// True for immediates carrying an explicit shift amount.
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// True for inclusive immediate-range operands (First..Last).
  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // Prefer the shifted form when the low Width bits are all zero.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
967
  // Whether this operand can serve as an ADD/SUB immediate: a 12-bit value
  // with an optional 'lsl #12', or a suitable relocation modifier.
  // NOTE(review): the declaration of ELFRefKind is elided in this excerpt.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1012
1013 bool isAddSubImmNeg() const {
1014 if (!isShiftedImm() && !isImm())
1015 return false;
1016
1017 // Otherwise it should be a real negative immediate in range.
1018 if (auto ShiftedVal = getShiftedVal<12>())
1019 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1020
1021 return false;
1022 }
1023
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the shifted ('lsl #8') form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the shifted ('lsl #8') form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1062
1063 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1064 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1065 return DiagnosticPredicateTy::Match;
1066 return DiagnosticPredicateTy::NoMatch;
1067 }
1068
  /// True for condition-code operands.
  bool isCondCode() const { return Kind == k_CondCode; }

  // NOTE(review): the final check of the constant (against the AdvSIMD
  // modified-immediate type-10 encoding) is elided in this excerpt.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
  }
1079
1080 template<int N>
1081 bool isBranchTarget() const {
1082 if (!isImm())
1083 return false;
1084 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1085 if (!MCE)
1086 return true;
1087 int64_t Val = MCE->getValue();
1088 if (Val & 0x3)
1089 return false;
1090 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1091 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1092 }
1093
1094 bool
1095 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1096 if (!isImm())
1097 return false;
1098
1099 AArch64MCExpr::VariantKind ELFRefKind;
1100 MCSymbolRefExpr::VariantKind DarwinRefKind;
1101 int64_t Addend;
1102 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1103 DarwinRefKind, Addend)) {
1104 return false;
1105 }
1106 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1107 return false;
1108
1109 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1110 }
1111
1112 bool isMovWSymbolG3() const {
1114 }
1115
1116 bool isMovWSymbolG2() const {
1117 return isMovWSymbol(
1122 }
1123
1124 bool isMovWSymbolG1() const {
1125 return isMovWSymbol(
1131 }
1132
1133 bool isMovWSymbolG0() const {
1134 return isMovWSymbol(
1140 }
1141
1142 template<int RegWidth, int Shift>
1143 bool isMOVZMovAlias() const {
1144 if (!isImm()) return false;
1145
1146 const MCExpr *E = getImm();
1147 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1148 uint64_t Value = CE->getValue();
1149
1150 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1151 }
1152 // Only supports the case of Shift being 0 if an expression is used as an
1153 // operand
1154 return !Shift && E;
1155 }
1156
1157 template<int RegWidth, int Shift>
1158 bool isMOVNMovAlias() const {
1159 if (!isImm()) return false;
1160
1161 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1162 if (!CE) return false;
1163 uint64_t Value = CE->getValue();
1164
1165 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1166 }
1167
  // FP immediate exactly representable in the 8-bit AArch64 FP-imm encoding.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1180
  // System register readable via MRS (MRSReg was resolved at parse time).
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // MSR pstate field that takes a 1-bit immediate.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // MSR pstate field that takes a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // SVCR operand with a recognised pstate field.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1208
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Scalar (non-vector) register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the "_lo" register classes
  // (lower part of the vector register file).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  // NEON vector register restricted to v0-v7 (FPR128_0to7 class).
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1239
1240 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1241 RegKind RK;
1242 switch (Class) {
1243 case AArch64::PPRRegClassID:
1244 case AArch64::PPR_3bRegClassID:
1245 case AArch64::PPR_p8to15RegClassID:
1246 case AArch64::PNRRegClassID:
1247 case AArch64::PNR_p8to15RegClassID:
1248 case AArch64::PPRorPNRRegClassID:
1249 RK = RegKind::SVEPredicateAsCounter;
1250 break;
1251 default:
1252 llvm_unreachable("Unsupport register class");
1253 }
1254
1255 return (Kind == k_Register && Reg.Kind == RK) &&
1256 AArch64MCRegisterClasses[Class].contains(getReg());
1257 }
1258
1259 template <unsigned Class> bool isSVEVectorReg() const {
1260 RegKind RK;
1261 switch (Class) {
1262 case AArch64::ZPRRegClassID:
1263 case AArch64::ZPR_3bRegClassID:
1264 case AArch64::ZPR_4bRegClassID:
1265 RK = RegKind::SVEDataVector;
1266 break;
1267 case AArch64::PPRRegClassID:
1268 case AArch64::PPR_3bRegClassID:
1269 case AArch64::PPR_p8to15RegClassID:
1270 case AArch64::PNRRegClassID:
1271 case AArch64::PNR_p8to15RegClassID:
1272 case AArch64::PPRorPNRRegClassID:
1273 RK = RegKind::SVEPredicateVector;
1274 break;
1275 default:
1276 llvm_unreachable("Unsupport register class");
1277 }
1278
1279 return (Kind == k_Register && Reg.Kind == RK) &&
1280 AArch64MCRegisterClasses[Class].contains(getReg());
1281 }
1282
  // Scalar FP/SIMD register used where a Z register operand is expected.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1287
1288 template <int ElementWidth, unsigned Class>
1289 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1290 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1291 return DiagnosticPredicateTy::NoMatch;
1292
1293 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1294 return DiagnosticPredicateTy::Match;
1295
1296 return DiagnosticPredicateTy::NearMatch;
1297 }
1298
  // Accept either a predicate-as-counter or a predicate-vector register of
  // the requested class, provided the element width also matches.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicateTy::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1312
1313 template <int ElementWidth, unsigned Class>
1314 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1315 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1316 return DiagnosticPredicateTy::NoMatch;
1317
1318 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1319 return DiagnosticPredicateTy::Match;
1320
1321 return DiagnosticPredicateTy::NearMatch;
1322 }
1323
1324 template <int ElementWidth, unsigned Class>
1325 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1326 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1327 return DiagnosticPredicateTy::NoMatch;
1328
1329 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1330 return DiagnosticPredicateTy::Match;
1331
1332 return DiagnosticPredicateTy::NearMatch;
1333 }
1334
  // SVE data vector register combined with a shift/extend of the expected
  // type and amount (amount = log2 of the access width in bytes).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1357
  // A 64-bit GPR name used where the instruction encodes a 32-bit GPR.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // A 32-bit GPR name used where the instruction encodes a 64-bit GPR.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Member of the GPR64x8 (eight consecutive X registers) class.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential pair of W registers (WSeqPairs class).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential pair of X registers (XSeqPairs class).
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // XZR accepted in the SYSP register-pair position.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1389
1390 template<int64_t Angle, int64_t Remainder>
1391 DiagnosticPredicate isComplexRotation() const {
1392 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1393
1394 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1395 if (!CE) return DiagnosticPredicateTy::NoMatch;
1396 uint64_t Value = CE->getValue();
1397
1398 if (Value % Angle == Remainder && Value <= 270)
1399 return DiagnosticPredicateTy::Match;
1400 return DiagnosticPredicateTy::NearMatch;
1401 }
1402
  // Scalar register contained in the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR with an explicit LSL shift-extend whose amount equals
  // log2(ExtWidth / 8).
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1427
1428 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1429 unsigned ElementWidth, unsigned Stride = 1>
1430 bool isTypedVectorList() const {
1431 if (Kind != k_VectorList)
1432 return false;
1433 if (VectorList.Count != NumRegs)
1434 return false;
1435 if (VectorList.RegisterKind != VectorKind)
1436 return false;
1437 if (VectorList.ElementWidth != ElementWidth)
1438 return false;
1439 if (VectorList.Stride != Stride)
1440 return false;
1441 return VectorList.NumElements == NumElements;
1442 }
1443
  // Typed vector list whose first register number is a multiple of NumRegs
  // (consecutive multi-vector operands).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  // Strided vector list: the first register must lie within the first
  // Stride registers of either the z0 or the z16 bank.
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1469
1470 template <int Min, int Max>
1471 DiagnosticPredicate isVectorIndex() const {
1472 if (Kind != k_VectorIndex)
1473 return DiagnosticPredicateTy::NoMatch;
1474 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1475 return DiagnosticPredicateTy::Match;
1476 return DiagnosticPredicateTy::NearMatch;
1477 }
1478
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shifter is a shift-type modifier, as opposed to a register extend.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1498
  // Check this FP immediate against one entry of the exact-FP-immediate
  // table (ImmEnum); the operand must be bitwise equal to that value.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1521
1522 template <unsigned ImmA, unsigned ImmB>
1523 DiagnosticPredicate isExactFPImm() const {
1524 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1525 if ((Res = isExactFPImm<ImmA>()))
1526 return DiagnosticPredicateTy::Match;
1527 if ((Res = isExactFPImm<ImmB>()))
1528 return DiagnosticPredicateTy::Match;
1529 return Res;
1530 }
1531
  // Register-extend modifier (uxtb..sxtx, or lsl) with amount 0-4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // Extend usable with a 64-bit destination and 32-bit (or narrower) source.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit source register: uxtx/sxtx/lsl, amount 0-4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // LSL with a 3-bit (0-7) shift amount.
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  // Memory-operand extend for a 64-bit index register: lsl/sxtx with an
  // amount of either 0 or log2 of the access width in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Memory-operand extend for a 32-bit index register: uxtw/sxtw with an
  // amount of either 0 or log2 of the access width in bytes.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1588
  // Shift amount must be strictly less than the operand width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shift amount must be strictly less than the operand width.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1611
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (Comment previously described the 64-bit variant; fixed to match code.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (Comment previously described the 32-bit variant; fixed to match code.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1635
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    // (Comment previously said "logical"; the type checked is MSL.)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1665
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    // Width is the access width in bits; the scaled form uses byte units.
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1675
1676 bool isAdrpLabel() const {
1677 // Validation was handled during parsing, so we just verify that
1678 // something didn't go haywire.
1679 if (!isImm())
1680 return false;
1681
1682 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1683 int64_t Val = CE->getValue();
1684 int64_t Min = - (4096 * (1LL << (21 - 1)));
1685 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1686 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1687 }
1688
1689 return true;
1690 }
1691
1692 bool isAdrLabel() const {
1693 // Validation was handled during parsing, so we just verify that
1694 // something didn't go haywire.
1695 if (!isImm())
1696 return false;
1697
1698 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1699 int64_t Val = CE->getValue();
1700 int64_t Min = - (1LL << (21 - 1));
1701 int64_t Max = ((1LL << (21 - 1)) - 1);
1702 return Val >= Min && Val <= Max;
1703 }
1704
1705 return true;
1706 }
1707
  // Matrix (ZA) register operand of the given kind, register class and
  // element width. NOTE: the template parameter Kind shadows the member.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Must be word-aligned and within (-2^18, 0].
    if (Val & 0b11)
      return false;
    return (Val <= 0) && (Val > -(1 << 18));
  }
1733
1734 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1735 // Add as immediates when possible. Null MCExpr = 0.
1736 if (!Expr)
1738 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1739 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1740 else
1742 }
1743
1744 void addRegOperands(MCInst &Inst, unsigned N) const {
1745 assert(N == 1 && "Invalid number of operands!");
1747 }
1748
  // Emit the matrix (ZA) register.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1753
1754 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1755 assert(N == 1 && "Invalid number of operands!");
1756 assert(
1757 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1758
1759 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1760 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1761 RI->getEncodingValue(getReg()));
1762
1764 }
1765
1766 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!");
1768 assert(
1769 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1770
1771 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1772 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1773 RI->getEncodingValue(getReg()));
1774
1776 }
1777
  // Map a scalar FP register (bN/hN/sN/dN/qN, selected by Width) to the
  // corresponding zN register.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8: Base = AArch64::B0; break;
    case 16: Base = AArch64::H0; break;
    case 32: Base = AArch64::S0; break;
    case 64: Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1792
1793 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1794 assert(N == 1 && "Invalid number of operands!");
1795 unsigned Reg = getReg();
1796 // Normalise to PPR
1797 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1798 Reg = Reg - AArch64::PN0 + AArch64::P0;
1800 }
1801
  // Emit a pnN register using its pN counterpart.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }

  // Emit a parsed qN register as its dN alias.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1814
1815 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1816 assert(N == 1 && "Invalid number of operands!");
1817 assert(
1818 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1820 }
1821
1822 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1823 assert(N == 1 && "Invalid number of operands!");
1825 }
1826
1827 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1828 assert(N == 1 && "Invalid number of operands!");
1830 }
1831
  // Row selector for the FirstRegs table in addVectorListOperands.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };

  // Emit the tuple register for a vector list: entry [RegTy][0] is the base
  // register the parser stored, entries [RegTy][1..4] are the first tuple
  // registers for lists of that many registers.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0, AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0, AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0, AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0, AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                    FirstRegs[(unsigned)RegTy][0]));
  }
1866
1867 template <unsigned NumRegs>
1868 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1869 assert(N == 1 && "Invalid number of operands!");
1870 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1871
1872 switch (NumRegs) {
1873 case 2:
1874 if (getVectorListStart() < AArch64::Z16) {
1875 assert((getVectorListStart() < AArch64::Z8) &&
1876 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1878 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1879 } else {
1880 assert((getVectorListStart() < AArch64::Z24) &&
1881 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1883 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1884 }
1885 break;
1886 case 4:
1887 if (getVectorListStart() < AArch64::Z16) {
1888 assert((getVectorListStart() < AArch64::Z4) &&
1889 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1891 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1892 } else {
1893 assert((getVectorListStart() < AArch64::Z20) &&
1894 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1896 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1897 }
1898 break;
1899 default:
1900 llvm_unreachable("Unsupported number of registers for strided vec list");
1901 }
1902 }
1903
  // A matrix tile list is encoded as an 8-bit register-mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // An exact FP immediate is encoded as a 1-bit selector: emit 1 when the
  // value matches the ImmIs1 table entry, 0 otherwise.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1930
1931 template <int Shift>
1932 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1933 assert(N == 2 && "Invalid number of operands!");
1934 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1935 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1936 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1937 } else if (isShiftedImm()) {
1938 addExpr(Inst, getShiftedImmVal());
1939 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1940 } else {
1941 addExpr(Inst, getImm());
1943 }
1944 }
1945
  // Emit (negated value, shift) for an optionally-shifted immediate; used
  // by negated-immediate instruction aliases.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1955
1956 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!");
1959 }
1960
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      // Constant offsets are encoded in 4KB page units.
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  // Scaled unsigned 12-bit offset; symbolic offsets pass through unscaled.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1985
1986 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1990 }
1991
  // Emit the constant immediate divided by Scale.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // Emit the first value of an immediate range, divided by Scale.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
  }

  // Encode a logical immediate into its hardware encoding for an element
  // width of sizeof(T) * 8 bits.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same as addLogicalImmOperands, but encodes the bitwise NOT of the value.
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
2022
  // Add a SIMD modified-immediate (type 10) operand from a constant.
  // NOTE(review): the line computing `encoding` is missing from this
  // extract — confirm against upstream.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
2029
2030 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2031 // Branch operands don't encode the low bits, so shift them off
2032 // here. If it's a label, however, just put it on directly as there's
2033 // not enough information now to do anything.
2034 assert(N == 1 && "Invalid number of operands!");
2035 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2036 if (!MCE) {
2037 addExpr(Inst, getImm());
2038 return;
2039 }
2040 assert(MCE && "Invalid constant immediate operand!");
2041 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2042 }
2043
2044 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2045 // PC-relative operands don't encode the low bits, so shift them off
2046 // here. If it's a label, however, just put it on directly as there's
2047 // not enough information now to do anything.
2048 assert(N == 1 && "Invalid number of operands!");
2049 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2050 if (!MCE) {
2051 addExpr(Inst, getImm());
2052 return;
2053 }
2054 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2055 }
2056
2057 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2058 // Branch operands don't encode the low bits, so shift them off
2059 // here. If it's a label, however, just put it on directly as there's
2060 // not enough information now to do anything.
2061 assert(N == 1 && "Invalid number of operands!");
2062 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2063 if (!MCE) {
2064 addExpr(Inst, getImm());
2065 return;
2066 }
2067 assert(MCE && "Invalid constant immediate operand!");
2068 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2069 }
2070
2071 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2072 // Branch operands don't encode the low bits, so shift them off
2073 // here. If it's a label, however, just put it on directly as there's
2074 // not enough information now to do anything.
2075 assert(N == 1 && "Invalid number of operands!");
2076 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2077 if (!MCE) {
2078 addExpr(Inst, getImm());
2079 return;
2080 }
2081 assert(MCE && "Invalid constant immediate operand!");
2082 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2083 }
2084
  // Add an FP immediate encoded via its 8-bit FMOV representation.
  // NOTE(review): the line opening the Inst.addOperand(...) call is missing
  // from this extract — confirm against upstream.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
2090
2091 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2092 assert(N == 1 && "Invalid number of operands!");
2093 Inst.addOperand(MCOperand::createImm(getBarrier()));
2094 }
2095
2096 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2097 assert(N == 1 && "Invalid number of operands!");
2098 Inst.addOperand(MCOperand::createImm(getBarrier()));
2099 }
2100
2101 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2102 assert(N == 1 && "Invalid number of operands!");
2103
2104 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2105 }
2106
2107 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2108 assert(N == 1 && "Invalid number of operands!");
2109
2110 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2111 }
2112
2113 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2114 assert(N == 1 && "Invalid number of operands!");
2115
2116 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2117 }
2118
2119 void addSVCROperands(MCInst &Inst, unsigned N) const {
2120 assert(N == 1 && "Invalid number of operands!");
2121
2122 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2123 }
2124
2125 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2126 assert(N == 1 && "Invalid number of operands!");
2127
2128 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2129 }
2130
2131 void addSysCROperands(MCInst &Inst, unsigned N) const {
2132 assert(N == 1 && "Invalid number of operands!");
2133 Inst.addOperand(MCOperand::createImm(getSysCR()));
2134 }
2135
2136 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2137 assert(N == 1 && "Invalid number of operands!");
2138 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2139 }
2140
2141 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2142 assert(N == 1 && "Invalid number of operands!");
2143 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2144 }
2145
2146 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2147 assert(N == 1 && "Invalid number of operands!");
2148 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2149 }
2150
  // Add a shifter operand: shift type and amount packed into one immediate.
  // NOTE(review): the Inst.addOperand(...) line is missing from this
  // extract — confirm against upstream.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
  }
2157
  // Add a 3-bit LSL shift amount as a plain immediate.
  // NOTE(review): the Inst.addOperand(...) line is missing from this
  // extract — confirm against upstream.
  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();
  }
2163
  // SYSP's register-pair operand: only XZR is accepted (enforced below), and
  // it is materialized as a single XZR register operand.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Nothing is added if the operand is not a scalar register.
    if (!isScalarReg())
      return;

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    // NOTE(review): the continuation of this statement is missing from this
    // extract — confirm against upstream.
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2178
  // Add an arithmetic-extend operand for 32-bit forms; a plain LSL is
  // canonicalized to UXTW before encoding.
  // NOTE(review): the Inst.addOperand(...) line is missing from this
  // extract — confirm against upstream.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
  }
2186
  // Add an arithmetic-extend operand for 64-bit forms; a plain LSL is
  // canonicalized to UXTX before encoding.
  // NOTE(review): the Inst.addOperand(...) line is missing from this
  // extract — confirm against upstream.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
  }
2194
2195 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 2 && "Invalid number of operands!");
2197 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2198 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2199 Inst.addOperand(MCOperand::createImm(IsSigned));
2200 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2201 }
2202
2203 // For 8-bit load/store instructions with a register offset, both the
2204 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2205 // they're disambiguated by whether the shift was explicit or implicit rather
2206 // than its size.
2207 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2208 assert(N == 2 && "Invalid number of operands!");
2209 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2210 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2211 Inst.addOperand(MCOperand::createImm(IsSigned));
2212 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2213 }
2214
2215 template<int Shift>
2216 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2217 assert(N == 1 && "Invalid number of operands!");
2218
2219 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2220 if (CE) {
2221 uint64_t Value = CE->getValue();
2222 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2223 } else {
2224 addExpr(Inst, getImm());
2225 }
2226 }
2227
2228 template<int Shift>
2229 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 1 && "Invalid number of operands!");
2231
2232 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2233 uint64_t Value = CE->getValue();
2234 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2235 }
2236
2237 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2238 assert(N == 1 && "Invalid number of operands!");
2239 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2240 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2241 }
2242
2243 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2244 assert(N == 1 && "Invalid number of operands!");
2245 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2246 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2247 }
2248
2249 void print(raw_ostream &OS) const override;
2250
2251 static std::unique_ptr<AArch64Operand>
2252 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2253 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2254 Op->Tok.Data = Str.data();
2255 Op->Tok.Length = Str.size();
2256 Op->Tok.IsSuffix = IsSuffix;
2257 Op->StartLoc = S;
2258 Op->EndLoc = S;
2259 return Op;
2260 }
2261
  // Factory for scalar/vector register operands. EqTy describes the
  // register-equality constraint (see RegConstraintEqualityTy); ElementWidth
  // starts at 0 and is filled in by CreateVectorReg for vector kinds.
  // NOTE(review): the parameter line declaring ExtTy (with its default
  // value) is missing from this extract — confirm against upstream.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2280
  // Factory for vector-register operands: delegates to CreateReg and then
  // records the element width. Only vector-ish kinds are legal here.
  // NOTE(review): the parameter line declaring ExtTy (with its default
  // value) is missing from this extract — confirm against upstream.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2296
2297 static std::unique_ptr<AArch64Operand>
2298 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2299 unsigned NumElements, unsigned ElementWidth,
2300 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2301 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2302 Op->VectorList.RegNum = RegNum;
2303 Op->VectorList.Count = Count;
2304 Op->VectorList.Stride = Stride;
2305 Op->VectorList.NumElements = NumElements;
2306 Op->VectorList.ElementWidth = ElementWidth;
2307 Op->VectorList.RegisterKind = RegisterKind;
2308 Op->StartLoc = S;
2309 Op->EndLoc = E;
2310 return Op;
2311 }
2312
2313 static std::unique_ptr<AArch64Operand>
2314 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2315 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2316 Op->VectorIndex.Val = Idx;
2317 Op->StartLoc = S;
2318 Op->EndLoc = E;
2319 return Op;
2320 }
2321
2322 static std::unique_ptr<AArch64Operand>
2323 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2324 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2325 Op->MatrixTileList.RegMask = RegMask;
2326 Op->StartLoc = S;
2327 Op->EndLoc = E;
2328 return Op;
2329 }
2330
2331 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2332 const unsigned ElementWidth) {
2333 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2334 RegMap = {
2335 {{0, AArch64::ZAB0},
2336 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2337 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2338 {{8, AArch64::ZAB0},
2339 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2340 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2341 {{16, AArch64::ZAH0},
2342 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2343 {{16, AArch64::ZAH1},
2344 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2345 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2346 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2347 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2348 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2349 };
2350
2351 if (ElementWidth == 64)
2352 OutRegs.insert(Reg);
2353 else {
2354 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2355 assert(!Regs.empty() && "Invalid tile or element width!");
2356 for (auto OutReg : Regs)
2357 OutRegs.insert(OutReg);
2358 }
2359 }
2360
2361 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2362 SMLoc E, MCContext &Ctx) {
2363 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2364 Op->Imm.Val = Val;
2365 Op->StartLoc = S;
2366 Op->EndLoc = E;
2367 return Op;
2368 }
2369
2370 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2371 unsigned ShiftAmount,
2372 SMLoc S, SMLoc E,
2373 MCContext &Ctx) {
2374 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2375 Op->ShiftedImm .Val = Val;
2376 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2377 Op->StartLoc = S;
2378 Op->EndLoc = E;
2379 return Op;
2380 }
2381
2382 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2383 unsigned Last, SMLoc S,
2384 SMLoc E,
2385 MCContext &Ctx) {
2386 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2387 Op->ImmRange.First = First;
2388 Op->ImmRange.Last = Last;
2389 Op->EndLoc = E;
2390 return Op;
2391 }
2392
2393 static std::unique_ptr<AArch64Operand>
2394 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2395 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2396 Op->CondCode.Code = Code;
2397 Op->StartLoc = S;
2398 Op->EndLoc = E;
2399 return Op;
2400 }
2401
2402 static std::unique_ptr<AArch64Operand>
2403 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2404 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2405 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2406 Op->FPImm.IsExact = IsExact;
2407 Op->StartLoc = S;
2408 Op->EndLoc = S;
2409 return Op;
2410 }
2411
2412 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2413 StringRef Str,
2414 SMLoc S,
2415 MCContext &Ctx,
2416 bool HasnXSModifier) {
2417 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2418 Op->Barrier.Val = Val;
2419 Op->Barrier.Data = Str.data();
2420 Op->Barrier.Length = Str.size();
2421 Op->Barrier.HasnXSModifier = HasnXSModifier;
2422 Op->StartLoc = S;
2423 Op->EndLoc = S;
2424 return Op;
2425 }
2426
2427 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2428 uint32_t MRSReg,
2429 uint32_t MSRReg,
2430 uint32_t PStateField,
2431 MCContext &Ctx) {
2432 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2433 Op->SysReg.Data = Str.data();
2434 Op->SysReg.Length = Str.size();
2435 Op->SysReg.MRSReg = MRSReg;
2436 Op->SysReg.MSRReg = MSRReg;
2437 Op->SysReg.PStateField = PStateField;
2438 Op->StartLoc = S;
2439 Op->EndLoc = S;
2440 return Op;
2441 }
2442
2443 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2444 SMLoc E, MCContext &Ctx) {
2445 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2446 Op->SysCRImm.Val = Val;
2447 Op->StartLoc = S;
2448 Op->EndLoc = E;
2449 return Op;
2450 }
2451
2452 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2453 StringRef Str,
2454 SMLoc S,
2455 MCContext &Ctx) {
2456 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2457 Op->Prefetch.Val = Val;
2458 Op->Barrier.Data = Str.data();
2459 Op->Barrier.Length = Str.size();
2460 Op->StartLoc = S;
2461 Op->EndLoc = S;
2462 return Op;
2463 }
2464
2465 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2466 StringRef Str,
2467 SMLoc S,
2468 MCContext &Ctx) {
2469 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2470 Op->PSBHint.Val = Val;
2471 Op->PSBHint.Data = Str.data();
2472 Op->PSBHint.Length = Str.size();
2473 Op->StartLoc = S;
2474 Op->EndLoc = S;
2475 return Op;
2476 }
2477
2478 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2479 StringRef Str,
2480 SMLoc S,
2481 MCContext &Ctx) {
2482 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2483 Op->BTIHint.Val = Val | 32;
2484 Op->BTIHint.Data = Str.data();
2485 Op->BTIHint.Length = Str.size();
2486 Op->StartLoc = S;
2487 Op->EndLoc = S;
2488 return Op;
2489 }
2490
2491 static std::unique_ptr<AArch64Operand>
2492 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2493 SMLoc S, SMLoc E, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2495 Op->MatrixReg.RegNum = RegNum;
2496 Op->MatrixReg.ElementWidth = ElementWidth;
2497 Op->MatrixReg.Kind = Kind;
2498 Op->StartLoc = S;
2499 Op->EndLoc = E;
2500 return Op;
2501 }
2502
2503 static std::unique_ptr<AArch64Operand>
2504 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2505 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2506 Op->SVCR.PStateField = PStateField;
2507 Op->SVCR.Data = Str.data();
2508 Op->SVCR.Length = Str.size();
2509 Op->StartLoc = S;
2510 Op->EndLoc = S;
2511 return Op;
2512 }
2513
2514 static std::unique_ptr<AArch64Operand>
2515 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2516 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2517 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2518 Op->ShiftExtend.Type = ShOp;
2519 Op->ShiftExtend.Amount = Val;
2520 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2521 Op->StartLoc = S;
2522 Op->EndLoc = E;
2523 return Op;
2524 }
2525};
2526
2527} // end anonymous namespace.
2528
// Debug-print the operand in a kind-specific, mostly angle-bracketed form.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Prints the raw bit pattern; "(inexact)" marks a value that was not
    // exactly representable when parsed.
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Prints each register number in the list, honoring the stride.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    // Prints the 8-bit tile mask MSB-first as a string of 0s and 1s.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    // A register with an attached shift/extend also prints it via the
    // k_ShiftExtend case below.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>"; // Amount was implicit rather than written in the source.
    OS << '>';
    break;
  }
}
2628
2629/// @name Auto-generated Match Functions
2630/// {
2631
2633
2634/// }
2635
2637 return StringSwitch<unsigned>(Name.lower())
2638 .Case("v0", AArch64::Q0)
2639 .Case("v1", AArch64::Q1)
2640 .Case("v2", AArch64::Q2)
2641 .Case("v3", AArch64::Q3)
2642 .Case("v4", AArch64::Q4)
2643 .Case("v5", AArch64::Q5)
2644 .Case("v6", AArch64::Q6)
2645 .Case("v7", AArch64::Q7)
2646 .Case("v8", AArch64::Q8)
2647 .Case("v9", AArch64::Q9)
2648 .Case("v10", AArch64::Q10)
2649 .Case("v11", AArch64::Q11)
2650 .Case("v12", AArch64::Q12)
2651 .Case("v13", AArch64::Q13)
2652 .Case("v14", AArch64::Q14)
2653 .Case("v15", AArch64::Q15)
2654 .Case("v16", AArch64::Q16)
2655 .Case("v17", AArch64::Q17)
2656 .Case("v18", AArch64::Q18)
2657 .Case("v19", AArch64::Q19)
2658 .Case("v20", AArch64::Q20)
2659 .Case("v21", AArch64::Q21)
2660 .Case("v22", AArch64::Q22)
2661 .Case("v23", AArch64::Q23)
2662 .Case("v24", AArch64::Q24)
2663 .Case("v25", AArch64::Q25)
2664 .Case("v26", AArch64::Q26)
2665 .Case("v27", AArch64::Q27)
2666 .Case("v28", AArch64::Q28)
2667 .Case("v29", AArch64::Q29)
2668 .Case("v30", AArch64::Q30)
2669 .Case("v31", AArch64::Q31)
2670 .Default(0);
2671}
2672
2673/// Returns an optional pair of (#elements, element-width) if Suffix
2674/// is a valid vector kind. Where the number of elements in a vector
2675/// or the vector width is implicit or explicitly unknown (but still a
2676/// valid suffix kind), 0 is used.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the "invalid suffix" sentinel, mapped to nullopt at the end.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    // NOTE(review): the "Res = StringSwitch<std::pair<int, int>>(Suffix)"
    // line is missing from this extract — confirm against upstream.
        .Case("", {0, 0})
        .Case(".1d", {1, 64})
        .Case(".1q", {1, 128})
        // '.2h' needed for fp16 scalar pairwise reductions
        .Case(".2h", {2, 16})
        .Case(".2b", {2, 8})
        .Case(".2s", {2, 32})
        .Case(".2d", {2, 64})
        // '.4b' is another special case for the ARMv8.2a dot product
        // operand
        .Case(".4b", {4, 8})
        .Case(".4h", {4, 16})
        .Case(".4s", {4, 32})
        .Case(".8b", {8, 8})
        .Case(".8h", {8, 16})
        .Case(".16b", {16, 8})
        // Accept the width neutral ones, too, for verbose syntax. If
        // those aren't used in the right places, the token operand won't
        // match so all will work out.
        .Case(".b", {0, 8})
        .Case(".h", {0, 16})
        .Case(".s", {0, 32})
        .Case(".d", {0, 64})
        .Default({-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // NOTE(review): the StringSwitch initializer line is missing from this
    // extract — confirm against upstream.
        .Case("", {0, 0})
        .Case(".b", {0, 8})
        .Case(".h", {0, 16})
        .Case(".s", {0, 32})
        .Case(".d", {0, 64})
        .Case(".q", {0, 128})
        .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2731
2732static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2733 return parseVectorKind(Suffix, VectorKind).has_value();
2734}
2735
2737 return StringSwitch<unsigned>(Name.lower())
2738 .Case("z0", AArch64::Z0)
2739 .Case("z1", AArch64::Z1)
2740 .Case("z2", AArch64::Z2)
2741 .Case("z3", AArch64::Z3)
2742 .Case("z4", AArch64::Z4)
2743 .Case("z5", AArch64::Z5)
2744 .Case("z6", AArch64::Z6)
2745 .Case("z7", AArch64::Z7)
2746 .Case("z8", AArch64::Z8)
2747 .Case("z9", AArch64::Z9)
2748 .Case("z10", AArch64::Z10)
2749 .Case("z11", AArch64::Z11)
2750 .Case("z12", AArch64::Z12)
2751 .Case("z13", AArch64::Z13)
2752 .Case("z14", AArch64::Z14)
2753 .Case("z15", AArch64::Z15)
2754 .Case("z16", AArch64::Z16)
2755 .Case("z17", AArch64::Z17)
2756 .Case("z18", AArch64::Z18)
2757 .Case("z19", AArch64::Z19)
2758 .Case("z20", AArch64::Z20)
2759 .Case("z21", AArch64::Z21)
2760 .Case("z22", AArch64::Z22)
2761 .Case("z23", AArch64::Z23)
2762 .Case("z24", AArch64::Z24)
2763 .Case("z25", AArch64::Z25)
2764 .Case("z26", AArch64::Z26)
2765 .Case("z27", AArch64::Z27)
2766 .Case("z28", AArch64::Z28)
2767 .Case("z29", AArch64::Z29)
2768 .Case("z30", AArch64::Z30)
2769 .Case("z31", AArch64::Z31)
2770 .Default(0);
2771}
2772
2774 return StringSwitch<unsigned>(Name.lower())
2775 .Case("p0", AArch64::P0)
2776 .Case("p1", AArch64::P1)
2777 .Case("p2", AArch64::P2)
2778 .Case("p3", AArch64::P3)
2779 .Case("p4", AArch64::P4)
2780 .Case("p5", AArch64::P5)
2781 .Case("p6", AArch64::P6)
2782 .Case("p7", AArch64::P7)
2783 .Case("p8", AArch64::P8)
2784 .Case("p9", AArch64::P9)
2785 .Case("p10", AArch64::P10)
2786 .Case("p11", AArch64::P11)
2787 .Case("p12", AArch64::P12)
2788 .Case("p13", AArch64::P13)
2789 .Case("p14", AArch64::P14)
2790 .Case("p15", AArch64::P15)
2791 .Default(0);
2792}
2793
2795 return StringSwitch<unsigned>(Name.lower())
2796 .Case("pn0", AArch64::PN0)
2797 .Case("pn1", AArch64::PN1)
2798 .Case("pn2", AArch64::PN2)
2799 .Case("pn3", AArch64::PN3)
2800 .Case("pn4", AArch64::PN4)
2801 .Case("pn5", AArch64::PN5)
2802 .Case("pn6", AArch64::PN6)
2803 .Case("pn7", AArch64::PN7)
2804 .Case("pn8", AArch64::PN8)
2805 .Case("pn9", AArch64::PN9)
2806 .Case("pn10", AArch64::PN10)
2807 .Case("pn11", AArch64::PN11)
2808 .Case("pn12", AArch64::PN12)
2809 .Case("pn13", AArch64::PN13)
2810 .Case("pn14", AArch64::PN14)
2811 .Case("pn15", AArch64::PN15)
2812 .Default(0);
2813}
2814
2816 return StringSwitch<unsigned>(Name.lower())
2817 .Case("za0.d", AArch64::ZAD0)
2818 .Case("za1.d", AArch64::ZAD1)
2819 .Case("za2.d", AArch64::ZAD2)
2820 .Case("za3.d", AArch64::ZAD3)
2821 .Case("za4.d", AArch64::ZAD4)
2822 .Case("za5.d", AArch64::ZAD5)
2823 .Case("za6.d", AArch64::ZAD6)
2824 .Case("za7.d", AArch64::ZAD7)
2825 .Case("za0.s", AArch64::ZAS0)
2826 .Case("za1.s", AArch64::ZAS1)
2827 .Case("za2.s", AArch64::ZAS2)
2828 .Case("za3.s", AArch64::ZAS3)
2829 .Case("za0.h", AArch64::ZAH0)
2830 .Case("za1.h", AArch64::ZAH1)
2831 .Case("za0.b", AArch64::ZAB0)
2832 .Default(0);
2833}
2834
2836 return StringSwitch<unsigned>(Name.lower())
2837 .Case("za", AArch64::ZA)
2838 .Case("za0.q", AArch64::ZAQ0)
2839 .Case("za1.q", AArch64::ZAQ1)
2840 .Case("za2.q", AArch64::ZAQ2)
2841 .Case("za3.q", AArch64::ZAQ3)
2842 .Case("za4.q", AArch64::ZAQ4)
2843 .Case("za5.q", AArch64::ZAQ5)
2844 .Case("za6.q", AArch64::ZAQ6)
2845 .Case("za7.q", AArch64::ZAQ7)
2846 .Case("za8.q", AArch64::ZAQ8)
2847 .Case("za9.q", AArch64::ZAQ9)
2848 .Case("za10.q", AArch64::ZAQ10)
2849 .Case("za11.q", AArch64::ZAQ11)
2850 .Case("za12.q", AArch64::ZAQ12)
2851 .Case("za13.q", AArch64::ZAQ13)
2852 .Case("za14.q", AArch64::ZAQ14)
2853 .Case("za15.q", AArch64::ZAQ15)
2854 .Case("za0.d", AArch64::ZAD0)
2855 .Case("za1.d", AArch64::ZAD1)
2856 .Case("za2.d", AArch64::ZAD2)
2857 .Case("za3.d", AArch64::ZAD3)
2858 .Case("za4.d", AArch64::ZAD4)
2859 .Case("za5.d", AArch64::ZAD5)
2860 .Case("za6.d", AArch64::ZAD6)
2861 .Case("za7.d", AArch64::ZAD7)
2862 .Case("za0.s", AArch64::ZAS0)
2863 .Case("za1.s", AArch64::ZAS1)
2864 .Case("za2.s", AArch64::ZAS2)
2865 .Case("za3.s", AArch64::ZAS3)
2866 .Case("za0.h", AArch64::ZAH0)
2867 .Case("za1.h", AArch64::ZAH1)
2868 .Case("za0.b", AArch64::ZAB0)
2869 .Case("za0h.q", AArch64::ZAQ0)
2870 .Case("za1h.q", AArch64::ZAQ1)
2871 .Case("za2h.q", AArch64::ZAQ2)
2872 .Case("za3h.q", AArch64::ZAQ3)
2873 .Case("za4h.q", AArch64::ZAQ4)
2874 .Case("za5h.q", AArch64::ZAQ5)
2875 .Case("za6h.q", AArch64::ZAQ6)
2876 .Case("za7h.q", AArch64::ZAQ7)
2877 .Case("za8h.q", AArch64::ZAQ8)
2878 .Case("za9h.q", AArch64::ZAQ9)
2879 .Case("za10h.q", AArch64::ZAQ10)
2880 .Case("za11h.q", AArch64::ZAQ11)
2881 .Case("za12h.q", AArch64::ZAQ12)
2882 .Case("za13h.q", AArch64::ZAQ13)
2883 .Case("za14h.q", AArch64::ZAQ14)
2884 .Case("za15h.q", AArch64::ZAQ15)
2885 .Case("za0h.d", AArch64::ZAD0)
2886 .Case("za1h.d", AArch64::ZAD1)
2887 .Case("za2h.d", AArch64::ZAD2)
2888 .Case("za3h.d", AArch64::ZAD3)
2889 .Case("za4h.d", AArch64::ZAD4)
2890 .Case("za5h.d", AArch64::ZAD5)
2891 .Case("za6h.d", AArch64::ZAD6)
2892 .Case("za7h.d", AArch64::ZAD7)
2893 .Case("za0h.s", AArch64::ZAS0)
2894 .Case("za1h.s", AArch64::ZAS1)
2895 .Case("za2h.s", AArch64::ZAS2)
2896 .Case("za3h.s", AArch64::ZAS3)
2897 .Case("za0h.h", AArch64::ZAH0)
2898 .Case("za1h.h", AArch64::ZAH1)
2899 .Case("za0h.b", AArch64::ZAB0)
2900 .Case("za0v.q", AArch64::ZAQ0)
2901 .Case("za1v.q", AArch64::ZAQ1)
2902 .Case("za2v.q", AArch64::ZAQ2)
2903 .Case("za3v.q", AArch64::ZAQ3)
2904 .Case("za4v.q", AArch64::ZAQ4)
2905 .Case("za5v.q", AArch64::ZAQ5)
2906 .Case("za6v.q", AArch64::ZAQ6)
2907 .Case("za7v.q", AArch64::ZAQ7)
2908 .Case("za8v.q", AArch64::ZAQ8)
2909 .Case("za9v.q", AArch64::ZAQ9)
2910 .Case("za10v.q", AArch64::ZAQ10)
2911 .Case("za11v.q", AArch64::ZAQ11)
2912 .Case("za12v.q", AArch64::ZAQ12)
2913 .Case("za13v.q", AArch64::ZAQ13)
2914 .Case("za14v.q", AArch64::ZAQ14)
2915 .Case("za15v.q", AArch64::ZAQ15)
2916 .Case("za0v.d", AArch64::ZAD0)
2917 .Case("za1v.d", AArch64::ZAD1)
2918 .Case("za2v.d", AArch64::ZAD2)
2919 .Case("za3v.d", AArch64::ZAD3)
2920 .Case("za4v.d", AArch64::ZAD4)
2921 .Case("za5v.d", AArch64::ZAD5)
2922 .Case("za6v.d", AArch64::ZAD6)
2923 .Case("za7v.d", AArch64::ZAD7)
2924 .Case("za0v.s", AArch64::ZAS0)
2925 .Case("za1v.s", AArch64::ZAS1)
2926 .Case("za2v.s", AArch64::ZAS2)
2927 .Case("za3v.s", AArch64::ZAS3)
2928 .Case("za0v.h", AArch64::ZAH0)
2929 .Case("za1v.h", AArch64::ZAH1)
2930 .Case("za0v.b", AArch64::ZAB0)
2931 .Default(0);
2932}
2933
2934bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2935 SMLoc &EndLoc) {
2936 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2937}
2938
2939ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2940 SMLoc &EndLoc) {
2941 StartLoc = getLoc();
2942 ParseStatus Res = tryParseScalarRegister(Reg);
2943 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2944 return Res;
2945}
2946
2947// Matches a register name or register alias previously defined by '.req'
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  RegKind Kind) {
  unsigned RegNum = 0;
  // Each register family is tried in turn; a hit in the wrong family yields
  // 0 so that e.g. "z3" does not match when a scalar register is wanted.
  if ((RegNum = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;

  // NOTE(review): the condition line for the predicate-as-counter match is
  // missing from this extract — confirm against upstream.
    return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;

  if ((RegNum = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? RegNum : 0;

  if ((RegNum = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? RegNum : 0;

  if (Name.equals_insensitive("zt0"))
    return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;

  // The parsed register must be of RegKind Scalar
  if ((RegNum = MatchRegisterName(Name)))
    return (Kind == RegKind::Scalar) ? RegNum : 0;

  if (!RegNum) {
    // Handle a few common aliases of registers.
    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
                          .Case("fp", AArch64::FP)
                          .Case("lr", AArch64::LR)
                          .Case("x31", AArch64::XZR)
                          .Case("w31", AArch64::WZR)
                          .Default(0))
      return Kind == RegKind::Scalar ? RegNum : 0;

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;

    // set RegNum if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}
2996
/// Return the number of architectural registers in register class \p K.
unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
  switch (K) {
  case RegKind::Scalar:
  case RegKind::NeonVector:
  case RegKind::SVEDataVector:
    // 32 general-purpose / NEON / SVE data (Z) registers.
    return 32;
  case RegKind::Matrix:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEPredicateAsCounter:
    // 16 matrix tile / predicate (P) / predicate-as-counter (PN) registers.
    return 16;
  case RegKind::LookupTable:
    // ZT0 is the only lookup-table register.
    return 1;
  }
  llvm_unreachable("Unsupported RegKind");
}
3012
3013/// tryParseScalarRegister - Try to parse a register name. The token must be an
3014/// Identifier when called, and if it is a register name the token is eaten and
3015/// the register is added to the operand list.
3016ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3017 const AsmToken &Tok = getTok();
3018 if (Tok.isNot(AsmToken::Identifier))
3019 return ParseStatus::NoMatch;
3020
3021 std::string lowerCase = Tok.getString().lower();
3022 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3023 if (Reg == 0)
3024 return ParseStatus::NoMatch;
3025
3026 RegNum = Reg;
3027 Lex(); // Eat identifier token.
3028 return ParseStatus::Success;
3029}
3030
3031/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3032ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3033 SMLoc S = getLoc();
3034
3035 if (getTok().isNot(AsmToken::Identifier))
3036 return Error(S, "Expected cN operand where 0 <= N <= 15");
3037
3038 StringRef Tok = getTok().getIdentifier();
3039 if (Tok[0] != 'c' && Tok[0] != 'C')
3040 return Error(S, "Expected cN operand where 0 <= N <= 15");
3041
3042 uint32_t CRNum;
3043 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3044 if (BadNum || CRNum > 15)
3045 return Error(S, "Expected cN operand where 0 <= N <= 15");
3046
3047 Lex(); // Eat identifier token.
3048 Operands.push_back(
3049 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3050 return ParseStatus::Success;
3051}
3052
3053// Either an identifier for named values or a 6-bit immediate.
3054ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3055 SMLoc S = getLoc();
3056 const AsmToken &Tok = getTok();
3057
3058 unsigned MaxVal = 63;
3059
3060 // Immediate case, with optional leading hash:
3061 if (parseOptionalToken(AsmToken::Hash) ||
3062 Tok.is(AsmToken::Integer)) {
3063 const MCExpr *ImmVal;
3064 if (getParser().parseExpression(ImmVal))
3065 return ParseStatus::Failure;
3066
3067 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3068 if (!MCE)
3069 return TokError("immediate value expected for prefetch operand");
3070 unsigned prfop = MCE->getValue();
3071 if (prfop > MaxVal)
3072 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3073 "] expected");
3074
3075 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3076 Operands.push_back(AArch64Operand::CreatePrefetch(
3077 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3078 return ParseStatus::Success;
3079 }
3080
3081 if (Tok.isNot(AsmToken::Identifier))
3082 return TokError("prefetch hint expected");
3083
3084 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3085 if (!RPRFM)
3086 return TokError("prefetch hint expected");
3087
3088 Operands.push_back(AArch64Operand::CreatePrefetch(
3089 RPRFM->Encoding, Tok.getString(), S, getContext()));
3090 Lex(); // Eat identifier token.
3091 return ParseStatus::Success;
3092}
3093
/// tryParsePrefetch - Try to parse a prefetch operand.
///
/// Accepts either a named prefetch hint or an immediate, optionally preceded
/// by '#'. The immediate range and the name<->encoding tables differ between
/// SVE prefetches and base prefetches, selected by \p IsSVEPrefetch.
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name -> encoding, using the SVE or base PRFM table as appropriate.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Encoding -> canonical name, used to label immediate operands.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are meaningful as prefetch encodings.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
                      "] expected");

    // Record the canonical name (if any) alongside the raw encoding so the
    // operand can be printed symbolically.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
                                                      S, getContext()));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError("prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3153
3154/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3155ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3156 SMLoc S = getLoc();
3157 const AsmToken &Tok = getTok();
3158 if (Tok.isNot(AsmToken::Identifier))
3159 return TokError("invalid operand for instruction");
3160
3161 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3162 if (!PSB)
3163 return TokError("invalid operand for instruction");
3164
3165 Operands.push_back(AArch64Operand::CreatePSBHint(
3166 PSB->Encoding, Tok.getString(), S, getContext()));
3167 Lex(); // Eat identifier token.
3168 return ParseStatus::Success;
3169}
3170
3171ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3172 SMLoc StartLoc = getLoc();
3173
3174 MCRegister RegNum;
3175
3176 // The case where xzr, xzr is not present is handled by an InstAlias.
3177
3178 auto RegTok = getTok(); // in case we need to backtrack
3179 if (!tryParseScalarRegister(RegNum).isSuccess())
3180 return ParseStatus::NoMatch;
3181
3182 if (RegNum != AArch64::XZR) {
3183 getLexer().UnLex(RegTok);
3184 return ParseStatus::NoMatch;
3185 }
3186
3187 if (parseComma())
3188 return ParseStatus::Failure;
3189
3190 if (!tryParseScalarRegister(RegNum).isSuccess())
3191 return TokError("expected register operand");
3192
3193 if (RegNum != AArch64::XZR)
3194 return TokError("xzr must be followed by xzr");
3195
3196 // We need to push something, since we claim this is an operand in .td.
3197 // See also AArch64AsmParser::parseKeywordOperand.
3198 Operands.push_back(AArch64Operand::CreateReg(
3199 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3200
3201 return ParseStatus::Success;
3202}
3203
3204/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3205ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3206 SMLoc S = getLoc();
3207 const AsmToken &Tok = getTok();
3208 if (Tok.isNot(AsmToken::Identifier))
3209 return TokError("invalid operand for instruction");
3210
3211 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3212 if (!BTI)
3213 return TokError("invalid operand for instruction");
3214
3215 Operands.push_back(AArch64Operand::CreateBTIHint(
3216 BTI->Encoding, Tok.getString(), S, getContext()));
3217 Lex(); // Eat identifier token.
3218 return ParseStatus::Success;
3219}
3220
3221/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3222/// instruction.
3223ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3224 SMLoc S = getLoc();
3225 const MCExpr *Expr = nullptr;
3226
3227 if (getTok().is(AsmToken::Hash)) {
3228 Lex(); // Eat hash token.
3229 }
3230
3231 if (parseSymbolicImmVal(Expr))
3232 return ParseStatus::Failure;
3233
3234 AArch64MCExpr::VariantKind ELFRefKind;
3235 MCSymbolRefExpr::VariantKind DarwinRefKind;
3236 int64_t Addend;
3237 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3238 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3239 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3240 // No modifier was specified at all; this is the syntax for an ELF basic
3241 // ADRP relocation (unfortunately).
3242 Expr =
3244 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3245 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3246 Addend != 0) {
3247 return Error(S, "gotpage label reference not allowed an addend");
3248 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3249 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3250 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3251 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3252 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3253 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3254 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3255 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3256 // The operand must be an @page or @gotpage qualified symbolref.
3257 return Error(S, "page or gotpage label reference expected");
3258 }
3259 }
3260
3261 // We have either a label reference possibly with addend or an immediate. The
3262 // addend is a raw value here. The linker will adjust it to only reference the
3263 // page.
3264 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3265 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3266
3267 return ParseStatus::Success;
3268}
3269
3270/// tryParseAdrLabel - Parse and validate a source label for the ADR
3271/// instruction.
3272ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3273 SMLoc S = getLoc();
3274 const MCExpr *Expr = nullptr;
3275
3276 // Leave anything with a bracket to the default for SVE
3277 if (getTok().is(AsmToken::LBrac))
3278 return ParseStatus::NoMatch;
3279
3280 if (getTok().is(AsmToken::Hash))
3281 Lex(); // Eat hash token.
3282
3283 if (parseSymbolicImmVal(Expr))
3284 return ParseStatus::Failure;
3285
3286 AArch64MCExpr::VariantKind ELFRefKind;
3287 MCSymbolRefExpr::VariantKind DarwinRefKind;
3288 int64_t Addend;
3289 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3290 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3291 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3292 // No modifier was specified at all; this is the syntax for an ELF basic
3293 // ADR relocation (unfortunately).
3294 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3295 } else {
3296 return Error(S, "unexpected adr label");
3297 }
3298 }
3299
3300 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3301 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3302 return ParseStatus::Success;
3303}
3304
3305/// tryParseFPImm - A floating point immediate expression operand.
3306template <bool AddFPZeroAsLiteral>
3307ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3308 SMLoc S = getLoc();
3309
3310 bool Hash = parseOptionalToken(AsmToken::Hash);
3311
3312 // Handle negation, as that still comes through as a separate token.
3313 bool isNegative = parseOptionalToken(AsmToken::Minus);
3314
3315 const AsmToken &Tok = getTok();
3316 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3317 if (!Hash)
3318 return ParseStatus::NoMatch;
3319 return TokError("invalid floating point immediate");
3320 }
3321
3322 // Parse hexadecimal representation.
3323 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3324 if (Tok.getIntVal() > 255 || isNegative)
3325 return TokError("encoded floating point value out of range");
3326
3328 Operands.push_back(
3329 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3330 } else {
3331 // Parse FP representation.
3332 APFloat RealVal(APFloat::IEEEdouble());
3333 auto StatusOrErr =
3334 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3335 if (errorToBool(StatusOrErr.takeError()))
3336 return TokError("invalid floating point representation");
3337
3338 if (isNegative)
3339 RealVal.changeSign();
3340
3341 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3342 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3343 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3344 } else
3345 Operands.push_back(AArch64Operand::CreateFPImm(
3346 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3347 }
3348
3349 Lex(); // Eat the token.
3350
3351 return ParseStatus::Success;
3352}
3353
3354/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3355/// a shift suffix, for example '#1, lsl #12'.
3357AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3358 SMLoc S = getLoc();
3359
3360 if (getTok().is(AsmToken::Hash))
3361 Lex(); // Eat '#'
3362 else if (getTok().isNot(AsmToken::Integer))
3363 // Operand should start from # or should be integer, emit error otherwise.
3364 return ParseStatus::NoMatch;
3365
3366 if (getTok().is(AsmToken::Integer) &&
3367 getLexer().peekTok().is(AsmToken::Colon))
3368 return tryParseImmRange(Operands);
3369
3370 const MCExpr *Imm = nullptr;
3371 if (parseSymbolicImmVal(Imm))
3372 return ParseStatus::Failure;
3373 else if (getTok().isNot(AsmToken::Comma)) {
3374 Operands.push_back(
3375 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3376 return ParseStatus::Success;
3377 }
3378
3379 // Eat ','
3380 Lex();
3381 StringRef VecGroup;
3382 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3383 Operands.push_back(
3384 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3385 Operands.push_back(
3386 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3387 return ParseStatus::Success;
3388 }
3389
3390 // The optional operand must be "lsl #N" where N is non-negative.
3391 if (!getTok().is(AsmToken::Identifier) ||
3392 !getTok().getIdentifier().equals_insensitive("lsl"))
3393 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3394
3395 // Eat 'lsl'
3396 Lex();
3397
3398 parseOptionalToken(AsmToken::Hash);
3399
3400 if (getTok().isNot(AsmToken::Integer))
3401 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3402
3403 int64_t ShiftAmount = getTok().getIntVal();
3404
3405 if (ShiftAmount < 0)
3406 return Error(getLoc(), "positive shift amount required");
3407 Lex(); // Eat the number
3408
3409 // Just in case the optional lsl #0 is used for immediates other than zero.
3410 if (ShiftAmount == 0 && Imm != nullptr) {
3411 Operands.push_back(
3412 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3413 return ParseStatus::Success;
3414 }
3415
3416 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3417 getLoc(), getContext()));
3418 return ParseStatus::Success;
3419}
3420
3421/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3422/// suggestion to help common typos.
3424AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3426 .Case("eq", AArch64CC::EQ)
3427 .Case("ne", AArch64CC::NE)
3428 .Case("cs", AArch64CC::HS)
3429 .Case("hs", AArch64CC::HS)
3430 .Case("cc", AArch64CC::LO)
3431 .Case("lo", AArch64CC::LO)
3432 .Case("mi", AArch64CC::MI)
3433 .Case("pl", AArch64CC::PL)
3434 .Case("vs", AArch64CC::VS)
3435 .Case("vc", AArch64CC::VC)
3436 .Case("hi", AArch64CC::HI)
3437 .Case("ls", AArch64CC::LS)
3438 .Case("ge", AArch64CC::GE)
3439 .Case("lt", AArch64CC::LT)
3440 .Case("gt", AArch64CC::GT)
3441 .Case("le", AArch64CC::LE)
3442 .Case("al", AArch64CC::AL)
3443 .Case("nv", AArch64CC::NV)
3445
3446 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3448 .Case("none", AArch64CC::EQ)
3449 .Case("any", AArch64CC::NE)
3450 .Case("nlast", AArch64CC::HS)
3451 .Case("last", AArch64CC::LO)
3452 .Case("first", AArch64CC::MI)
3453 .Case("nfrst", AArch64CC::PL)
3454 .Case("pmore", AArch64CC::HI)
3455 .Case("plast", AArch64CC::LS)
3456 .Case("tcont", AArch64CC::GE)
3457 .Case("tstop", AArch64CC::LT)
3459
3460 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3461 Suggestion = "nfrst";
3462 }
3463 return CC;
3464}
3465
3466/// parseCondCode - Parse a Condition Code operand.
3467bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3468 bool invertCondCode) {
3469 SMLoc S = getLoc();
3470 const AsmToken &Tok = getTok();
3471 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3472
3473 StringRef Cond = Tok.getString();
3474 std::string Suggestion;
3475 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3476 if (CC == AArch64CC::Invalid) {
3477 std::string Msg = "invalid condition code";
3478 if (!Suggestion.empty())
3479 Msg += ", did you mean " + Suggestion + "?";
3480 return TokError(Msg);
3481 }
3482 Lex(); // Eat identifier token.
3483
3484 if (invertCondCode) {
3485 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3486 return TokError("condition codes AL and NV are invalid for this instruction");
3488 }
3489
3490 Operands.push_back(
3491 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3492 return false;
3493}
3494
3495ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3496 const AsmToken &Tok = getTok();
3497 SMLoc S = getLoc();
3498
3499 if (Tok.isNot(AsmToken::Identifier))
3500 return TokError("invalid operand for instruction");
3501
3502 unsigned PStateImm = -1;
3503 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3504 if (!SVCR)
3505 return ParseStatus::NoMatch;
3506 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3507 PStateImm = SVCR->Encoding;
3508
3509 Operands.push_back(
3510 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3511 Lex(); // Eat identifier token.
3512 return ParseStatus::Success;
3513}
3514
/// Try to parse an SME matrix operand: either the whole ZA array (with an
/// optional element-width suffix, e.g. "za.s") or a single tile / row /
/// column register such as "za0h.s".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Whole-array form: "za" or "za.<suffix>".
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Matrix register names always carry a '.'-separated width suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // The character before the '.' selects row ('h'), column ('v'), or — for
  // any other character — a whole tile.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3582
3583/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3584/// them if present.
3586AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3587 const AsmToken &Tok = getTok();
3588 std::string LowerID = Tok.getString().lower();
3591 .Case("lsl", AArch64_AM::LSL)
3592 .Case("lsr", AArch64_AM::LSR)
3593 .Case("asr", AArch64_AM::ASR)
3594 .Case("ror", AArch64_AM::ROR)
3595 .Case("msl", AArch64_AM::MSL)
3596 .Case("uxtb", AArch64_AM::UXTB)
3597 .Case("uxth", AArch64_AM::UXTH)
3598 .Case("uxtw", AArch64_AM::UXTW)
3599 .Case("uxtx", AArch64_AM::UXTX)
3600 .Case("sxtb", AArch64_AM::SXTB)
3601 .Case("sxth", AArch64_AM::SXTH)
3602 .Case("sxtw", AArch64_AM::SXTW)
3603 .Case("sxtx", AArch64_AM::SXTX)
3605
3607 return ParseStatus::NoMatch;
3608
3609 SMLoc S = Tok.getLoc();
3610 Lex();
3611
3612 bool Hash = parseOptionalToken(AsmToken::Hash);
3613
3614 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3615 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3616 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3617 ShOp == AArch64_AM::MSL) {
3618 // We expect a number here.
3619 return TokError("expected #imm after shift specifier");
3620 }
3621
3622 // "extend" type operations don't need an immediate, #0 is implicit.
3623 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3624 Operands.push_back(
3625 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3626 return ParseStatus::Success;
3627 }
3628
3629 // Make sure we do actually have a number, identifier or a parenthesized
3630 // expression.
3631 SMLoc E = getLoc();
3632 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3633 !getTok().is(AsmToken::Identifier))
3634 return Error(E, "expected integer shift amount");
3635
3636 const MCExpr *ImmVal;
3637 if (getParser().parseExpression(ImmVal))
3638 return ParseStatus::Failure;
3639
3640 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3641 if (!MCE)
3642 return Error(E, "expected constant '#imm' after shift specifier");
3643
3644 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3645 Operands.push_back(AArch64Operand::CreateShiftExtend(
3646 ShOp, MCE->getValue(), true, S, E, getContext()));
3647 return ParseStatus::Success;
3648}
3649
3650static const struct Extension {
3651 const char *Name;
3653} ExtensionMap[] = {
3654 {"crc", {AArch64::FeatureCRC}},
3655 {"sm4", {AArch64::FeatureSM4}},
3656 {"sha3", {AArch64::FeatureSHA3}},
3657 {"sha2", {AArch64::FeatureSHA2}},
3658 {"aes", {AArch64::FeatureAES}},
3659 {"crypto", {AArch64::FeatureCrypto}},
3660 {"fp", {AArch64::FeatureFPARMv8}},
3661 {"simd", {AArch64::FeatureNEON}},
3662 {"ras", {AArch64::FeatureRAS}},
3663 {"rasv2", {AArch64::FeatureRASv2}},
3664 {"lse", {AArch64::FeatureLSE}},
3665 {"predres", {AArch64::FeaturePredRes}},
3666 {"predres2", {AArch64::FeatureSPECRES2}},
3667 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3668 {"mte", {AArch64::FeatureMTE}},
3669 {"memtag", {AArch64::FeatureMTE}},
3670 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3671 {"pan", {AArch64::FeaturePAN}},
3672 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3673 {"ccpp", {AArch64::FeatureCCPP}},
3674 {"rcpc", {AArch64::FeatureRCPC}},
3675 {"rng", {AArch64::FeatureRandGen}},
3676 {"sve", {AArch64::FeatureSVE}},
3677 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3678 {"sve2", {AArch64::FeatureSVE2}},
3679 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3680 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3681 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3682 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3683 {"sve2p1", {AArch64::FeatureSVE2p1}},
3684 {"ls64", {AArch64::FeatureLS64}},
3685 {"xs", {AArch64::FeatureXS}},
3686 {"pauth", {AArch64::FeaturePAuth}},
3687 {"flagm", {AArch64::FeatureFlagM}},
3688 {"rme", {AArch64::FeatureRME}},
3689 {"sme", {AArch64::FeatureSME}},
3690 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3691 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3692 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3693 {"sme2", {AArch64::FeatureSME2}},
3694 {"sme2p1", {AArch64::FeatureSME2p1}},
3695 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3696 {"hbc", {AArch64::FeatureHBC}},
3697 {"mops", {AArch64::FeatureMOPS}},
3698 {"mec", {AArch64::FeatureMEC}},
3699 {"the", {AArch64::FeatureTHE}},
3700 {"d128", {AArch64::FeatureD128}},
3701 {"lse128", {AArch64::FeatureLSE128}},
3702 {"ite", {AArch64::FeatureITE}},
3703 {"cssc", {AArch64::FeatureCSSC}},
3704 {"rcpc3", {AArch64::FeatureRCPC3}},
3705 {"gcs", {AArch64::FeatureGCS}},
3706 {"bf16", {AArch64::FeatureBF16}},
3707 {"compnum", {AArch64::FeatureComplxNum}},
3708 {"dotprod", {AArch64::FeatureDotProd}},
3709 {"f32mm", {AArch64::FeatureMatMulFP32}},
3710 {"f64mm", {AArch64::FeatureMatMulFP64}},
3711 {"fp16", {AArch64::FeatureFullFP16}},
3712 {"fp16fml", {AArch64::FeatureFP16FML}},
3713 {"i8mm", {AArch64::FeatureMatMulInt8}},
3714 {"lor", {AArch64::FeatureLOR}},
3715 {"profile", {AArch64::FeatureSPE}},
3716 // "rdma" is the name documented by binutils for the feature, but
3717 // binutils also accepts incomplete prefixes of features, so "rdm"
3718 // works too. Support both spellings here.
3719 {"rdm", {AArch64::FeatureRDM}},
3720 {"rdma", {AArch64::FeatureRDM}},
3721 {"sb", {AArch64::FeatureSB}},
3722 {"ssbs", {AArch64::FeatureSSBS}},
3723 {"tme", {AArch64::FeatureTME}},
3724 {"fp8", {AArch64::FeatureFP8}},
3725 {"faminmax", {AArch64::FeatureFAMINMAX}},
3726 {"fp8fma", {AArch64::FeatureFP8FMA}},
3727 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3728 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3729 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3730 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3731 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3732 {"lut", {AArch64::FeatureLUT}},
3733 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3734 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3735 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3736 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3737 {"cpa", {AArch64::FeatureCPA}},
3738 {"tlbiw", {AArch64::FeatureTLBIW}},
3740
/// Append a human-readable description of the features in \p FBS to \p Str;
/// used to build "<INSN> <op> requires: <features>" diagnostics.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE(review): the v8.0a check is an independent 'if', not part of the
  // else-if chain below, so "ARMv8a" may be emitted in addition to a later
  // match — confirm this is intentional.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV9_5aOps])
    Str += "ARMv9.5a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // No architecture-version bit matched; list the individual extension
    // names instead.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}
3786
3787void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3788 SMLoc S) {
3789 const uint16_t Op2 = Encoding & 7;
3790 const uint16_t Cm = (Encoding & 0x78) >> 3;
3791 const uint16_t Cn = (Encoding & 0x780) >> 7;
3792 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3793
3794 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3795
3796 Operands.push_back(
3797 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3798 Operands.push_back(
3799 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3800 Operands.push_back(
3801 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3802 Expr = MCConstantExpr::create(Op2, getContext());
3803 Operands.push_back(
3804 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3805}
3806
3807/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3808/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3809bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3811 if (Name.contains('.'))
3812 return TokError("invalid operand");
3813
3814 Mnemonic = Name;
3815 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3816
3817 const AsmToken &Tok = getTok();
3818 StringRef Op = Tok.getString();
3819 SMLoc S = Tok.getLoc();
3820
3821 if (Mnemonic == "ic") {
3822 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3823 if (!IC)
3824 return TokError("invalid operand for IC instruction");
3825 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3826 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3828 return TokError(Str);
3829 }
3830 createSysAlias(IC->Encoding, Operands, S);
3831 } else if (Mnemonic == "dc") {
3832 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3833 if (!DC)
3834 return TokError("invalid operand for DC instruction");
3835 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3836 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3838 return TokError(Str);
3839 }
3840 createSysAlias(DC->Encoding, Operands, S);
3841 } else if (Mnemonic == "at") {
3842 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3843 if (!AT)
3844 return TokError("invalid operand for AT instruction");
3845 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3846 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3848 return TokError(Str);
3849 }
3850 createSysAlias(AT->Encoding, Operands, S);
3851 } else if (Mnemonic == "tlbi") {
3852 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3853 if (!TLBI)
3854 return TokError("invalid operand for TLBI instruction");
3855 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3856 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3858 return TokError(Str);
3859 }
3860 createSysAlias(TLBI->Encoding, Operands, S);
3861 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3862
3863 if (Op.lower() != "rctx")
3864 return TokError("invalid operand for prediction restriction instruction");
3865
3866 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3867 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3868 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3869
3870 if (Mnemonic == "cosp" && !hasSpecres2)
3871 return TokError("COSP requires: predres2");
3872 if (!hasPredres)
3873 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3874
3875 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3876 : Mnemonic == "dvp" ? 0b101
3877 : Mnemonic == "cosp" ? 0b110
3878 : Mnemonic == "cpp" ? 0b111
3879 : 0;
3880 assert(PRCTX_Op2 &&
3881 "Invalid mnemonic for prediction restriction instruction");
3882 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3883 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3884
3885 createSysAlias(Encoding, Operands, S);
3886 }
3887
3888 Lex(); // Eat operand.
3889
3890 bool ExpectRegister = !Op.contains_insensitive("all");
3891 bool HasRegister = false;
3892
3893 // Check for the optional register operand.
3894 if (parseOptionalToken(AsmToken::Comma)) {
3895 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3896 return TokError("expected register operand");
3897 HasRegister = true;
3898 }
3899
3900 if (ExpectRegister && !HasRegister)
3901 return TokError("specified " + Mnemonic + " op requires a register");
3902 else if (!ExpectRegister && HasRegister)
3903 return TokError("specified " + Mnemonic + " op does not use a register");
3904
3905 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3906 return true;
3907
3908 return false;
3909}
3910
3911/// parseSyspAlias - The TLBIP instructions are simple aliases for
3912/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3913bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
// NOTE(review): the second parameter line (original line 3914, presumably
// "OperandVector &Operands") was dropped by this extraction — confirm against
// the upstream file before relying on this listing.
3915 if (Name.contains('.'))
3916 return TokError("invalid operand");
3917
3918 Mnemonic = Name;
// The alias is lowered to the canonical "sysp" mnemonic token; the alias name
// itself is not kept as an operand.
3919 Operands.push_back(
3920 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3921
3922 const AsmToken &Tok = getTok();
3923 StringRef Op = Tok.getString();
3924 SMLoc S = Tok.getLoc();
3925
3926 if (Mnemonic == "tlbip") {
// An optional trailing "nXS" qualifier selects the XS variant: strip it from
// the operand name before the table lookup, then fold it back into the
// encoding (bit 7) and the required-feature set below.
3927 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3928 if (HasnXSQualifier) {
3929 Op = Op.drop_back(3);
3930 }
3931 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3932 if (!TLBIorig)
3933 return TokError("invalid operand for TLBIP instruction");
3934 const AArch64TLBI::TLBI TLBI(
3935 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3936 TLBIorig->NeedsReg,
3937 HasnXSQualifier
3938 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3939 : TLBIorig->FeaturesRequired);
3940 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3941 std::string Name =
3942 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3943 std::string Str("TLBIP " + Name + " requires: ");
// NOTE(review): original line 3944 (presumably the call appending the list of
// missing features to Str) is dropped in this listing — confirm upstream.
3945 return TokError(Str);
3946 }
3947 createSysAlias(TLBI.Encoding, Operands, S);
3948 }
3949
3950 Lex(); // Eat operand.
3951
3952 if (parseComma())
3953 return true;
3954
// TLBIP always takes a register pair: first try the XZR-pair alias, then a
// sequential GPR pair.
3955 if (Tok.isNot(AsmToken::Identifier))
3956 return TokError("expected register identifier");
3957 auto Result = tryParseSyspXzrPair(Operands);
3958 if (Result.isNoMatch())
3959 Result = tryParseGPRSeqPair(Operands);
3960 if (!Result.isSuccess())
3961 return TokError("specified " + Mnemonic +
3962 " op requires a pair of registers");
3963
3964 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3965 return true;
3966
3967 return false;
3968}
3969
/// tryParseBarrierOperand - Parse a barrier operand for DSB/DMB/ISB/TSB:
/// either an immediate in [0, 15] (optionally '#'-prefixed) or a named
/// barrier option looked up in the DB/TSB tables.
3970ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3971 MCAsmParser &Parser = getParser();
3972 const AsmToken &Tok = getTok();
3973
3974 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
3975 return TokError("'csync' operand expected");
3976 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3977 // Immediate operand.
3978 const MCExpr *ImmVal;
3979 SMLoc ExprLoc = getLoc();
// Remember the integer token so it can be pushed back if this turns out to be
// the nXS-variant DSB (immediate > 15) handled by a different parser.
3980 AsmToken IntTok = Tok;
3981 if (getParser().parseExpression(ImmVal))
3982 return ParseStatus::Failure;
3983 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3984 if (!MCE)
3985 return Error(ExprLoc, "immediate value expected for barrier operand");
3986 int64_t Value = MCE->getValue();
3987 if (Mnemonic == "dsb" && Value > 15) {
3988 // This case is a no match here, but it might be matched by the nXS
3989 // variant. Deliberately not unlex the optional '#' as it is not necessary
3990 // to characterize an integer immediate.
3991 Parser.getLexer().UnLex(IntTok);
3992 return ParseStatus::NoMatch;
3993 }
3994 if (Value < 0 || Value > 15)
3995 return Error(ExprLoc, "barrier operand out of range");
// The name is informational only; an unnamed encoding is still accepted.
3996 auto DB = AArch64DB::lookupDBByEncoding(Value);
3997 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3998 ExprLoc, getContext(),
3999 false /*hasnXSModifier*/));
4000 return ParseStatus::Success;
4001 }
4002
4003 if (Tok.isNot(AsmToken::Identifier))
4004 return TokError("invalid operand for instruction");
4005
4006 StringRef Operand = Tok.getString();
4007 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4008 auto DB = AArch64DB::lookupDBByName(Operand);
4009 // The only valid named option for ISB is 'sy'
4010 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4011 return TokError("'sy' or #imm operand expected");
4012 // The only valid named option for TSB is 'csync'
4013 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4014 return TokError("'csync' operand expected");
4015 if (!DB && !TSB) {
4016 if (Mnemonic == "dsb") {
4017 // This case is a no match here, but it might be matched by the nXS
4018 // variant.
4019 return ParseStatus::NoMatch;
4020 }
4021 return TokError("invalid barrier option name");
4022 }
4023
4024 Operands.push_back(AArch64Operand::CreateBarrier(
4025 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4026 getContext(), false /*hasnXSModifier*/));
4027 Lex(); // Consume the option
4028
4029 return ParseStatus::Success;
4030}
4031
/// tryParseBarriernXSOperand - Parse the operand of the v8.7-A "DSB ... nXS"
/// variant: an immediate restricted to {16, 20, 24, 28} or a named nXS
/// barrier option.
// NOTE(review): the return-type line (original line 4032, presumably
// "ParseStatus") was dropped by this extraction — confirm upstream.
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4034 const AsmToken &Tok = getTok();
4035
// Only DSB has an nXS form; the guard is duplicated as a runtime check for
// release builds where the assert compiles away.
4036 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4037 if (Mnemonic != "dsb")
4038 return ParseStatus::Failure;
4039
4040 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4041 // Immediate operand.
4042 const MCExpr *ImmVal;
4043 SMLoc ExprLoc = getLoc();
4044 if (getParser().parseExpression(ImmVal))
4045 return ParseStatus::Failure;
4046 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4047 if (!MCE)
4048 return Error(ExprLoc, "immediate value expected for barrier operand");
4049 int64_t Value = MCE->getValue();
4050 // v8.7-A DSB in the nXS variant accepts only the following immediate
4051 // values: 16, 20, 24, 28.
4052 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4053 return Error(ExprLoc, "barrier operand out of range");
// The range check above guarantees the table lookup succeeds, so DB is not
// null-checked here.
4054 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4055 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4056 ExprLoc, getContext(),
4057 true /*hasnXSModifier*/));
4058 return ParseStatus::Success;
4059 }
4060
4061 if (Tok.isNot(AsmToken::Identifier))
4062 return TokError("invalid operand for instruction");
4063
4064 StringRef Operand = Tok.getString();
4065 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4066
4067 if (!DB)
4068 return TokError("invalid barrier option name");
4069
4070 Operands.push_back(
4071 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4072 getContext(), true /*hasnXSModifier*/));
4073 Lex(); // Consume the option
4074
4075 return ParseStatus::Success;
4076}
4077
/// tryParseSysReg - Parse an MRS/MSR system-register operand. Resolves the
/// identifier against the named system-register table (falling back to the
/// generic "S<op0>_<op1>_<Cn>_<Cm>_<op2>" form) and also records any PSTATE
/// immediate encoding the same name may have.
4078ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4079 const AsmToken &Tok = getTok();
4080
4081 if (Tok.isNot(AsmToken::Identifier))
4082 return ParseStatus::NoMatch;
4083
// SVCR names (SME streaming-mode controls) are handled by a different parser;
// bail out so they are not consumed as plain system registers.
4084 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4085 return ParseStatus::NoMatch;
4086
4087 int MRSReg, MSRReg;
4088 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4089 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
// -1 marks the direction (read or write) in which the register is invalid.
4090 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4091 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4092 } else
4093 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4094
// A name can additionally map to a PSTATE field immediate; try the 0-15
// table first, then the 0-1 table.
4095 unsigned PStateImm = -1;
4096 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4097 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4098 PStateImm = PState15->Encoding;
4099 if (!PState15) {
4100 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4101 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4102 PStateImm = PState1->Encoding;
4103 }
4104
4105 Operands.push_back(
4106 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4107 PStateImm, getContext()));
4108 Lex(); // Eat identifier
4109
4110 return ParseStatus::Success;
4111}
4112
4113/// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns true on failure (no Neon vector register here), false on success.
4114bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4115 if (getTok().isNot(AsmToken::Identifier))
4116 return true;
4117
4118 SMLoc S = getLoc();
4119 // Check for a vector register specifier first.
// NOTE(review): original lines 4120-4121 (presumably the "MCRegister Reg;"
// and "StringRef Kind;" declarations used below) were dropped by this
// extraction — confirm upstream.
4122 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4123 if (!Res.isSuccess())
4124 return true;
4125
// Validate the ".<kind>" suffix (if any) and get the element width from it.
4126 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4127 if (!KindRes)
4128 return true;
4129
4130 unsigned ElementWidth = KindRes->second;
4131 Operands.push_back(
4132 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4133 S, getLoc(), getContext()));
4134
4135 // If there was an explicit qualifier, that goes on as a literal text
4136 // operand.
4137 if (!Kind.empty())
4138 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4139
// A trailing "[index]" is optional; only a hard failure there fails the parse.
4140 return tryParseVectorIndex(Operands).isFailure();
4141}
4142
/// tryParseVectorIndex - Parse an optional "[<imm>]" vector element index.
/// NoMatch when there is no '[' at the current token.
4143ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4144 SMLoc SIdx = getLoc();
4145 if (parseOptionalToken(AsmToken::LBrac)) {
4146 const MCExpr *ImmVal;
4147 if (getParser().parseExpression(ImmVal))
4148 return ParseStatus::NoMatch;
// Only constant indices are representable; symbolic expressions are errors.
4149 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4150 if (!MCE)
4151 return TokError("immediate value expected for vector index");
4152
4153 SMLoc E = getLoc();
4154
4155 if (parseToken(AsmToken::RBrac, "']' expected"))
4156 return ParseStatus::Failure;
4157
4158 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4159 E, getContext()));
4160 return ParseStatus::Success;
4161 }
4162
4163 return ParseStatus::NoMatch;
4164}
4165
4166// tryParseVectorRegister - Try to parse a vector register name with
4167// optional kind specifier. If it is a register specifier, eat the token
4168// and return it.
// On success, Reg receives the matched register and Kind the ".<suffix>"
// (including the dot) if one was present.
4169ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4170 StringRef &Kind,
4171 RegKind MatchKind) {
4172 const AsmToken &Tok = getTok();
4173
4174 if (Tok.isNot(AsmToken::Identifier))
4175 return ParseStatus::NoMatch;
4176
4177 StringRef Name = Tok.getString();
4178 // If there is a kind specifier, it's separated from the register name by
4179 // a '.'.
4180 size_t Start = 0, Next = Name.find('.');
4181 StringRef Head = Name.slice(Start, Next);
4182 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4183
4184 if (RegNum) {
4185 if (Next != StringRef::npos) {
// Keep the leading '.' in Kind; downstream parseVectorKind expects it.
4186 Kind = Name.slice(Next, StringRef::npos);
4187 if (!isValidVectorKind(Kind, MatchKind))
4188 return TokError("invalid vector kind qualifier");
4189 }
4190 Lex(); // Eat the register token.
4191
4192 Reg = RegNum;
4193 return ParseStatus::Success;
4194 }
4195
4196 return ParseStatus::NoMatch;
4197}
4198
/// Try the predicate-as-counter form first, then fall back to the ordinary
/// SVE predicate-vector form.
4199ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
// NOTE(review): original lines 4200-4201 (presumably the "OperandVector
// &Operands) {" parameter line and the "ParseStatus Status =" declaration)
// were dropped by this extraction — confirm upstream.
4202 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4203 if (!Status.isSuccess())
4204 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4205 return Status;
4206}
4207
4208/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
/// Template parameter RK selects predicate-vector vs predicate-as-counter
/// matching; also handles an optional index and an optional "/z" or "/m"
/// predication suffix.
4209template <RegKind RK>
// NOTE(review): original line 4210 (presumably the "ParseStatus" return-type
// line) was dropped by this extraction — confirm upstream.
4211AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4212 // Check for a SVE predicate register specifier first.
4213 const SMLoc S = getLoc();
// NOTE(review): original line 4214 (presumably "StringRef Kind;") was
// dropped by this extraction — confirm upstream.
4215 MCRegister RegNum;
4216 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4217 if (!Res.isSuccess())
4218 return Res;
4219
4220 const auto &KindRes = parseVectorKind(Kind, RK);
4221 if (!KindRes)
4222 return ParseStatus::NoMatch;
4223
4224 unsigned ElementWidth = KindRes->second;
4225 Operands.push_back(AArch64Operand::CreateVectorReg(
4226 RegNum, RK, ElementWidth, S,
4227 getLoc(), getContext()));
4228
4229 if (getLexer().is(AsmToken::LBrac)) {
4230 if (RK == RegKind::SVEPredicateAsCounter) {
4231 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4232 if (ResIndex.isSuccess())
4233 return ParseStatus::Success;
4234 } else {
4235 // Indexed predicate, there's no comma so try parse the next operand
4236 // immediately.
4237 if (parseOperand(Operands, false, false))
4238 return ParseStatus::NoMatch;
4239 }
4240 }
4241
4242 // Not all predicates are followed by a '/m' or '/z'.
4243 if (getTok().isNot(AsmToken::Slash))
4244 return ParseStatus::Success;
4245
4246 // But when they do they shouldn't have an element type suffix.
4247 if (!Kind.empty())
4248 return Error(S, "not expecting size suffix");
4249
4250 // Add a literal slash as operand
4251 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4252
4253 Lex(); // Eat the slash.
4254
4255 // Zeroing or merging?
4256 auto Pred = getTok().getString().lower();
// Predicate-as-counter registers only accept zeroing predication.
4257 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4258 return Error(getLoc(), "expecting 'z' predication");
4259
4260 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4261 return Error(getLoc(), "expecting 'm' or 'z' predication");
4262
4263 // Add zero/merge token.
4264 const char *ZM = Pred == "z" ? "z" : "m";
4265 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4266
4267 Lex(); // Eat zero/merge token.
4268 return ParseStatus::Success;
4269}
4270
4271/// parseRegister - Parse a register operand.
/// Tries Neon vector, then ZT lookup-table, then scalar GPR forms in turn.
/// Returns true on failure (nothing matched), false on success.
4272bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4273 // Try for a Neon vector register.
4274 if (!tryParseNeonVectorRegister(Operands))
4275 return false;
4276
4277 if (tryParseZTOperand(Operands).isSuccess())
4278 return false;
4279
4280 // Otherwise try for a scalar register.
4281 if (tryParseGPROperand<false>(Operands).isSuccess())
4282 return false;
4283
4284 return true;
4285}
4286
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":<reloc-specifier>:" ELF relocation modifier (e.g. ":lo12:sym").
/// On success ImmVal holds the expression, wrapped in an AArch64MCExpr when a
/// modifier was present. Returns true on error.
4287bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4288 bool HasELFModifier = false;
// NOTE(review): original line 4289 (presumably the "AArch64MCExpr::VariantKind
// RefKind = AArch64MCExpr::VK_INVALID;" declaration) was dropped by this
// extraction — confirm upstream.
4290
4291 if (parseOptionalToken(AsmToken::Colon)) {
4292 HasELFModifier = true;
4293
4294 if (getTok().isNot(AsmToken::Identifier))
4295 return TokError("expect relocation specifier in operand after ':'");
4296
// Relocation-specifier names are matched case-insensitively.
4297 std::string LowerCase = getTok().getIdentifier().lower();
4298 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
// NOTE(review): several .Case lines (original lines 4299, 4335, 4338, 4342,
// 4345 and the .Default) were dropped by this extraction — confirm the full
// table upstream.
4300 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4301 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4302 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4303 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4304 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4305 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4306 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4307 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4308 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4309 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4310 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4311 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4312 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4313 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4314 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4315 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4316 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4317 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4318 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4319 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4320 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4321 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4322 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4323 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4324 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4325 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4326 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4327 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4328 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4329 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4330 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4331 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4332 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4333 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4334 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4336 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4337 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4339 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4340 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4341 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4343 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4344 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4346
4347 if (RefKind == AArch64MCExpr::VK_INVALID)
4348 return TokError("expect relocation specifier in operand after ':'");
4349
4350 Lex(); // Eat identifier
4351
4352 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4353 return true;
4354 }
4355
4356 if (getParser().parseExpression(ImmVal))
4357 return true;
4358
// Wrap the expression so the relocation modifier survives into emission.
4359 if (HasELFModifier)
4360 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4361
4362 return false;
4363}
4364
/// tryParseMatrixTileList - Parse an SME matrix tile list operand such as
/// "{}", "{za}" or "{za0.d, za2.d}". The tiles are accumulated into a
/// register mask over the ZAD0-relative encodings.
4365ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4366 if (getTok().isNot(AsmToken::LCurly))
4367 return ParseStatus::NoMatch;
4368
// Helper: parse one "za<N>.<size>" tile; a size suffix is mandatory here.
4369 auto ParseMatrixTile = [this](unsigned &Reg,
4370 unsigned &ElementWidth) -> ParseStatus {
4371 StringRef Name = getTok().getString();
4372 size_t DotPosition = Name.find('.');
4373 if (DotPosition == StringRef::npos)
4374 return ParseStatus::NoMatch;
4375
4376 unsigned RegNum = matchMatrixTileListRegName(Name);
4377 if (!RegNum)
4378 return ParseStatus::NoMatch;
4379
4380 StringRef Tail = Name.drop_front(DotPosition);
4381 const std::optional<std::pair<int, int>> &KindRes =
4382 parseVectorKind(Tail, RegKind::Matrix);
4383 if (!KindRes)
4384 return TokError(
4385 "Expected the register to be followed by element width suffix");
4386 ElementWidth = KindRes->second;
4387 Reg = RegNum;
4388 Lex(); // Eat the register.
4389 return ParseStatus::Success;
4390 };
4391
4392 SMLoc S = getLoc();
// Keep the '{' so it can be pushed back if the first tile fails to match,
// letting other list-operand parsers have a go.
4393 auto LCurly = getTok();
4394 Lex(); // Eat left bracket token.
4395
4396 // Empty matrix list
4397 if (parseOptionalToken(AsmToken::RCurly)) {
4398 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4399 /*RegMask=*/0, S, getLoc(), getContext()));
4400 return ParseStatus::Success;
4401 }
4402
4403 // Try parse {za} alias early
4404 if (getTok().getString().equals_insensitive("za")) {
4405 Lex(); // Eat 'za'
4406
4407 if (parseToken(AsmToken::RCurly, "'}' expected"))
4408 return ParseStatus::Failure;
4409
// "{za}" means the whole array: all eight ZAD tiles.
4410 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4411 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4412 return ParseStatus::Success;
4413 }
4414
4415 SMLoc TileLoc = getLoc();
4416
4417 unsigned FirstReg, ElementWidth;
4418 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4419 if (!ParseRes.isSuccess()) {
4420 getLexer().UnLex(LCurly);
4421 return ParseRes;
4422 }
4423
4424 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4425
4426 unsigned PrevReg = FirstReg;
4427
// NOTE(review): original line 4428 (presumably the declaration of the DRegs
// collection used below) was dropped by this extraction — confirm upstream.
4429 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4430
4431 SmallSet<unsigned, 8> SeenRegs;
4432 SeenRegs.insert(FirstReg);
4433
4434 while (parseOptionalToken(AsmToken::Comma)) {
4435 TileLoc = getLoc();
4436 unsigned Reg, NextElementWidth;
4437 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4438 if (!ParseRes.isSuccess())
4439 return ParseRes;
4440
4441 // Element size must match on all regs in the list.
4442 if (ElementWidth != NextElementWidth)
4443 return Error(TileLoc, "mismatched register size suffix");
4444
// Out-of-order and duplicate tiles are diagnosed as warnings, not errors.
4445 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4446 Warning(TileLoc, "tile list not in ascending order");
4447
4448 if (SeenRegs.contains(Reg))
4449 Warning(TileLoc, "duplicate tile in list");
4450 else {
4451 SeenRegs.insert(Reg);
4452 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4453 }
4454
4455 PrevReg = Reg;
4456 }
4457
4458 if (parseToken(AsmToken::RCurly, "'}' expected"))
4459 return ParseStatus::Failure;
4460
// Fold every covered ZAD tile into a bit mask relative to ZAD0.
4461 unsigned RegMask = 0;
4462 for (auto Reg : DRegs)
4463 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4464 RI->getEncodingValue(AArch64::ZAD0));
4465 Operands.push_back(
4466 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4467
4468 return ParseStatus::Success;
4469}
4470
/// tryParseVectorList - Parse a "{...}" vector register list for the given
/// register kind. Accepts both the ranged form "{v0.8b-v3.8b}" and the
/// comma form "{v0.8b, v1.8b, ...}" (with a constant stride and wraparound),
/// producing a single vector-list operand.
4471template <RegKind VectorKind>
4472ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4473 bool ExpectMatch) {
4474 MCAsmParser &Parser = getParser();
4475 if (!getTok().is(AsmToken::LCurly))
4476 return ParseStatus::NoMatch;
4477
4478 // Wrapper around parse function
4479 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4480 bool NoMatchIsError) -> ParseStatus {
4481 auto RegTok = getTok();
4482 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4483 if (ParseRes.isSuccess()) {
4484 if (parseVectorKind(Kind, VectorKind))
4485 return ParseRes;
4486 llvm_unreachable("Expected a valid vector kind");
4487 }
4488
// "zt0" and "za"-prefixed names belong to other list parsers; report NoMatch
// instead of an error so they can be tried.
4489 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4490 RegTok.getString().equals_insensitive("zt0"))
4491 return ParseStatus::NoMatch;
4492
4493 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4494 (ParseRes.isNoMatch() && NoMatchIsError &&
4495 !RegTok.getString().starts_with_insensitive("za")))
4496 return Error(Loc, "vector register expected");
4497
4498 return ParseStatus::NoMatch;
4499 };
4500
// Number of architectural registers of this kind; used for wraparound
// arithmetic on ranges and strides below.
4501 int NumRegs = getNumRegsForRegKind(VectorKind);
4502 SMLoc S = getLoc();
4503 auto LCurly = getTok();
4504 Lex(); // Eat left bracket token.
4505
// NOTE(review): original line 4506 (presumably "StringRef Kind;") was
// dropped by this extraction — confirm upstream.
4507 MCRegister FirstReg;
4508 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4509
4510 // Put back the original left bracket if there was no match, so that
4511 // different types of list-operands can be matched (e.g. SVE, Neon).
4512 if (ParseRes.isNoMatch())
4513 Parser.getLexer().UnLex(LCurly);
4514
4515 if (!ParseRes.isSuccess())
4516 return ParseRes;
4517
4518 int64_t PrevReg = FirstReg;
4519 unsigned Count = 1;
4520
4521 int Stride = 1;
4522 if (parseOptionalToken(AsmToken::Minus)) {
4523 SMLoc Loc = getLoc();
4524 StringRef NextKind;
4525
// NOTE(review): original line 4526 (presumably "MCRegister Reg;") was
// dropped by this extraction — confirm upstream.
4527 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4528 if (!ParseRes.isSuccess())
4529 return ParseRes;
4530
4531 // Any Kind suffices must match on all regs in the list.
4532 if (Kind != NextKind)
4533 return Error(Loc, "mismatched register size suffix");
4534
// Range length, allowing wraparound past the last register of the kind.
4535 unsigned Space =
4536 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4537
4538 if (Space == 0 || Space > 3)
4539 return Error(Loc, "invalid number of vectors");
4540
4541 Count += Space;
4542 }
4543 else {
// Comma form: the stride is fixed by the first two registers and every
// subsequent register must follow it (modulo NumRegs).
4544 bool HasCalculatedStride = false;
4545 while (parseOptionalToken(AsmToken::Comma)) {
4546 SMLoc Loc = getLoc();
4547 StringRef NextKind;
// NOTE(review): original line 4548 (presumably "MCRegister Reg;") was
// dropped by this extraction — confirm upstream.
4549 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4550 if (!ParseRes.isSuccess())
4551 return ParseRes;
4552
4553 // Any Kind suffices must match on all regs in the list.
4554 if (Kind != NextKind)
4555 return Error(Loc, "mismatched register size suffix");
4556
4557 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4558 unsigned PrevRegVal =
4559 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4560 if (!HasCalculatedStride) {
4561 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4562 : (RegVal + NumRegs - PrevRegVal);
4563 HasCalculatedStride = true;
4564 }
4565
4566 // Register must be incremental (with a wraparound at last register).
4567 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4568 return Error(Loc, "registers must have the same sequential stride");
4569
4570 PrevReg = Reg;
4571 ++Count;
4572 }
4573 }
4574
4575 if (parseToken(AsmToken::RCurly, "'}' expected"))
4576 return ParseStatus::Failure;
4577
4578 if (Count > 4)
4579 return Error(S, "invalid number of vectors");
4580
4581 unsigned NumElements = 0;
4582 unsigned ElementWidth = 0;
4583 if (!Kind.empty()) {
4584 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4585 std::tie(NumElements, ElementWidth) = *VK;
4586 }
4587
4588 Operands.push_back(AArch64Operand::CreateVectorList(
4589 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4590 getLoc(), getContext()));
4591
4592 return ParseStatus::Success;
4593}
4594
4595/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Returns true on error, false on success; a trailing "[index]" is optional
/// and only a hard failure there fails the parse.
4596bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4597 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4598 if (!ParseRes.isSuccess())
4599 return true;
4600
4601 return tryParseVectorIndex(Operands).isFailure();
4602}
4603
/// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
/// ", #0" (as used by e.g. LDRAA-style addressing); any index other than an
/// absent one or a literal zero is rejected.
4604ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4605 SMLoc StartLoc = getLoc();
4606
4607 MCRegister RegNum;
4608 ParseStatus Res = tryParseScalarRegister(RegNum);
4609 if (!Res.isSuccess())
4610 return Res;
4611
4612 if (!parseOptionalToken(AsmToken::Comma)) {
4613 Operands.push_back(AArch64Operand::CreateReg(
4614 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4615 return ParseStatus::Success;
4616 }
4617
// The '#' before the zero is optional.
4618 parseOptionalToken(AsmToken::Hash);
4619
4620 if (getTok().isNot(AsmToken::Integer))
4621 return Error(getLoc(), "index must be absent or #0");
4622
4623 const MCExpr *ImmVal;
4624 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4625 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4626 return Error(getLoc(), "index must be absent or #0");
4627
// The ", #0" carries no information, so only the register operand is pushed.
4628 Operands.push_back(AArch64Operand::CreateReg(
4629 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4630 return ParseStatus::Success;
4631}
4632
/// tryParseZTOperand - Parse an SME2 lookup-table register (ZT0), optionally
/// followed by "[<imm>]" with an optional ", mul vl"-style decoration inside
/// the brackets.
4633ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4634 SMLoc StartLoc = getLoc();
4635 const AsmToken &Tok = getTok();
4636 std::string Name = Tok.getString().lower();
4637
4638 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4639
4640 if (RegNum == 0)
4641 return ParseStatus::NoMatch;
4642
4643 Operands.push_back(AArch64Operand::CreateReg(
4644 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4645 Lex(); // Eat register.
4646
4647 // Check if register is followed by an index
4648 if (parseOptionalToken(AsmToken::LBrac)) {
// The brackets are emitted as literal tokens so the matcher can see them.
4649 Operands.push_back(
4650 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4651 const MCExpr *ImmVal;
4652 if (getParser().parseExpression(ImmVal))
4653 return ParseStatus::NoMatch;
4654 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4655 if (!MCE)
4656 return TokError("immediate value expected for vector index");
4657 Operands.push_back(AArch64Operand::CreateImm(
4658 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4659 getLoc(), getContext()));
4660 if (parseOptionalToken(AsmToken::Comma))
4661 if (parseOptionalMulOperand(Operands))
4662 return ParseStatus::Failure;
4663 if (parseToken(AsmToken::RBrac, "']' expected"))
4664 return ParseStatus::Failure;
4665 Operands.push_back(
4666 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4667 }
4668 return ParseStatus::Success;
4669}
4670
/// tryParseGPROperand - Parse a scalar GPR operand, optionally followed by a
/// ", <shift|extend> #amount" modifier when ParseShiftExtend is set. EqTy
/// records the register-equality constraint used for tied-operand checking.
4671template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4672ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4673 SMLoc StartLoc = getLoc();
4674
4675 MCRegister RegNum;
4676 ParseStatus Res = tryParseScalarRegister(RegNum);
4677 if (!Res.isSuccess())
4678 return Res;
4679
4680 // No shift/extend is the default.
4681 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4682 Operands.push_back(AArch64Operand::CreateReg(
4683 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4684 return ParseStatus::Success;
4685 }
4686
4687 // Eat the comma
4688 Lex();
4689
4690 // Match the shift
// NOTE(review): original line 4691 (presumably the declaration of the
// temporary operand vector "ExtOpnd" used below) was dropped by this
// extraction — confirm upstream.
4692 Res = tryParseOptionalShiftExtend(ExtOpnd);
4693 if (!Res.isSuccess())
4694 return Res;
4695
// Fold the parsed shift/extend into a single register operand rather than
// keeping it as a separate operand.
4696 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4697 Operands.push_back(AArch64Operand::CreateReg(
4698 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4699 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4700 Ext->hasShiftExtendAmount()));
4701
4702 return ParseStatus::Success;
4703}
4704
/// parseOptionalMulOperand - Parse the SVE "mul vl" / "mul #<imm>" decoration
/// that can follow an immediate. Returns true on error or when there is no
/// "mul" here, false when tokens/operands were pushed successfully.
4705bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4706 MCAsmParser &Parser = getParser();
4707
4708 // Some SVE instructions have a decoration after the immediate, i.e.
4709 // "mul vl". We parse them here and add tokens, which must be present in the
4710 // asm string in the tablegen instruction.
// Peek past "mul" without consuming, so an unrelated identifier is left
// untouched for other parsers.
4711 bool NextIsVL =
4712 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4713 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4714 if (!getTok().getString().equals_insensitive("mul") ||
4715 !(NextIsVL || NextIsHash))
4716 return true;
4717
4718 Operands.push_back(
4719 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4720 Lex(); // Eat the "mul"
4721
4722 if (NextIsVL) {
4723 Operands.push_back(
4724 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4725 Lex(); // Eat the "vl"
4726 return false;
4727 }
4728
4729 if (NextIsHash) {
4730 Lex(); // Eat the #
4731 SMLoc S = getLoc();
4732
4733 // Parse immediate operand.
4734 const MCExpr *ImmVal;
4735 if (!Parser.parseExpression(ImmVal))
4736 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4737 Operands.push_back(AArch64Operand::CreateImm(
4738 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4739 getContext()));
4740 return false;
4741 }
4742 }
4743
// Reached only when "mul" was followed by something other than "vl" or a
// constant immediate.
4744 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4745}
4746
/// parseOptionalVGOperand - Recognize an SME "vgx2"/"vgx4" vector-group
/// decoration. On match, VecGroup is set and the token is consumed; returns
/// true when no VG decoration is present (nothing consumed).
4747bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4748 StringRef &VecGroup) {
4749 MCAsmParser &Parser = getParser();
4750 auto Tok = Parser.getTok();
4751 if (Tok.isNot(AsmToken::Identifier))
4752 return true;
4753
// NOTE(review): original line 4754 (presumably the "StringRef VG =
// StringSwitch<StringRef>(Tok.getString())" line starting this match) was
// dropped by this extraction — confirm upstream.
4755 .Case("vgx2", "vgx2")
4756 .Case("vgx4", "vgx4")
4757 .Default("");
4758
4759 if (VG.empty())
4760 return true;
4761
4762 VecGroup = VG;
4763 Parser.Lex(); // Eat vgx[2|4]
4764 return false;
4765}
4766
/// parseKeywordOperand - Consume an identifier as a literal token operand
/// (used for keywords like "sm"/"za" in SMSTART/SMSTOP). Returns true only
/// when the current token is not an identifier.
4767bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4768 auto Tok = getTok();
4769 if (Tok.isNot(AsmToken::Identifier))
4770 return true;
4771
4772 auto Keyword = Tok.getString();
// NOTE(review): original line 4773 (presumably "Keyword =
// StringSwitch<StringRef>(Keyword.lower())" or similar, beginning the match
// whose cases follow) was dropped by this extraction — confirm upstream.
4774 .Case("sm", "sm")
4775 .Case("za", "za")
4776 .Default(Keyword);
4777 Operands.push_back(
4778 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4779
4780 Lex();
4781 return false;
4782}
4783
4784/// parseOperand - Parse a arm instruction operand. For now this parses the
4785/// operand regardless of the mnemonic.
4786bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4787 bool invertCondCode) {
4788 MCAsmParser &Parser = getParser();
4789
4790 ParseStatus ResTy =
4791 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4792
4793 // Check if the current operand has a custom associated parser, if so, try to
4794 // custom parse the operand, or fallback to the general approach.
4795 if (ResTy.isSuccess())
4796 return false;
4797 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4798 // there was a match, but an error occurred, in which case, just return that
4799 // the operand parsing failed.
4800 if (ResTy.isFailure())
4801 return true;
4802
4803 // Nothing custom, so do general case parsing.
4804 SMLoc S, E;
4805 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4806 if (parseOptionalToken(AsmToken::Comma)) {
4807 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4808 if (!Res.isNoMatch())
4809 return Res.isFailure();
4810 getLexer().UnLex(SavedTok);
4811 }
4812 return false;
4813 };
4814 switch (getLexer().getKind()) {
4815 default: {
4816 SMLoc S = getLoc();
4817 const MCExpr *Expr;
4818 if (parseSymbolicImmVal(Expr))
4819 return Error(S, "invalid operand");
4820
4821 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4822 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4823 return parseOptionalShiftExtend(getTok());
4824 }
4825 case AsmToken::LBrac: {
4826 Operands.push_back(
4827 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4828 Lex(); // Eat '['
4829
4830 // There's no comma after a '[', so we can parse the next operand
4831 // immediately.
4832 return parseOperand(Operands, false, false);
4833 }
4834 case AsmToken::LCurly: {
4835 if (!parseNeonVectorList(Operands))
4836 return false;
4837
4838 Operands.push_back(
4839 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4840 Lex(); // Eat '{'
4841
4842 // There's no comma after a '{', so we can parse the next operand
4843 // immediately.
4844 return parseOperand(Operands, false, false);
4845 }
4846 case AsmToken::Identifier: {
4847 // See if this is a "VG" decoration used by SME instructions.
4848 StringRef VecGroup;
4849 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4850 Operands.push_back(
4851 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4852 return false;
4853 }
4854 // If we're expecting a Condition Code operand, then just parse that.
4855 if (isCondCode)
4856 return parseCondCode(Operands, invertCondCode);
4857
4858 // If it's a register name, parse it.
4859 if (!parseRegister(Operands)) {
4860 // Parse an optional shift/extend modifier.
4861 AsmToken SavedTok = getTok();
4862 if (parseOptionalToken(AsmToken::Comma)) {
4863 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4864 // such cases and don't report an error when <label> happens to match a
4865 // shift/extend modifier.
4866 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4867 /*ParseForAllFeatures=*/true);
4868 if (!Res.isNoMatch())
4869 return Res.isFailure();
4870 Res = tryParseOptionalShiftExtend(Operands);
4871 if (!Res.isNoMatch())
4872 return Res.isFailure();
4873 getLexer().UnLex(SavedTok);
4874 }
4875 return false;
4876 }
4877
4878 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4879 // by SVE instructions.
4880 if (!parseOptionalMulOperand(Operands))
4881 return false;
4882
4883 // If this is a two-word mnemonic, parse its special keyword
4884 // operand as an identifier.
4885 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4886 Mnemonic == "gcsb")
4887 return parseKeywordOperand(Operands);
4888
4889 // This was not a register so parse other operands that start with an
4890 // identifier (like labels) as expressions and create them as immediates.
4891 const MCExpr *IdVal;
4892 S = getLoc();
4893 if (getParser().parseExpression(IdVal))
4894 return true;
4895 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4896 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4897 return false;
4898 }
4899 case AsmToken::Integer:
4900 case AsmToken::Real:
4901 case AsmToken::Hash: {
4902 // #42 -> immediate.
4903 S = getLoc();
4904
4905 parseOptionalToken(AsmToken::Hash);
4906
4907 // Parse a negative sign
4908 bool isNegative = false;
4909 if (getTok().is(AsmToken::Minus)) {
4910 isNegative = true;