//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};
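
// Illustrative reference (not from the original source): typical assembly
// spellings for each RegKind are "x0"/"w0"/"d0" (Scalar), "v0.8b"
// (NeonVector), "z0.s" (SVEDataVector), "p0.b" (SVEPredicateVector),
// "pn8" (SVEPredicateAsCounter), "za0.s" (Matrix), and "zt0" (LookupTable).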

enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;
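
  // Illustrative example of the pairing this tracks: a predicated
  //   movprfx z0.s, p0/m, z1.s
  // must be immediately followed by a destructive SVE operation with the
  // same destination (z0), the same governing predicate (p0), and a
  // matching element size, e.g.
  //   add z0.s, p0/m, z0.s, z2.s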

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(MCRegister &Reg);
  OperandMatchResultTy tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseRPRFMOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  OperandMatchResultTy tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  OperandMatchResultTy tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives, as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");
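
    // Illustrative example: after these aliases, the following pairs of
    // directives emit identical bytes:
    //   .hword 0x1234        // same as .2byte 0x1234
    //   .xword 0x123456789a  // same as .8byte 0x123456789a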

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                     SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
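
  // Illustrative example of the combined register + shift/extend operand
  // described above: in "ldr x0, [x1, w2, sxtw #3]", the "w2, sxtw #3" part
  // is parsed as one RegOp whose ShiftExtend records SXTW with an explicit
  // amount of 3, letting the matcher diagnose a wrong scaling directly.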

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around, as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
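
  // Illustrative examples (not from the original source): for T = uint64_t,
  // 0x00ff00ff00ff00ff is a valid logical immediate (a rotated run of eight
  // ones replicated in each 16-bit subpattern), whereas 0x1234 is not
  // encodable as a bitmask immediate.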

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
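
  // Illustrative examples (not from the original source): with Width = 12,
  // a parsed "#1, lsl #12" yields (1, 12), and a plain "#0x3000" also yields
  // (3, 12) because 0x3000 == 3 << 12; "#5" yields (5, 0).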

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
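
  // Illustrative examples (not from the original source): "add x0, x1, #4095"
  // matches directly, "add x0, x1, #1, lsl #12" matches as a shifted
  // immediate, and a plain "#4096" also matches because getShiftedVal<12>()
  // folds it to (1, lsl #12).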

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For an element width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed or unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
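
  // Illustrative examples (not from the original source): for 16-bit
  // elements, "mov z0.h, #127" and "mov z0.h, #256" (a multiple of 256 in
  // range) are accepted, while "#257" is rejected as not encodable.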

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
  }
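
  // Illustrative example (not from the original source): with N = 19, as
  // used by 19-bit branch targets such as B.cond, the offset must be 4-byte
  // aligned and within [-(1 << 20), (1 << 20) - 4], i.e. -1048576 to 1048572
  // bytes.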

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
               Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR, or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
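
  // Illustrative example (not from the original source): for Width = 64,
  // "ldr x0, [x1, #-8]" has no legal scaled encoding, so the alias falls
  // back to LDUR; #-8 satisfies isSImm9OffsetFB, while #8 does not because
  // it is already a legal scaled LDR offset.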

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
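
  // Illustrative example (not from the original source): ADRP computes a
  // 4KiB-page-relative address with a +/-4GiB reach, so a constant operand
  // such as #0x1000 must be page-aligned and within that range.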

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
        /* DReg */ { AArch64::Q0,
                     AArch64::D0,       AArch64::D0_D1,
                     AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
        /* QReg */ { AArch64::Q0,
                     AArch64::Q0,       AArch64::Q0_Q1,
                     AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
        /* ZReg */ { AArch64::Z0,
                     AArch64::Z0,       AArch64::Z0_Z1,
                     AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
        /* PReg */ { AArch64::P0,
                     AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
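
  // Illustrative example (not from the original source): for a D-register
  // list "{ v1.8b, v2.8b }" (RegTy = VecListIdx_DReg, NumRegs = 2), the
  // list start is recorded as Q1, and the table above rebases it to the
  // two-register tuple D1_D2.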

  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1826
1827 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1828 assert(N == 1 && "Invalid number of operands!");
1829 unsigned RegMask = getMatrixTileListRegMask();
1830 assert(RegMask <= 0xFF && "Invalid mask!");
1831 Inst.addOperand(MCOperand::createImm(RegMask));
1832 }
1833
1834 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1835 assert(N == 1 && "Invalid number of operands!");
1836 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1837 }
1838
1839 template <unsigned ImmIs0, unsigned ImmIs1>
1840 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1841 assert(N == 1 && "Invalid number of operands!");
1842 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1843 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1844 }
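// The encoded value is a single selector bit: 0 picks the ImmIs0 constant and
// 1 the ImmIs1 constant, matching SVE forms such as fadd/fsub (#0.5 or #1.0)
// and fmul (#0.5 or #2.0) that accept exactly two floating-point immediates.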
1845
1846 void addImmOperands(MCInst &Inst, unsigned N) const {
1847 assert(N == 1 && "Invalid number of operands!");
1848 // If this is a pageoff symrefexpr with an addend, adjust the addend
1849 // to be only the page-offset portion. Otherwise, just add the expr
1850 // as-is.
1851 addExpr(Inst, getImm());
1852 }
1853
1854 template <int Shift>
1855 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1856 assert(N == 2 && "Invalid number of operands!");
1857 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1858 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1859 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1860 } else if (isShiftedImm()) {
1861 addExpr(Inst, getShiftedImmVal());
1862 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1863 } else {
1864 addExpr(Inst, getImm());
1865 Inst.addOperand(MCOperand::createImm(0));
1866 }
1867 }
1868
1869 template <int Shift>
1870 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1871 assert(N == 2 && "Invalid number of operands!");
1872 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1873 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1874 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1875 } else
1876 llvm_unreachable("Not a shifted negative immediate");
1877 }
1878
1879 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1880 assert(N == 1 && "Invalid number of operands!");
1881 Inst.addOperand(MCOperand::createImm(getCondCode()));
1882 }
1883
1884 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1885 assert(N == 1 && "Invalid number of operands!");
1886 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1887 if (!MCE)
1888 addExpr(Inst, getImm());
1889 else
1890 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1891 }
1892
1893 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1894 addImmOperands(Inst, N);
1895 }
1896
1897 template<int Scale>
1898 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1899 assert(N == 1 && "Invalid number of operands!");
1900 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1901
1902 if (!MCE) {
1903 Inst.addOperand(MCOperand::createExpr(getImm()));
1904 return;
1905 }
1906 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1907 }
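// Scaled 12-bit offsets are stored divided by the access size; e.g. for
// "ldr x0, [x1, #16]" the scale is 8, so the offset field holds 2. A
// non-constant expression is passed through unscaled for the fixup to handle.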
1908
1909 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1910 assert(N == 1 && "Invalid number of operands!");
1911 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1912 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1913 }
1914
1915 template <int Scale>
1916 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1917 assert(N == 1 && "Invalid number of operands!");
1918 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1919 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1920 }
1921
1922 template <int Scale>
1923 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1924 assert(N == 1 && "Invalid number of operands!");
1925 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1926 }
1927
1928 template <typename T>
1929 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1930 assert(N == 1 && "Invalid number of operands!");
1931 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1932 std::make_unsigned_t<T> Val = MCE->getValue();
1933 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1934 Inst.addOperand(MCOperand::createImm(encoding));
1935 }
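// encodeLogicalImmediate packs the value into the N:immr:imms bitmask fields
// used by AND/ORR/EOR. E.g. the 32-bit value 0x00ff00ff is a repeating 16-bit
// element with eight set bits and is encodable, while an irregular pattern
// like 0x12345678 is not and is rejected earlier during operand matching.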
1936
1937 template <typename T>
1938 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1939 assert(N == 1 && "Invalid number of operands!");
1940 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1941 std::make_unsigned_t<T> Val = ~MCE->getValue();
1942 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1943 Inst.addOperand(MCOperand::createImm(encoding));
1944 }
1945
1946 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1947 assert(N == 1 && "Invalid number of operands!");
1948 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1949 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1950 Inst.addOperand(MCOperand::createImm(encoding));
1951 }
1952
1953 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1954 // Branch operands don't encode the low bits, so shift them off
1955 // here. If it's a label, however, just put it on directly as there's
1956 // not enough information now to do anything.
1957 assert(N == 1 && "Invalid number of operands!");
1958 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1959 if (!MCE) {
1960 addExpr(Inst, getImm());
1961 return;
1962 }
1963 assert(MCE && "Invalid constant immediate operand!");
1964 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1965 }
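// A64 instructions are always 4-byte aligned, so branch targets omit the low
// two bits: this 26-bit field (B/BL) spans +/-128 MiB, and the 19-bit and
// 14-bit variants below scale the same way for their smaller ranges.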
1966
1967 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1968 // Branch operands don't encode the low bits, so shift them off
1969 // here. If it's a label, however, just put it on directly as there's
1970 // not enough information now to do anything.
1971 assert(N == 1 && "Invalid number of operands!");
1972 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1973 if (!MCE) {
1974 addExpr(Inst, getImm());
1975 return;
1976 }
1977 assert(MCE && "Invalid constant immediate operand!");
1978 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1979 }
1980
1981 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1982 // Branch operands don't encode the low bits, so shift them off
1983 // here. If it's a label, however, just put it on directly as there's
1984 // not enough information now to do anything.
1985 assert(N == 1 && "Invalid number of operands!");
1986 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1987 if (!MCE) {
1988 addExpr(Inst, getImm());
1989 return;
1990 }
1991 assert(MCE && "Invalid constant immediate operand!");
1992 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1993 }
1994
1995 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1996 assert(N == 1 && "Invalid number of operands!");
1997 Inst.addOperand(MCOperand::createImm(
1998 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1999 }
2000
2001 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2002 assert(N == 1 && "Invalid number of operands!");
2003 Inst.addOperand(MCOperand::createImm(getBarrier()));
2004 }
2005
2006 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2007 assert(N == 1 && "Invalid number of operands!");
2008 Inst.addOperand(MCOperand::createImm(getBarrier()));
2009 }
2010
2011 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2012 assert(N == 1 && "Invalid number of operands!");
2013
2014 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2015 }
2016
2017 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2018 assert(N == 1 && "Invalid number of operands!");
2019
2020 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2021 }
2022
2023 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2024 assert(N == 1 && "Invalid number of operands!");
2025
2026 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2027 }
2028
2029 void addSVCROperands(MCInst &Inst, unsigned N) const {
2030 assert(N == 1 && "Invalid number of operands!");
2031
2032 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2033 }
2034
2035 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2036 assert(N == 1 && "Invalid number of operands!");
2037
2038 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2039 }
2040
2041 void addSysCROperands(MCInst &Inst, unsigned N) const {
2042 assert(N == 1 && "Invalid number of operands!");
2043 Inst.addOperand(MCOperand::createImm(getSysCR()));
2044 }
2045
2046 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2047 assert(N == 1 && "Invalid number of operands!");
2048 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2049 }
2050
2051 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2052 assert(N == 1 && "Invalid number of operands!");
2053 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2054 }
2055
2056 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2057 assert(N == 1 && "Invalid number of operands!");
2058 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2059 }
2060
2061 void addShifterOperands(MCInst &Inst, unsigned N) const {
2062 assert(N == 1 && "Invalid number of operands!");
2063 unsigned Imm =
2064 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2065 Inst.addOperand(MCOperand::createImm(Imm));
2066 }
2067
2068 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2069 assert(N == 1 && "Invalid number of operands!");
2070
2071 if (!isScalarReg())
2072 return;
2073
2074 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2075 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2076 .getRegister(RI->getEncodingValue(getReg()));
2077 if (Reg != AArch64::XZR)
2078 llvm_unreachable("wrong register");
2079
2080 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2081 }
2082
2083 void addExtendOperands(MCInst &Inst, unsigned N) const {
2084 assert(N == 1 && "Invalid number of operands!");
2085 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2086 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2087 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2088 Inst.addOperand(MCOperand::createImm(Imm));
2089 }
2090
2091 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2092 assert(N == 1 && "Invalid number of operands!");
2093 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2094 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2095 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2096 Inst.addOperand(MCOperand::createImm(Imm));
2097 }
2098
2099 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2100 assert(N == 2 && "Invalid number of operands!");
2101 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2102 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2103 Inst.addOperand(MCOperand::createImm(IsSigned));
2104 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2105 }
2106
2107 // For 8-bit load/store instructions with a register offset, both the
2108 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2109 // they're disambiguated by whether the shift was explicit or implicit rather
2110 // than its size.
2111 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2112 assert(N == 2 && "Invalid number of operands!");
2113 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2114 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2115 Inst.addOperand(MCOperand::createImm(IsSigned));
2116 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2117 }
2118
2119 template<int Shift>
2120 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2121 assert(N == 1 && "Invalid number of operands!");
2122
2123 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2124 if (CE) {
2125 uint64_t Value = CE->getValue();
2126 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2127 } else {
2128 addExpr(Inst, getImm());
2129 }
2130 }
2131
2132 template<int Shift>
2133 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2134 assert(N == 1 && "Invalid number of operands!");
2135
2136 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2137 uint64_t Value = CE->getValue();
2138 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2139 }
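// Example of the alias arithmetic: "mov x0, #-2" matches the MOVN alias, and
// with Shift = 0 this computes (~0xfffffffffffffffe >> 0) & 0xffff = 0x1,
// emitting "movn x0, #1" (whose result, ~1, is -2 as intended).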
2140
2141 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2142 assert(N == 1 && "Invalid number of operands!");
2143 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2144 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2145 }
2146
2147 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2148 assert(N == 1 && "Invalid number of operands!");
2149 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2150 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2151 }
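// FCMLA accepts rotations {0, 90, 180, 270}, encoded as value/90 (the "even"
// form); FCADD accepts only {90, 270}, encoded as (value - 90)/180 (the "odd"
// form). Out-of-set rotations are rejected earlier during operand matching.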
2152
2153 void print(raw_ostream &OS) const override;
2154
2155 static std::unique_ptr<AArch64Operand>
2156 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2157 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2158 Op->Tok.Data = Str.data();
2159 Op->Tok.Length = Str.size();
2160 Op->Tok.IsSuffix = IsSuffix;
2161 Op->StartLoc = S;
2162 Op->EndLoc = S;
2163 return Op;
2164 }
2165
2166 static std::unique_ptr<AArch64Operand>
2167 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2168 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2169 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2170 unsigned ShiftAmount = 0,
2171 unsigned HasExplicitAmount = false) {
2172 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2173 Op->Reg.RegNum = RegNum;
2174 Op->Reg.Kind = Kind;
2175 Op->Reg.ElementWidth = 0;
2176 Op->Reg.EqualityTy = EqTy;
2177 Op->Reg.ShiftExtend.Type = ExtTy;
2178 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2179 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2180 Op->StartLoc = S;
2181 Op->EndLoc = E;
2182 return Op;
2183 }
2184
2185 static std::unique_ptr<AArch64Operand>
2186 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2187 SMLoc S, SMLoc E, MCContext &Ctx,
2188 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2189 unsigned ShiftAmount = 0,
2190 unsigned HasExplicitAmount = false) {
2191 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2192 Kind == RegKind::SVEPredicateVector ||
2193 Kind == RegKind::SVEPredicateAsCounter) &&
2194 "Invalid vector kind");
2195 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2196 HasExplicitAmount);
2197 Op->Reg.ElementWidth = ElementWidth;
2198 return Op;
2199 }
2200
2201 static std::unique_ptr<AArch64Operand>
2202 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2203 unsigned NumElements, unsigned ElementWidth,
2204 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2205 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2206 Op->VectorList.RegNum = RegNum;
2207 Op->VectorList.Count = Count;
2208 Op->VectorList.Stride = Stride;
2209 Op->VectorList.NumElements = NumElements;
2210 Op->VectorList.ElementWidth = ElementWidth;
2211 Op->VectorList.RegisterKind = RegisterKind;
2212 Op->StartLoc = S;
2213 Op->EndLoc = E;
2214 return Op;
2215 }
2216
2217 static std::unique_ptr<AArch64Operand>
2218 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2219 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2220 Op->VectorIndex.Val = Idx;
2221 Op->StartLoc = S;
2222 Op->EndLoc = E;
2223 return Op;
2224 }
2225
2226 static std::unique_ptr<AArch64Operand>
2227 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2228 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2229 Op->MatrixTileList.RegMask = RegMask;
2230 Op->StartLoc = S;
2231 Op->EndLoc = E;
2232 return Op;
2233 }
2234
2235 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2236 const unsigned ElementWidth) {
2237 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2238 RegMap = {
2239 {{0, AArch64::ZAB0},
2240 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2241 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2242 {{8, AArch64::ZAB0},
2243 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2244 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2245 {{16, AArch64::ZAH0},
2246 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2247 {{16, AArch64::ZAH1},
2248 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2249 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2250 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2251 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2252 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2253 };
2254
2255 if (ElementWidth == 64)
2256 OutRegs.insert(Reg);
2257 else {
2258 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2259 assert(!Regs.empty() && "Invalid tile or element width!");
2260 for (auto OutReg : Regs)
2261 OutRegs.insert(OutReg);
2262 }
2263 }
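// The RegMap above makes SME tile aliasing explicit: every narrower-element ZA
// tile is a strided view over the eight 64-bit ZAD tiles (e.g. za1.h covers
// ZAD1/ZAD3/ZAD5/ZAD7), so expanding each tile to its ZAD granules reduces
// overlap detection to a simple set-membership test.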
2264
2265 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2266 SMLoc E, MCContext &Ctx) {
2267 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2268 Op->Imm.Val = Val;
2269 Op->StartLoc = S;
2270 Op->EndLoc = E;
2271 return Op;
2272 }
2273
2274 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2275 unsigned ShiftAmount,
2276 SMLoc S, SMLoc E,
2277 MCContext &Ctx) {
2278 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2279 Op->ShiftedImm.Val = Val;
2280 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2281 Op->StartLoc = S;
2282 Op->EndLoc = E;
2283 return Op;
2284 }
2285
2286 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2287 unsigned Last, SMLoc S,
2288 SMLoc E,
2289 MCContext &Ctx) {
2290 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2291 Op->ImmRange.First = First;
2292 Op->ImmRange.Last = Last;
 Op->StartLoc = S;
2293 Op->EndLoc = E;
2294 return Op;
2295 }
2296
2297 static std::unique_ptr<AArch64Operand>
2298 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2299 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2300 Op->CondCode.Code = Code;
2301 Op->StartLoc = S;
2302 Op->EndLoc = E;
2303 return Op;
2304 }
2305
2306 static std::unique_ptr<AArch64Operand>
2307 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2308 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2309 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2310 Op->FPImm.IsExact = IsExact;
2311 Op->StartLoc = S;
2312 Op->EndLoc = S;
2313 return Op;
2314 }
2315
2316 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2317 StringRef Str,
2318 SMLoc S,
2319 MCContext &Ctx,
2320 bool HasnXSModifier) {
2321 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2322 Op->Barrier.Val = Val;
2323 Op->Barrier.Data = Str.data();
2324 Op->Barrier.Length = Str.size();
2325 Op->Barrier.HasnXSModifier = HasnXSModifier;
2326 Op->StartLoc = S;
2327 Op->EndLoc = S;
2328 return Op;
2329 }
2330
2331 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2332 uint32_t MRSReg,
2333 uint32_t MSRReg,
2334 uint32_t PStateField,
2335 MCContext &Ctx) {
2336 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2337 Op->SysReg.Data = Str.data();
2338 Op->SysReg.Length = Str.size();
2339 Op->SysReg.MRSReg = MRSReg;
2340 Op->SysReg.MSRReg = MSRReg;
2341 Op->SysReg.PStateField = PStateField;
2342 Op->StartLoc = S;
2343 Op->EndLoc = S;
2344 return Op;
2345 }
2346
2347 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2348 SMLoc E, MCContext &Ctx) {
2349 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2350 Op->SysCRImm.Val = Val;
2351 Op->StartLoc = S;
2352 Op->EndLoc = E;
2353 return Op;
2354 }
2355
2356 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2357 StringRef Str,
2358 SMLoc S,
2359 MCContext &Ctx) {
2360 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2361 Op->Prefetch.Val = Val;
2362 Op->Prefetch.Data = Str.data();
2363 Op->Prefetch.Length = Str.size();
2364 Op->StartLoc = S;
2365 Op->EndLoc = S;
2366 return Op;
2367 }
2368
2369 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2370 StringRef Str,
2371 SMLoc S,
2372 MCContext &Ctx) {
2373 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2374 Op->PSBHint.Val = Val;
2375 Op->PSBHint.Data = Str.data();
2376 Op->PSBHint.Length = Str.size();
2377 Op->StartLoc = S;
2378 Op->EndLoc = S;
2379 return Op;
2380 }
2381
2382 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2383 StringRef Str,
2384 SMLoc S,
2385 MCContext &Ctx) {
2386 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2387 Op->BTIHint.Val = Val | 32;
2388 Op->BTIHint.Data = Str.data();
2389 Op->BTIHint.Length = Str.size();
2390 Op->StartLoc = S;
2391 Op->EndLoc = S;
2392 return Op;
2393 }
2394
2395 static std::unique_ptr<AArch64Operand>
2396 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2397 SMLoc S, SMLoc E, MCContext &Ctx) {
2398 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2399 Op->MatrixReg.RegNum = RegNum;
2400 Op->MatrixReg.ElementWidth = ElementWidth;
2401 Op->MatrixReg.Kind = Kind;
2402 Op->StartLoc = S;
2403 Op->EndLoc = E;
2404 return Op;
2405 }
2406
2407 static std::unique_ptr<AArch64Operand>
2408 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2409 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2410 Op->SVCR.PStateField = PStateField;
2411 Op->SVCR.Data = Str.data();
2412 Op->SVCR.Length = Str.size();
2413 Op->StartLoc = S;
2414 Op->EndLoc = S;
2415 return Op;
2416 }
2417
2418 static std::unique_ptr<AArch64Operand>
2419 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2420 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2421 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2422 Op->ShiftExtend.Type = ShOp;
2423 Op->ShiftExtend.Amount = Val;
2424 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2425 Op->StartLoc = S;
2426 Op->EndLoc = E;
2427 return Op;
2428 }
2429};
2430
2431} // end anonymous namespace.
2432
2433void AArch64Operand::print(raw_ostream &OS) const {
2434 switch (Kind) {
2435 case k_FPImm:
2436 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2437 if (!getFPImmIsExact())
2438 OS << " (inexact)";
2439 OS << ">";
2440 break;
2441 case k_Barrier: {
2442 StringRef Name = getBarrierName();
2443 if (!Name.empty())
2444 OS << "<barrier " << Name << ">";
2445 else
2446 OS << "<barrier invalid #" << getBarrier() << ">";
2447 break;
2448 }
2449 case k_Immediate:
2450 OS << *getImm();
2451 break;
2452 case k_ShiftedImm: {
2453 unsigned Shift = getShiftedImmShift();
2454 OS << "<shiftedimm ";
2455 OS << *getShiftedImmVal();
2456 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2457 break;
2458 }
2459 case k_ImmRange: {
2460 OS << "<immrange ";
2461 OS << getFirstImmVal();
2462 OS << ":" << getLastImmVal() << ">";
2463 break;
2464 }
2465 case k_CondCode:
2466 OS << "<condcode " << getCondCode() << ">";
2467 break;
2468 case k_VectorList: {
2469 OS << "<vectorlist ";
2470 unsigned Reg = getVectorListStart();
2471 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2472 OS << Reg + i * getVectorListStride() << " ";
2473 OS << ">";
2474 break;
2475 }
2476 case k_VectorIndex:
2477 OS << "<vectorindex " << getVectorIndex() << ">";
2478 break;
2479 case k_SysReg:
2480 OS << "<sysreg: " << getSysReg() << '>';
2481 break;
2482 case k_Token:
2483 OS << "'" << getToken() << "'";
2484 break;
2485 case k_SysCR:
2486 OS << "c" << getSysCR();
2487 break;
2488 case k_Prefetch: {
2489 StringRef Name = getPrefetchName();
2490 if (!Name.empty())
2491 OS << "<prfop " << Name << ">";
2492 else
2493 OS << "<prfop invalid #" << getPrefetch() << ">";
2494 break;
2495 }
2496 case k_PSBHint:
2497 OS << getPSBHintName();
2498 break;
2499 case k_BTIHint:
2500 OS << getBTIHintName();
2501 break;
2502 case k_MatrixRegister:
2503 OS << "<matrix " << getMatrixReg() << ">";
2504 break;
2505 case k_MatrixTileList: {
2506 OS << "<matrixlist ";
2507 unsigned RegMask = getMatrixTileListRegMask();
2508 unsigned MaxBits = 8;
2509 for (unsigned I = MaxBits; I > 0; --I)
2510 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2511 OS << '>';
2512 break;
2513 }
2514 case k_SVCR: {
2515 OS << getSVCR();
2516 break;
2517 }
2518 case k_Register:
2519 OS << "<register " << getReg() << ">";
2520 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2521 break;
2522 [[fallthrough]];
2523 case k_ShiftExtend:
2524 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2525 << getShiftExtendAmount();
2526 if (!hasShiftExtendAmount())
2527 OS << "<imp>";
2528 OS << '>';
2529 break;
2530 }
2531}
2532
2533/// @name Auto-generated Match Functions
2534/// {
2535
2536 static unsigned MatchRegisterName(StringRef Name);
2537
2538/// }
2539
2540 static unsigned MatchNeonVectorRegName(StringRef Name) {
2541 return StringSwitch<unsigned>(Name.lower())
2542 .Case("v0", AArch64::Q0)
2543 .Case("v1", AArch64::Q1)
2544 .Case("v2", AArch64::Q2)
2545 .Case("v3", AArch64::Q3)
2546 .Case("v4", AArch64::Q4)
2547 .Case("v5", AArch64::Q5)
2548 .Case("v6", AArch64::Q6)
2549 .Case("v7", AArch64::Q7)
2550 .Case("v8", AArch64::Q8)
2551 .Case("v9", AArch64::Q9)
2552 .Case("v10", AArch64::Q10)
2553 .Case("v11", AArch64::Q11)
2554 .Case("v12", AArch64::Q12)
2555 .Case("v13", AArch64::Q13)
2556 .Case("v14", AArch64::Q14)
2557 .Case("v15", AArch64::Q15)
2558 .Case("v16", AArch64::Q16)
2559 .Case("v17", AArch64::Q17)
2560 .Case("v18", AArch64::Q18)
2561 .Case("v19", AArch64::Q19)
2562 .Case("v20", AArch64::Q20)
2563 .Case("v21", AArch64::Q21)
2564 .Case("v22", AArch64::Q22)
2565 .Case("v23", AArch64::Q23)
2566 .Case("v24", AArch64::Q24)
2567 .Case("v25", AArch64::Q25)
2568 .Case("v26", AArch64::Q26)
2569 .Case("v27", AArch64::Q27)
2570 .Case("v28", AArch64::Q28)
2571 .Case("v29", AArch64::Q29)
2572 .Case("v30", AArch64::Q30)
2573 .Case("v31", AArch64::Q31)
2574 .Default(0);
2575}
2576
2577/// Returns an optional pair of (#elements, element-width) if Suffix
2578/// is a valid vector kind. Where the number of elements in a vector
2579/// or the vector width is implicit or explicitly unknown (but still a
2580/// valid suffix kind), 0 is used.
2581static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2582 RegKind VectorKind) {
2583 std::pair<int, int> Res = {-1, -1};
2584
2585 switch (VectorKind) {
2586 case RegKind::NeonVector:
2587 Res =
2588 StringSwitch<std::pair<int, int>>(Suffix.lower())
2589 .Case("", {0, 0})
2590 .Case(".1d", {1, 64})
2591 .Case(".1q", {1, 128})
2592 // '.2h' needed for fp16 scalar pairwise reductions
2593 .Case(".2h", {2, 16})
2594 .Case(".2s", {2, 32})
2595 .Case(".2d", {2, 64})
2596 // '.4b' is another special case for the ARMv8.2a dot product
2597 // operand
2598 .Case(".4b", {4, 8})
2599 .Case(".4h", {4, 16})
2600 .Case(".4s", {4, 32})
2601 .Case(".8b", {8, 8})
2602 .Case(".8h", {8, 16})
2603 .Case(".16b", {16, 8})
2604 // Accept the width neutral ones, too, for verbose syntax. If those
2605 // aren't used in the right places, the token operand won't match so
2606 // all will work out.
2607 .Case(".b", {0, 8})
2608 .Case(".h", {0, 16})
2609 .Case(".s", {0, 32})
2610 .Case(".d", {0, 64})
2611 .Default({-1, -1});
2612 break;
2613 case RegKind::SVEPredicateAsCounter:
2614 case RegKind::SVEPredicateVector:
2615 case RegKind::SVEDataVector:
2616 case RegKind::Matrix:
2617 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2618 .Case("", {0, 0})
2619 .Case(".b", {0, 8})
2620 .Case(".h", {0, 16})
2621 .Case(".s", {0, 32})
2622 .Case(".d", {0, 64})
2623 .Case(".q", {0, 128})
2624 .Default({-1, -1});
2625 break;
2626 default:
2627 llvm_unreachable("Unsupported RegKind");
2628 }
2629
2630 if (Res == std::make_pair(-1, -1))
2631 return std::nullopt;
2632
2633 return std::optional<std::pair<int, int>>(Res);
2634}
2635
2636static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2637 return parseVectorKind(Suffix, VectorKind).has_value();
2638}
2639
2640 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2641 return StringSwitch<unsigned>(Name.lower())
2642 .Case("z0", AArch64::Z0)
2643 .Case("z1", AArch64::Z1)
2644 .Case("z2", AArch64::Z2)
2645 .Case("z3", AArch64::Z3)
2646 .Case("z4", AArch64::Z4)
2647 .Case("z5", AArch64::Z5)
2648 .Case("z6", AArch64::Z6)
2649 .Case("z7", AArch64::Z7)
2650 .Case("z8", AArch64::Z8)
2651 .Case("z9", AArch64::Z9)
2652 .Case("z10", AArch64::Z10)
2653 .Case("z11", AArch64::Z11)
2654 .Case("z12", AArch64::Z12)
2655 .Case("z13", AArch64::Z13)
2656 .Case("z14", AArch64::Z14)
2657 .Case("z15", AArch64::Z15)
2658 .Case("z16", AArch64::Z16)
2659 .Case("z17", AArch64::Z17)
2660 .Case("z18", AArch64::Z18)
2661 .Case("z19", AArch64::Z19)
2662 .Case("z20", AArch64::Z20)
2663 .Case("z21", AArch64::Z21)
2664 .Case("z22", AArch64::Z22)
2665 .Case("z23", AArch64::Z23)
2666 .Case("z24", AArch64::Z24)
2667 .Case("z25", AArch64::Z25)
2668 .Case("z26", AArch64::Z26)
2669 .Case("z27", AArch64::Z27)
2670 .Case("z28", AArch64::Z28)
2671 .Case("z29", AArch64::Z29)
2672 .Case("z30", AArch64::Z30)
2673 .Case("z31", AArch64::Z31)
2674 .Default(0);
2675}
2676
2677 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2678 return StringSwitch<unsigned>(Name.lower())
2679 .Case("p0", AArch64::P0)
2680 .Case("p1", AArch64::P1)
2681 .Case("p2", AArch64::P2)
2682 .Case("p3", AArch64::P3)
2683 .Case("p4", AArch64::P4)
2684 .Case("p5", AArch64::P5)
2685 .Case("p6", AArch64::P6)
2686 .Case("p7", AArch64::P7)
2687 .Case("p8", AArch64::P8)
2688 .Case("p9", AArch64::P9)
2689 .Case("p10", AArch64::P10)
2690 .Case("p11", AArch64::P11)
2691 .Case("p12", AArch64::P12)
2692 .Case("p13", AArch64::P13)
2693 .Case("p14", AArch64::P14)
2694 .Case("p15", AArch64::P15)
2695 .Default(0);
2696}
2697
2698 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2699 return StringSwitch<unsigned>(Name.lower())
2700 .Case("pn0", AArch64::P0)
2701 .Case("pn1", AArch64::P1)
2702 .Case("pn2", AArch64::P2)
2703 .Case("pn3", AArch64::P3)
2704 .Case("pn4", AArch64::P4)
2705 .Case("pn5", AArch64::P5)
2706 .Case("pn6", AArch64::P6)
2707 .Case("pn7", AArch64::P7)
2708 .Case("pn8", AArch64::P8)
2709 .Case("pn9", AArch64::P9)
2710 .Case("pn10", AArch64::P10)
2711 .Case("pn11", AArch64::P11)
2712 .Case("pn12", AArch64::P12)
2713 .Case("pn13", AArch64::P13)
2714 .Case("pn14", AArch64::P14)
2715 .Case("pn15", AArch64::P15)
2716 .Default(0);
2717}
2718
2719 static unsigned matchMatrixTileListRegName(StringRef Name) {
2720 return StringSwitch<unsigned>(Name.lower())
2721 .Case("za0.d", AArch64::ZAD0)
2722 .Case("za1.d", AArch64::ZAD1)
2723 .Case("za2.d", AArch64::ZAD2)
2724 .Case("za3.d", AArch64::ZAD3)
2725 .Case("za4.d", AArch64::ZAD4)
2726 .Case("za5.d", AArch64::ZAD5)
2727 .Case("za6.d", AArch64::ZAD6)
2728 .Case("za7.d", AArch64::ZAD7)
2729 .Case("za0.s", AArch64::ZAS0)
2730 .Case("za1.s", AArch64::ZAS1)
2731 .Case("za2.s", AArch64::ZAS2)
2732 .Case("za3.s", AArch64::ZAS3)
2733 .Case("za0.h", AArch64::ZAH0)
2734 .Case("za1.h", AArch64::ZAH1)
2735 .Case("za0.b", AArch64::ZAB0)
2736 .Default(0);
2737}
2738
2739 static unsigned matchMatrixRegName(StringRef Name) {
2740 return StringSwitch<unsigned>(Name.lower())
2741 .Case("za", AArch64::ZA)
2742 .Case("za0.q", AArch64::ZAQ0)
2743 .Case("za1.q", AArch64::ZAQ1)
2744 .Case("za2.q", AArch64::ZAQ2)
2745 .Case("za3.q", AArch64::ZAQ3)
2746 .Case("za4.q", AArch64::ZAQ4)
2747 .Case("za5.q", AArch64::ZAQ5)
2748 .Case("za6.q", AArch64::ZAQ6)
2749 .Case("za7.q", AArch64::ZAQ7)
2750 .Case("za8.q", AArch64::ZAQ8)
2751 .Case("za9.q", AArch64::ZAQ9)
2752 .Case("za10.q", AArch64::ZAQ10)
2753 .Case("za11.q", AArch64::ZAQ11)
2754 .Case("za12.q", AArch64::ZAQ12)
2755 .Case("za13.q", AArch64::ZAQ13)
2756 .Case("za14.q", AArch64::ZAQ14)
2757 .Case("za15.q", AArch64::ZAQ15)
2758 .Case("za0.d", AArch64::ZAD0)
2759 .Case("za1.d", AArch64::ZAD1)
2760 .Case("za2.d", AArch64::ZAD2)
2761 .Case("za3.d", AArch64::ZAD3)
2762 .Case("za4.d", AArch64::ZAD4)
2763 .Case("za5.d", AArch64::ZAD5)
2764 .Case("za6.d", AArch64::ZAD6)
2765 .Case("za7.d", AArch64::ZAD7)
2766 .Case("za0.s", AArch64::ZAS0)
2767 .Case("za1.s", AArch64::ZAS1)
2768 .Case("za2.s", AArch64::ZAS2)
2769 .Case("za3.s", AArch64::ZAS3)
2770 .Case("za0.h", AArch64::ZAH0)
2771 .Case("za1.h", AArch64::ZAH1)
2772 .Case("za0.b", AArch64::ZAB0)
2773 .Case("za0h.q", AArch64::ZAQ0)
2774 .Case("za1h.q", AArch64::ZAQ1)
2775 .Case("za2h.q", AArch64::ZAQ2)
2776 .Case("za3h.q", AArch64::ZAQ3)
2777 .Case("za4h.q", AArch64::ZAQ4)
2778 .Case("za5h.q", AArch64::ZAQ5)
2779 .Case("za6h.q", AArch64::ZAQ6)
2780 .Case("za7h.q", AArch64::ZAQ7)
2781 .Case("za8h.q", AArch64::ZAQ8)
2782 .Case("za9h.q", AArch64::ZAQ9)
2783 .Case("za10h.q", AArch64::ZAQ10)
2784 .Case("za11h.q", AArch64::ZAQ11)
2785 .Case("za12h.q", AArch64::ZAQ12)
2786 .Case("za13h.q", AArch64::ZAQ13)
2787 .Case("za14h.q", AArch64::ZAQ14)
2788 .Case("za15h.q", AArch64::ZAQ15)
2789 .Case("za0h.d", AArch64::ZAD0)
2790 .Case("za1h.d", AArch64::ZAD1)
2791 .Case("za2h.d", AArch64::ZAD2)
2792 .Case("za3h.d", AArch64::ZAD3)
2793 .Case("za4h.d", AArch64::ZAD4)
2794 .Case("za5h.d", AArch64::ZAD5)
2795 .Case("za6h.d", AArch64::ZAD6)
2796 .Case("za7h.d", AArch64::ZAD7)
2797 .Case("za0h.s", AArch64::ZAS0)
2798 .Case("za1h.s", AArch64::ZAS1)
2799 .Case("za2h.s", AArch64::ZAS2)
2800 .Case("za3h.s", AArch64::ZAS3)
2801 .Case("za0h.h", AArch64::ZAH0)
2802 .Case("za1h.h", AArch64::ZAH1)
2803 .Case("za0h.b", AArch64::ZAB0)
2804 .Case("za0v.q", AArch64::ZAQ0)
2805 .Case("za1v.q", AArch64::ZAQ1)
2806 .Case("za2v.q", AArch64::ZAQ2)
2807 .Case("za3v.q", AArch64::ZAQ3)
2808 .Case("za4v.q", AArch64::ZAQ4)
2809 .Case("za5v.q", AArch64::ZAQ5)
2810 .Case("za6v.q", AArch64::ZAQ6)
2811 .Case("za7v.q", AArch64::ZAQ7)
2812 .Case("za8v.q", AArch64::ZAQ8)
2813 .Case("za9v.q", AArch64::ZAQ9)
2814 .Case("za10v.q", AArch64::ZAQ10)
2815 .Case("za11v.q", AArch64::ZAQ11)
2816 .Case("za12v.q", AArch64::ZAQ12)
2817 .Case("za13v.q", AArch64::ZAQ13)
2818 .Case("za14v.q", AArch64::ZAQ14)
2819 .Case("za15v.q", AArch64::ZAQ15)
2820 .Case("za0v.d", AArch64::ZAD0)
2821 .Case("za1v.d", AArch64::ZAD1)
2822 .Case("za2v.d", AArch64::ZAD2)
2823 .Case("za3v.d", AArch64::ZAD3)
2824 .Case("za4v.d", AArch64::ZAD4)
2825 .Case("za5v.d", AArch64::ZAD5)
2826 .Case("za6v.d", AArch64::ZAD6)
2827 .Case("za7v.d", AArch64::ZAD7)
2828 .Case("za0v.s", AArch64::ZAS0)
2829 .Case("za1v.s", AArch64::ZAS1)
2830 .Case("za2v.s", AArch64::ZAS2)
2831 .Case("za3v.s", AArch64::ZAS3)
2832 .Case("za0v.h", AArch64::ZAH0)
2833 .Case("za1v.h", AArch64::ZAH1)
2834 .Case("za0v.b", AArch64::ZAB0)
2835 .Default(0);
2836}
2837
2838bool AArch64AsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
2839 SMLoc &EndLoc) {
2840 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2841}
2842
2843OperandMatchResultTy AArch64AsmParser::tryParseRegister(MCRegister &RegNo,
2844 SMLoc &StartLoc,
2845 SMLoc &EndLoc) {
2846 StartLoc = getLoc();
2847 auto Res = tryParseScalarRegister(RegNo);
2848 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2849 return Res;
2850}
2851
2852// Matches a register name or register alias previously defined by '.req'
2853unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2854 RegKind Kind) {
2855 unsigned RegNum = 0;
2856 if ((RegNum = matchSVEDataVectorRegName(Name)))
2857 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2858
2859 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2860 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2861
2862 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2863 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2864
2865 if ((RegNum = MatchNeonVectorRegName(Name)))
2866 return Kind == RegKind::NeonVector ? RegNum : 0;
2867
2868 if ((RegNum = matchMatrixRegName(Name)))
2869 return Kind == RegKind::Matrix ? RegNum : 0;
2870
2871 if (Name.equals_insensitive("zt0"))
2872 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2873
2874 // The parsed register must be of RegKind Scalar
2875 if ((RegNum = MatchRegisterName(Name)))
2876 return (Kind == RegKind::Scalar) ? RegNum : 0;
2877
2878 if (!RegNum) {
2879 // Handle a few common aliases of registers.
2880 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2881 .Case("fp", AArch64::FP)
2882 .Case("lr", AArch64::LR)
2883 .Case("x31", AArch64::XZR)
2884 .Case("w31", AArch64::WZR)
2885 .Default(0))
2886 return Kind == RegKind::Scalar ? RegNum : 0;
2887
2888 // Check for aliases registered via .req. Canonicalize to lower case.
2889 // That's more consistent since register names are case insensitive, and
2890 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2891 auto Entry = RegisterReqs.find(Name.lower());
2892 if (Entry == RegisterReqs.end())
2893 return 0;
2894
2895 // set RegNum if the match is the right kind of register
2896 if (Kind == Entry->getValue().first)
2897 RegNum = Entry->getValue().second;
2898 }
2899 return RegNum;
2900}
2901
2902unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2903 switch (K) {
2904 case RegKind::Scalar:
2905 case RegKind::NeonVector:
2906 case RegKind::SVEDataVector:
2907 return 32;
2908 case RegKind::Matrix:
2909 case RegKind::SVEPredicateVector:
2910 case RegKind::SVEPredicateAsCounter:
2911 return 16;
2912 case RegKind::LookupTable:
2913 return 1;
2914 }
2915 llvm_unreachable("Unsupported RegKind");
2916}
2917
2918/// tryParseScalarRegister - Try to parse a register name. The token must be an
2919/// Identifier when called, and if it is a register name the token is eaten and
2920/// the register is added to the operand list.
2921 OperandMatchResultTy
2922 AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2923 const AsmToken &Tok = getTok();
2924 if (Tok.isNot(AsmToken::Identifier))
2925 return MatchOperand_NoMatch;
2926
2927 std::string lowerCase = Tok.getString().lower();
2928 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2929 if (Reg == 0)
2930 return MatchOperand_NoMatch;
2931
2932 RegNum = Reg;
2933 Lex(); // Eat identifier token.
2934 return MatchOperand_Success;
2935}
2936
2937/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2938 OperandMatchResultTy
2939 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2940 SMLoc S = getLoc();
2941
2942 if (getTok().isNot(AsmToken::Identifier)) {
2943 Error(S, "Expected cN operand where 0 <= N <= 15");
2944 return MatchOperand_ParseFail;
2945 }
2946
2947 StringRef Tok = getTok().getIdentifier();
2948 if (Tok[0] != 'c' && Tok[0] != 'C') {
2949 Error(S, "Expected cN operand where 0 <= N <= 15");
2950 return MatchOperand_ParseFail;
2951 }
2952
2953 uint32_t CRNum;
2954 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2955 if (BadNum || CRNum > 15) {
2956 Error(S, "Expected cN operand where 0 <= N <= 15");
2957 return MatchOperand_ParseFail;
2958 }
2959
2960 Lex(); // Eat identifier token.
2961 Operands.push_back(
2962 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2963 return MatchOperand_Success;
2964}
2965
2966// Either an identifier for named values or a 6-bit immediate.
2967 OperandMatchResultTy
2968 AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
2969 SMLoc S = getLoc();
2970 const AsmToken &Tok = getTok();
2971
2972 unsigned MaxVal = 63;
2973
2974 // Immediate case, with optional leading hash:
2975 if (parseOptionalToken(AsmToken::Hash) ||
2976 Tok.is(AsmToken::Integer)) {
2977 const MCExpr *ImmVal;
2978 if (getParser().parseExpression(ImmVal))
2979 return MatchOperand_ParseFail;
2980
2981 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2982 if (!MCE) {
2983 TokError("immediate value expected for prefetch operand");
2984 return MatchOperand_ParseFail;
2985 }
2986 unsigned prfop = MCE->getValue();
2987 if (prfop > MaxVal) {
2988 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2989 "] expected");
2990 return MatchOperand_ParseFail;
2991 }
2992
2993 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
2994 Operands.push_back(AArch64Operand::CreatePrefetch(
2995 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
2996 return MatchOperand_Success;
2997 }
2998
2999 if (Tok.isNot(AsmToken::Identifier)) {
3000 TokError("prefetch hint expected");
3001 return MatchOperand_ParseFail;
3002 }
3003
3004 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3005 if (!RPRFM) {
3006 TokError("prefetch hint expected");
3007 return MatchOperand_ParseFail;
3008 }
3009
3010 Operands.push_back(AArch64Operand::CreatePrefetch(
3011 RPRFM->Encoding, Tok.getString(), S, getContext()));
3012 Lex(); // Eat identifier token.
3013 return MatchOperand_Success;
3014}
3015
3016/// tryParsePrefetch - Try to parse a prefetch operand.
3017template <bool IsSVEPrefetch>
3018 OperandMatchResultTy
3019 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3020 SMLoc S = getLoc();
3021 const AsmToken &Tok = getTok();
3022
3023 auto LookupByName = [](StringRef N) {
3024 if (IsSVEPrefetch) {
3025 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3026 return std::optional<unsigned>(Res->Encoding);
3027 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3028 return std::optional<unsigned>(Res->Encoding);
3029 return std::optional<unsigned>();
3030 };
3031
3032 auto LookupByEncoding = [](unsigned E) {
3033 if (IsSVEPrefetch) {
3034 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3035 return std::optional<StringRef>(Res->Name);
3036 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3037 return std::optional<StringRef>(Res->Name);
3038 return std::optional<StringRef>();
3039 };
3040 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3041
3042 // Either an identifier for named values or a 5-bit immediate.
3043 // Eat optional hash.
3044 if (parseOptionalToken(AsmToken::Hash) ||
3045 Tok.is(AsmToken::Integer)) {
3046 const MCExpr *ImmVal;
3047 if (getParser().parseExpression(ImmVal))
3048 return MatchOperand_ParseFail;
3049
3050 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3051 if (!MCE) {
3052 TokError("immediate value expected for prefetch operand");
3053 return MatchOperand_ParseFail;
3054 }
3055 unsigned prfop = MCE->getValue();
3056 if (prfop > MaxVal) {
3057 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3058 "] expected");
3059 return MatchOperand_ParseFail;
3060 }
3061
3062 auto PRFM = LookupByEncoding(MCE->getValue());
3063 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3064 S, getContext()));
3065 return MatchOperand_Success;
3066 }
3067
3068 if (Tok.isNot(AsmToken::Identifier)) {
3069 TokError("prefetch hint expected");
3070 return MatchOperand_ParseFail;
3071 }
3072
3073 auto PRFM = LookupByName(Tok.getString());
3074 if (!PRFM) {
3075 TokError("prefetch hint expected");
3076 return MatchOperand_ParseFail;
3077 }
3078
3079 Operands.push_back(AArch64Operand::CreatePrefetch(
3080 *PRFM, Tok.getString(), S, getContext()));
3081 Lex(); // Eat identifier token.
3082 return MatchOperand_Success;
3083}
3084
3085 /// tryParsePSBHint - Try to parse a PSB hint operand, which maps onto the HINT instruction.
3086 OperandMatchResultTy
3087 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3088 SMLoc S = getLoc();
3089 const AsmToken &Tok = getTok();
3090 if (Tok.isNot(AsmToken::Identifier)) {
3091 TokError("invalid operand for instruction");
3092 return MatchOperand_ParseFail;
3093 }
3094
3095 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3096 if (!PSB) {
3097 TokError("invalid operand for instruction");
3098 return MatchOperand_ParseFail;
3099 }
3100
3101 Operands.push_back(AArch64Operand::CreatePSBHint(
3102 PSB->Encoding, Tok.getString(), S, getContext()));
3103 Lex(); // Eat identifier token.
3104 return MatchOperand_Success;
3105}
3106
3107 OperandMatchResultTy
3108 AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3109 SMLoc StartLoc = getLoc();
3110
3111 MCRegister RegNum;
3112
3113 // The case where xzr, xzr is not present is handled by an InstAlias.
3114
3115 auto RegTok = getTok(); // in case we need to backtrack
3116 if (tryParseScalarRegister(RegNum) != MatchOperand_Success)
3117 return MatchOperand_NoMatch;
3118
3119 if (RegNum != AArch64::XZR) {
3120 getLexer().UnLex(RegTok);
3121 return MatchOperand_NoMatch;
3122 }
3123
3124 if (parseComma())
3125 return MatchOperand_ParseFail;
3126
3127 if (tryParseScalarRegister(RegNum) != MatchOperand_Success) {
3128 TokError("expected register operand");
3129 return MatchOperand_ParseFail;
3130 }
3131
3132 if (RegNum != AArch64::XZR) {
3133 TokError("xzr must be followed by xzr");
3134 return MatchOperand_ParseFail;
3135 }
3136
3137 // We need to push something, since we claim this is an operand in .td.
3138 // See also AArch64AsmParser::parseKeywordOperand.
3139 Operands.push_back(AArch64Operand::CreateReg(
3140 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3141
3142 return MatchOperand_Success;
3143}
3144
3145 /// tryParseBTIHint - Try to parse a BTI hint operand, which maps onto the HINT instruction.
3146 OperandMatchResultTy
3147 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3148 SMLoc S = getLoc();
3149 const AsmToken &Tok = getTok();
3150 if (Tok.isNot(AsmToken::Identifier)) {
3151 TokError("invalid operand for instruction");
3152 return MatchOperand_ParseFail;
3153 }
3154
3155 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3156 if (!BTI) {
3157 TokError("invalid operand for instruction");
3158 return MatchOperand_ParseFail;
3159 }
3160
3161 Operands.push_back(AArch64Operand::CreateBTIHint(
3162 BTI->Encoding, Tok.getString(), S, getContext()));
3163 Lex(); // Eat identifier token.
3164 return MatchOperand_Success;
3165}
3166
3167/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3168/// instruction.
3169 OperandMatchResultTy
3170 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3171 SMLoc S = getLoc();
3172 const MCExpr *Expr = nullptr;
3173
3174 if (getTok().is(AsmToken::Hash)) {
3175 Lex(); // Eat hash token.
3176 }
3177
3178 if (parseSymbolicImmVal(Expr))
3179 return MatchOperand_ParseFail;
3180
3181 AArch64MCExpr::VariantKind ELFRefKind;
3182 MCSymbolRefExpr::VariantKind DarwinRefKind;
3183 int64_t Addend;
3184 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3185 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3186 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3187 // No modifier was specified at all; this is the syntax for an ELF basic
3188 // ADRP relocation (unfortunately).
3189 Expr =
3190 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3191 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3192 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3193 Addend != 0) {
3194 Error(S, "gotpage label reference not allowed an addend");
3195 return MatchOperand_ParseFail;
3196 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3197 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3198 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3199 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3200 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3201 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3202 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3203 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3204 // The operand must be an @page or @gotpage qualified symbolref.
3205 Error(S, "page or gotpage label reference expected");
3206 return MatchOperand_ParseFail;
3207 }
3208 }
3209
3210 // We have either a label reference possibly with addend or an immediate. The
3211 // addend is a raw value here. The linker will adjust it to only reference the
3212 // page.
3213 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3214 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3215
3216 return MatchOperand_Success;
3217}
3218
3219/// tryParseAdrLabel - Parse and validate a source label for the ADR
3220/// instruction.
3221 OperandMatchResultTy
3222 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3223 SMLoc S = getLoc();
3224 const MCExpr *Expr = nullptr;
3225
3226 // Leave anything with a bracket to the default for SVE
3227 if (getTok().is(AsmToken::LBrac))
3228 return MatchOperand_NoMatch;
3229
3230 if (getTok().is(AsmToken::Hash))
3231 Lex(); // Eat hash token.
3232
3233 if (parseSymbolicImmVal(Expr))
3234 return MatchOperand_ParseFail;
3235
3236 AArch64MCExpr::VariantKind ELFRefKind;
3237 MCSymbolRefExpr::VariantKind DarwinRefKind;
3238 int64_t Addend;
3239 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3240 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3241 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3242 // No modifier was specified at all; this is the syntax for an ELF basic
3243 // ADR relocation (unfortunately).
3244 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3245 } else {
3246 Error(S, "unexpected adr label");
3247 return MatchOperand_ParseFail;
3248 }
3249 }
3250
3251 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3252 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3253 return MatchOperand_Success;
3254}
3255
3256/// tryParseFPImm - A floating point immediate expression operand.
3257template<bool AddFPZeroAsLiteral>
3258 OperandMatchResultTy
3259 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3260 SMLoc S = getLoc();
3261
3262 bool Hash = parseOptionalToken(AsmToken::Hash);
3263
3264 // Handle negation, as that still comes through as a separate token.
3265 bool isNegative = parseOptionalToken(AsmToken::Minus);
3266
3267 const AsmToken &Tok = getTok();
3268 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3269 if (!Hash)
3270 return MatchOperand_NoMatch;
3271 TokError("invalid floating point immediate");
3272 return MatchOperand_ParseFail;
3273 }
3274
3275 // Parse hexadecimal representation.
3276 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
3277 if (Tok.getIntVal() > 255 || isNegative) {
3278 TokError("encoded floating point value out of range");
3279 return MatchOperand_ParseFail;
3280 }
3281
3282 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3283 Operands.push_back(
3284 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3285 } else {
3286 // Parse FP representation.
3287 APFloat RealVal(APFloat::IEEEdouble());
3288 auto StatusOrErr =
3289 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3290 if (errorToBool(StatusOrErr.takeError())) {
3291 TokError("invalid floating point representation");
3292 return MatchOperand_ParseFail;
3293 }
3294
3295 if (isNegative)
3296 RealVal.changeSign();
3297
3298 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3299 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3300 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3301 } else
3302 Operands.push_back(AArch64Operand::CreateFPImm(
3303 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3304 }
3305
3306 Lex(); // Eat the token.
3307
3308 return MatchOperand_Success;
3309}
3310
3311/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3312/// a shift suffix, for example '#1, lsl #12'.
3313 OperandMatchResultTy
3314 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3315 SMLoc S = getLoc();
3316
3317 if (getTok().is(AsmToken::Hash))
3318 Lex(); // Eat '#'
3319 else if (getTok().isNot(AsmToken::Integer))
3320 // Operand should start from # or should be integer, emit error otherwise.
3321 return MatchOperand_NoMatch;
3322
3323 if (getTok().is(AsmToken::Integer) &&
3324 getLexer().peekTok().is(AsmToken::Colon))
3325 return tryParseImmRange(Operands);
3326
3327 const MCExpr *Imm = nullptr;
3328 if (parseSymbolicImmVal(Imm))
3329 return MatchOperand_ParseFail;
3330 else if (getTok().isNot(AsmToken::Comma)) {
3331 Operands.push_back(
3332 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3333 return MatchOperand_Success;
3334 }
3335
3336 // Eat ','
3337 Lex();
3338 StringRef VecGroup;
3339 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3340 Operands.push_back(
3341 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3342 Operands.push_back(
3343 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3344 return MatchOperand_Success;
3345 }
3346
3347 // The optional operand must be "lsl #N" where N is non-negative.
3348 if (!getTok().is(AsmToken::Identifier) ||
3349 !getTok().getIdentifier().equals_insensitive("lsl")) {
3350 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3351 return MatchOperand_ParseFail;
3352 }
3353
3354 // Eat 'lsl'
3355 Lex();
3356
3357 parseOptionalToken(AsmToken::Hash);
3358
3359 if (getTok().isNot(AsmToken::Integer)) {
3360 Error(getLoc(), "only 'lsl #+N' valid after immediate");
3361 return MatchOperand_ParseFail;
3362 }
3363
3364 int64_t ShiftAmount = getTok().getIntVal();
3365
3366 if (ShiftAmount < 0) {
3367 Error(getLoc(), "positive shift amount required");
3368 return MatchOperand_ParseFail;
3369 }
3370 Lex(); // Eat the number
3371
3372 // Just in case the optional lsl #0 is used for immediates other than zero.
3373 if (ShiftAmount == 0 && Imm != nullptr) {
3374 Operands.push_back(
3375 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3376 return MatchOperand_Success;
3377 }
3378
3379 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3380 getLoc(), getContext()));
3381 return MatchOperand_Success;
3382}
3383
3384/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3385/// suggestion to help common typos.
3386 AArch64CC::CondCode
3387 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3388 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3389 .Case("eq", AArch64CC::EQ)
3390 .Case("ne", AArch64CC::NE)
3391 .Case("cs", AArch64CC::HS)
3392 .Case("hs", AArch64CC::HS)
3393 .Case("cc", AArch64CC::LO)
3394 .Case("lo", AArch64CC::LO)
3395 .Case("mi", AArch64CC::MI)
3396 .Case("pl", AArch64CC::PL)
3397 .Case("vs", AArch64CC::VS)
3398 .Case("vc", AArch64CC::VC)
3399 .Case("hi", AArch64CC::HI)
3400 .Case("ls", AArch64CC::LS)
3401 .Case("ge", AArch64CC::GE)
3402 .Case("lt", AArch64CC::LT)
3403 .Case("gt", AArch64CC::GT)
3404 .Case("le", AArch64CC::LE)
3405 .Case("al", AArch64CC::AL)
3406 .Case("nv", AArch64CC::NV)
3407 .Default(AArch64CC::Invalid);
3408
3409 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3410 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3411 .Case("none", AArch64CC::EQ)
3412 .Case("any", AArch64CC::NE)
3413 .Case("nlast", AArch64CC::HS)
3414 .Case("last", AArch64CC::LO)
3415 .Case("first", AArch64CC::MI)
3416 .Case("nfrst", AArch64CC::PL)
3417 .Case("pmore", AArch64CC::HI)
3418 .Case("plast", AArch64CC::LS)
3419 .Case("tcont", AArch64CC::GE)
3420 .Case("tstop", AArch64CC::LT)
3421 .Default(AArch64CC::Invalid);
3422
3423 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3424 Suggestion = "nfrst";
3425 }
3426 return CC;
3427}
3428
3429/// parseCondCode - Parse a Condition Code operand.
3430bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3431 bool invertCondCode) {
3432 SMLoc S = getLoc();
3433 const AsmToken &Tok = getTok();
3434 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3435
3436 StringRef Cond = Tok.getString();
3437 std::string Suggestion;
3438 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3439 if (CC == AArch64CC::Invalid) {
3440 std::string Msg = "invalid condition code";
3441 if (!Suggestion.empty())
3442 Msg += ", did you mean " + Suggestion + "?";
3443 return TokError(Msg);
3444 }
3445 Lex(); // Eat identifier token.
3446
3447 if (invertCondCode) {
3448 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3449 return TokError("condition codes AL and NV are invalid for this instruction");
3450 CC = AArch64CC::getInvertedCondCode(CC);
3451 }
3452
3453 Operands.push_back(
3454 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3455 return false;
3456}
3457
3458 OperandMatchResultTy
3459 AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3460 const AsmToken &Tok = getTok();
3461 SMLoc S = getLoc();
3462
3463 if (Tok.isNot(AsmToken::Identifier)) {
3464 TokError("invalid operand for instruction");
3465 return MatchOperand_ParseFail;
3466 }
3467
3468 unsigned PStateImm = -1;
3469 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3470 if (!SVCR)
3471 return MatchOperand_NoMatch;
3472 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3473 PStateImm = SVCR->Encoding;
3474
3475 Operands.push_back(
3476 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3477 Lex(); // Eat identifier token.
3478 return MatchOperand_Success;
3479}
3480
3481 OperandMatchResultTy
3482 AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3483 const AsmToken &Tok = getTok();
3484 SMLoc S = getLoc();
3485
3486 StringRef Name = Tok.getString();
3487
3488 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3489 Lex(); // eat "za[.(b|h|s|d)]"
3490 unsigned ElementWidth = 0;
3491 auto DotPosition = Name.find('.');
3492 if (DotPosition != StringRef::npos) {
3493 const auto &KindRes =
3494 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3495 if (!KindRes) {
3496 TokError(
3497 "Expected the register to be followed by element width suffix");
3498 return MatchOperand_ParseFail;
3499 }
3500 ElementWidth = KindRes->second;
3501 }
3502 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3503 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3504 getContext()));
3505 if (getLexer().is(AsmToken::LBrac)) {
3506 // There's no comma after matrix operand, so we can parse the next operand
3507 // immediately.
3508 if (parseOperand(Operands, false, false))
3509 return MatchOperand_NoMatch;
3510 }
3511 return MatchOperand_Success;
3512 }
3513
3514 // Try to parse matrix register.
3515 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3516 if (!Reg)
3517 return MatchOperand_NoMatch;
3518
3519 size_t DotPosition = Name.find('.');
3520 assert(DotPosition != StringRef::npos && "Unexpected register");
3521
3522 StringRef Head = Name.take_front(DotPosition);
3523 StringRef Tail = Name.drop_front(DotPosition);
3524 StringRef RowOrColumn = Head.take_back();
3525
3526 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3527 .Case("h", MatrixKind::Row)
3528 .Case("v", MatrixKind::Col)
3529 .Default(MatrixKind::Tile);
3530
3531 // Next up, parsing the suffix
3532 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3533 if (!KindRes) {
3534 TokError("Expected the register to be followed by element width suffix");
3535 return MatchOperand_ParseFail;
3536 }
3537 unsigned ElementWidth = KindRes->second;
3538
3539 Lex();
3540
3541 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3542 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3543
3544 if (getLexer().is(AsmToken::LBrac)) {
3545 // There's no comma after matrix operand, so we can parse the next operand
3546 // immediately.
3547 if (parseOperand(Operands, false, false))
3548 return MatchOperand_NoMatch;
3549 }
3550 return MatchOperand_Success;
3551}
3552
3553 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
3554 /// extend argument. Parse them if present.
3555 OperandMatchResultTy
3556 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3557 const AsmToken &Tok = getTok();
3558 std::string LowerID = Tok.getString().lower();
3559 AArch64_AM::ShiftExtendType ShOp =
3560 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3561 .Case("lsl", AArch64_AM::LSL)
3562 .Case("lsr", AArch64_AM::LSR)
3563 .Case("asr", AArch64_AM::ASR)
3564 .Case("ror", AArch64_AM::ROR)
3565 .Case("msl", AArch64_AM::MSL)
3566 .Case("uxtb", AArch64_AM::UXTB)
3567 .Case("uxth", AArch64_AM::UXTH)
3568 .Case("uxtw", AArch64_AM::UXTW)
3569 .Case("uxtx", AArch64_AM::UXTX)
3570 .Case("sxtb", AArch64_AM::SXTB)
3571 .Case("sxth", AArch64_AM::SXTH)
3572 .Case("sxtw", AArch64_AM::SXTW)
3573 .Case("sxtx", AArch64_AM::SXTX)
3574 .Default(AArch64_AM::InvalidShiftExtend);
3575
3576 if (ShOp == AArch64_AM::InvalidShiftExtend)
3577 return MatchOperand_NoMatch;
3578
3579 SMLoc S = Tok.getLoc();
3580 Lex();
3581
3582 bool Hash = parseOptionalToken(AsmToken::Hash);
3583
3584 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3585 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3586 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3587 ShOp == AArch64_AM::MSL) {
3588 // We expect a number here.
3589 TokError("expected #imm after shift specifier");
3590 return MatchOperand_ParseFail;
3591 }
3592
3593 // "extend" type operations don't need an immediate, #0 is implicit.
3594 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3595 Operands.push_back(
3596 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3597 return MatchOperand_Success;
3598 }
3599
3600 // Make sure we actually have a number, an identifier or a parenthesized
3601 // expression.
3602 SMLoc E = getLoc();
3603 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3604 !getTok().is(AsmToken::Identifier)) {
3605 Error(E, "expected integer shift amount");
3606 return MatchOperand_ParseFail;
3607 }
3608
3609 const MCExpr *ImmVal;
3610 if (getParser().parseExpression(ImmVal))
3611 return MatchOperand_ParseFail;
3612
3613 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3614 if (!MCE) {
3615 Error(E, "expected constant '#imm' after shift specifier");
3616 return MatchOperand_ParseFail;
3617 }
3618
3619 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3620 Operands.push_back(AArch64Operand::CreateShiftExtend(
3621 ShOp, MCE->getValue(), true, S, E, getContext()));
3622 return MatchOperand_Success;
3623}
3624
3625static const struct Extension {
3626 const char *Name;
3627 const FeatureBitset Features;
3628} ExtensionMap[] = {
3629 {"crc", {AArch64::FeatureCRC}},
3630 {"sm4", {AArch64::FeatureSM4}},
3631 {"sha3", {AArch64::FeatureSHA3}},
3632 {"sha2", {AArch64::FeatureSHA2}},
3633 {"aes", {AArch64::FeatureAES}},
3634 {"crypto", {AArch64::FeatureCrypto}},
3635 {"fp", {AArch64::FeatureFPARMv8}},
3636 {"simd", {AArch64::FeatureNEON}},
3637 {"ras", {AArch64::FeatureRAS}},
3638 {"rasv2", {AArch64::FeatureRASv2}},
3639 {"lse", {AArch64::FeatureLSE}},
3640 {"predres", {AArch64::FeaturePredRes}},
3641 {"predres2", {AArch64::FeatureSPECRES2}},
3642 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3643 {"mte", {AArch64::FeatureMTE}},
3644 {"memtag", {AArch64::FeatureMTE}},
3645 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3646 {"pan", {AArch64::FeaturePAN}},
3647 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3648 {"ccpp", {AArch64::FeatureCCPP}},
3649 {"rcpc", {AArch64::FeatureRCPC}},
3650 {"rng", {AArch64::FeatureRandGen}},
3651 {"sve", {AArch64::FeatureSVE}},
3652 {"sve2", {AArch64::FeatureSVE2}},
3653 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3654 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3655 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3656 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3657 {"sve2p1", {AArch64::FeatureSVE2p1}},
3658 {"b16b16", {AArch64::FeatureB16B16}},
3659 {"ls64", {AArch64::FeatureLS64}},
3660 {"xs", {AArch64::FeatureXS}},
3661 {"pauth", {AArch64::FeaturePAuth}},
3662 {"flagm", {AArch64::FeatureFlagM}},
3663 {"rme", {AArch64::FeatureRME}},
3664 {"sme", {AArch64::FeatureSME}},
3665 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3666 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3667 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3668 {"sme2", {AArch64::FeatureSME2}},
3669 {"sme2p1", {AArch64::FeatureSME2p1}},
3670 {"hbc", {AArch64::FeatureHBC}},
3671 {"mops", {AArch64::FeatureMOPS}},
3672 {"mec", {AArch64::FeatureMEC}},
3673 {"the", {AArch64::FeatureTHE}},
3674 {"d128", {AArch64::FeatureD128}},
3675 {"lse128", {AArch64::FeatureLSE128}},
3676 {"ite", {AArch64::FeatureITE}},
3677 {"cssc", {AArch64::FeatureCSSC}},
3678 {"rcpc3", {AArch64::FeatureRCPC3}},
3679 {"gcs", {AArch64::FeatureGCS}},
3680 {"bf16", {AArch64::FeatureBF16}},
3681 {"compnum", {AArch64::FeatureComplxNum}},
3682 {"dotprod", {AArch64::FeatureDotProd}},
3683 {"f32mm", {AArch64::FeatureMatMulFP32}},
3684 {"f64mm", {AArch64::FeatureMatMulFP64}},
3685 {"fp16", {AArch64::FeatureFullFP16}},
3686 {"fp16fml", {AArch64::FeatureFP16FML}},
3687 {"i8mm", {AArch64::FeatureMatMulInt8}},
3688 {"lor", {AArch64::FeatureLOR}},
3689 {"profile", {AArch64::FeatureSPE}},
3690 // "rdma" is the name documented by binutils for the feature, but
3691 // binutils also accepts incomplete prefixes of features, so "rdm"
3692 // works too. Support both spellings here.
3693 {"rdm", {AArch64::FeatureRDM}},
3694 {"rdma", {AArch64::FeatureRDM}},
3695 {"sb", {AArch64::FeatureSB}},
3696 {"ssbs", {AArch64::FeatureSSBS}},
3697 {"tme", {AArch64::FeatureTME}},
3698};
3699
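// [Editorial note, not part of the upstream source.] ExtensionMap backs the
// ".arch_extension" directive: for example, ".arch_extension sve2" enables
// FeatureSVE2 above, and ".arch_extension nosve2" clears it again.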
3700static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3701 if (FBS[AArch64::HasV8_0aOps])
3702 Str += "ARMv8a";
3703 if (FBS[AArch64::HasV8_1aOps])
3704 Str += "ARMv8.1a";
3705 else if (FBS[AArch64::HasV8_2aOps])
3706 Str += "ARMv8.2a";
3707 else if (FBS[AArch64::HasV8_3aOps])
3708 Str += "ARMv8.3a";
3709 else if (FBS[AArch64::HasV8_4aOps])
3710 Str += "ARMv8.4a";
3711 else if (FBS[AArch64::HasV8_5aOps])
3712 Str += "ARMv8.5a";
3713 else if (FBS[AArch64::HasV8_6aOps])
3714 Str += "ARMv8.6a";
3715 else if (FBS[AArch64::HasV8_7aOps])
3716 Str += "ARMv8.7a";
3717 else if (FBS[AArch64::HasV8_8aOps])
3718 Str += "ARMv8.8a";
3719 else if (FBS[AArch64::HasV8_9aOps])
3720 Str += "ARMv8.9a";
3721 else if (FBS[AArch64::HasV9_0aOps])
3722 Str += "ARMv9-a";
3723 else if (FBS[AArch64::HasV9_1aOps])
3724 Str += "ARMv9.1a";
3725 else if (FBS[AArch64::HasV9_2aOps])
3726 Str += "ARMv9.2a";
3727 else if (FBS[AArch64::HasV9_3aOps])
3728 Str += "ARMv9.3a";
3729 else if (FBS[AArch64::HasV9_4aOps])
3730 Str += "ARMv9.4a";
3731 else if (FBS[AArch64::HasV8_0rOps])
3732 Str += "ARMv8r";
3733 else {
3734 SmallVector<std::string, 2> ExtMatches;
3735 for (const auto& Ext : ExtensionMap) {
3736 // Use & in case multiple features are enabled
3737 if ((FBS & Ext.Features) != FeatureBitset())
3738 ExtMatches.push_back(Ext.Name);
3739 }
3740 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3741 }
3742}
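// [Editorial note, not part of the upstream source.] The string built here
// ends up in diagnostics of the form "TLBI VMALLE1OS requires: tlb-rmi"
// when an operand needs a feature the current subtarget lacks.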
3743
3744void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3745 SMLoc S) {
3746 const uint16_t Op2 = Encoding & 7;
3747 const uint16_t Cm = (Encoding & 0x78) >> 3;
3748 const uint16_t Cn = (Encoding & 0x780) >> 7;
3749 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3750
3751 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3752
3753 Operands.push_back(
3754 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3755 Operands.push_back(
3756 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3757 Operands.push_back(
3758 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3759 Expr = MCConstantExpr::create(Op2, getContext());
3760 Operands.push_back(
3761 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3762}
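// [Editorial worked example, not part of the upstream source.] "ic ialluis"
// is SYS #0, C7, C1, #0, so its table encoding is
//   (0 << 11) | (7 << 7) | (1 << 3) | 0 == 0x388,
// from which the masks above recover Op1=0, Cn=7, Cm=1 and Op2=0.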
3763
3764/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3765/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3766bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3767 OperandVector &Operands) {
3768 if (Name.contains('.'))
3769 return TokError("invalid operand");
3770
3771 Mnemonic = Name;
3772 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3773
3774 const AsmToken &Tok = getTok();
3775 StringRef Op = Tok.getString();
3776 SMLoc S = Tok.getLoc();
3777
3778 if (Mnemonic == "ic") {
3779 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3780 if (!IC)
3781 return TokError("invalid operand for IC instruction");
3782 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3783 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3784 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3785 return TokError(Str);
3786 }
3787 createSysAlias(IC->Encoding, Operands, S);
3788 } else if (Mnemonic == "dc") {
3789 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3790 if (!DC)
3791 return TokError("invalid operand for DC instruction");
3792 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3793 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3794 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3795 return TokError(Str);
3796 }
3797 createSysAlias(DC->Encoding, Operands, S);
3798 } else if (Mnemonic == "at") {
3799 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3800 if (!AT)
3801 return TokError("invalid operand for AT instruction");
3802 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3803 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3804 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3805 return TokError(Str);
3806 }
3807 createSysAlias(AT->Encoding, Operands, S);
3808 } else if (Mnemonic == "tlbi") {
3809 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3810 if (!TLBI)
3811 return TokError("invalid operand for TLBI instruction");
3812 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3813 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3814 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3815 return TokError(Str);
3816 }
3817 createSysAlias(TLBI->Encoding, Operands, S);
3818 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3819
3820 if (Op.lower() != "rctx")
3821 return TokError("invalid operand for prediction restriction instruction");
3822
3823 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3824 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3825 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3826
3827 if (Mnemonic == "cosp" && !hasSpecres2)
3828 return TokError("COSP requires: predres2");
3829 if (!hasPredres)
3830 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3831
3832 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3833 : Mnemonic == "dvp" ? 0b101
3834 : Mnemonic == "cosp" ? 0b110
3835 : Mnemonic == "cpp" ? 0b111
3836 : 0;
3837 assert(PRCTX_Op2 &&
3838 "Invalid mnemonic for prediction restriction instruction");
3839 const auto SYS_3_7_3 = 0b01101110011; // op1=3, CRn=7, CRm=3
3840 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3841
3842 createSysAlias(Encoding, Operands, S);
3843 }
3844
3845 Lex(); // Eat operand.
3846
3847 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
3848 bool HasRegister = false;
3849
3850 // Check for the optional register operand.
3851 if (parseOptionalToken(AsmToken::Comma)) {
3852 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3853 return TokError("expected register operand");
3854 HasRegister = true;
3855 }
3856
3857 if (ExpectRegister && !HasRegister)
3858 return TokError("specified " + Mnemonic + " op requires a register");
3859 else if (!ExpectRegister && HasRegister)
3860 return TokError("specified " + Mnemonic + " op does not use a register");
3861
3862 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3863 return true;
3864
3865 return false;
3866}
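// [Editorial note, not part of the upstream source.] For example,
// "tlbi vmalle1is" takes no register (its name contains "all"), while
// "dc zva, x0" requires one; both are rewritten into the equivalent "sys"
// form by createSysAlias() above.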
3867
3868/// parseSyspAlias - The TLBIP instructions are simple aliases for
3869/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3870bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3871 OperandVector &Operands) {
3872 if (Name.contains('.'))
3873 return TokError("invalid operand");
3874
3875 Mnemonic = Name;
3876 Operands.push_back(
3877 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3878
3879 const AsmToken &Tok = getTok();
3880 StringRef Op = Tok.getString();
3881 SMLoc S = Tok.getLoc();
3882
3883 if (Mnemonic == "tlbip") {
3884 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3885 if (HasnXSQualifier) {
3886 Op = Op.drop_back(3);
3887 }
3888 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3889 if (!TLBIorig)
3890 return TokError("invalid operand for TLBIP instruction");
3891 const AArch64TLBI::TLBI TLBI(
3892 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3893 TLBIorig->NeedsReg,
3894 HasnXSQualifier
3895 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3896 : TLBIorig->FeaturesRequired);
3897 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3898 std::string Name =
3899 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3900 std::string Str("TLBIP " + Name + " requires: ");
3901 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
3902 return TokError(Str);
3903 }
3904 createSysAlias(TLBI.Encoding, Operands, S);
3905 }
3906
3907 Lex(); // Eat operand.
3908
3909 if (parseComma())
3910 return true;
3911
3912 if (Tok.isNot(AsmToken::Identifier))
3913 return TokError("expected register identifier");
3914 auto Result = tryParseSyspXzrPair(Operands);
3915 if (Result == MatchOperand_NoMatch)
3916 Result = tryParseGPRSeqPair(Operands);
3917 if (Result != MatchOperand_Success)
3918 return TokError("specified " + Mnemonic +
3919 " op requires a pair of registers");
3920
3921 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3922 return true;
3923
3924 return false;
3925}
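// [Editorial note, not part of the upstream source.] For example,
// "tlbip vae1, x0, x1" names a register pair, and spelling the operand
// "vae1nxs" additionally sets bit 7 of the encoding and requires
// FeatureXS, as handled above.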
3926
3927OperandMatchResultTy
3928AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3929 MCAsmParser &Parser = getParser();
3930 const AsmToken &Tok = getTok();
3931
3932 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
3933 TokError("'csync' operand expected");
3934 return MatchOperand_ParseFail;
3935 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3936 // Immediate operand.
3937 const MCExpr *ImmVal;
3938 SMLoc ExprLoc = getLoc();
3939 AsmToken IntTok = Tok;
3940 if (getParser().parseExpression(ImmVal))
3941 return MatchOperand_ParseFail;
3942 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3943 if (!MCE) {
3944 Error(ExprLoc, "immediate value expected for barrier operand");
3945 return MatchOperand_ParseFail;
3946 }
3947 int64_t Value = MCE->getValue();
3948 if (Mnemonic == "dsb" && Value > 15) {
3949 // This case is a no match here, but it might be matched by the nXS
3950 // variant. We deliberately don't unlex the optional '#', as it is not
3951 // needed to characterize an integer immediate.
3952 Parser.getLexer().UnLex(IntTok);
3953 return MatchOperand_NoMatch;
3954 }
3955 if (Value < 0 || Value > 15) {
3956 Error(ExprLoc, "barrier operand out of range");
3957 return MatchOperand_ParseFail;
3958 }
3959 auto DB = AArch64DB::lookupDBByEncoding(Value);
3960 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3961 ExprLoc, getContext(),
3962 false /*hasnXSModifier*/));
3963 return MatchOperand_Success;
3964 }
3965
3966 if (Tok.isNot(AsmToken::Identifier)) {
3967 TokError("invalid operand for instruction");
3968 return MatchOperand_ParseFail;
3969 }
3970
3971 StringRef Operand = Tok.getString();
3972 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3973 auto DB = AArch64DB::lookupDBByName(Operand);
3974 // The only valid named option for ISB is 'sy'
3975 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3976 TokError("'sy' or #imm operand expected");
3977 return MatchOperand_ParseFail;
3978 // The only valid named option for TSB is 'csync'
3979 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3980 TokError("'csync' operand expected");
3981 return MatchOperand_ParseFail;
3982 } else if (!DB && !TSB) {
3983 if (Mnemonic == "dsb") {
3984 // This case is a no match here, but it might be matched by the nXS
3985 // variant.
3986 return MatchOperand_NoMatch;
3987 }
3988 TokError("invalid barrier option name");
3989 return MatchOperand_ParseFail;
3990 }
3991
3992 Operands.push_back(AArch64Operand::CreateBarrier(
3993 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3994 getContext(), false /*hasnXSModifier*/));
3995 Lex(); // Consume the option
3996
3997 return MatchOperand_Success;
3998}
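// [Editorial note, not part of the upstream source.] Typical inputs:
// "dmb ish", "dsb sy" (equivalently "dsb #15"), "isb" and "tsb csync".
// A "dsb" immediate above 15, e.g. "dsb #16", deliberately falls through
// to the nXS parser below.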
3999
4000OperandMatchResultTy
4001AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4002 const AsmToken &Tok = getTok();
4003
4004 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4005 if (Mnemonic != "dsb")
4006 return MatchOperand_ParseFail;
4007
4008 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4009 // Immediate operand.
4010 const MCExpr *ImmVal;
4011 SMLoc ExprLoc = getLoc();
4012 if (getParser().parseExpression(ImmVal))
4013 return MatchOperand_ParseFail;
4014 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4015 if (!MCE) {
4016 Error(ExprLoc, "immediate value expected for barrier operand");
4017 return MatchOperand_ParseFail;
4018 }
4019 int64_t Value = MCE->getValue();
4020 // v8.7-A DSB in the nXS variant accepts only the following immediate
4021 // values: 16, 20, 24, 28.
4022 if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
4023 Error(ExprLoc, "barrier operand out of range");
4024 return MatchOperand_ParseFail;
4025 }
4026 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4027 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4028 ExprLoc, getContext(),
4029 true /*hasnXSModifier*/));
4030 return MatchOperand_Success;
4031 }
4032
4033 if (Tok.isNot(AsmToken::Identifier)) {
4034 TokError("invalid operand for instruction");
4035 return MatchOperand_ParseFail;
4036 }
4037
4038 StringRef Operand = Tok.getString();
4039 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4040
4041 if (!DB) {
4042 TokError("invalid barrier option name");
4043 return MatchOperand_ParseFail;
4044 }
4045
4046 Operands.push_back(
4047 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4048 getContext(), true /*hasnXSModifier*/));
4049 Lex(); // Consume the option
4050
4051 return MatchOperand_Success;
4052}
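// [Editorial note, not part of the upstream source.] The nXS variant
// accepts, e.g., "dsb synxs" or "dsb #28"; the four legal immediates
// 16/20/24/28 correspond to oshnxs/nshnxs/ishnxs/synxs.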
4053
4054OperandMatchResultTy
4055AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4056 const AsmToken &Tok = getTok();
4057
4058 if (Tok.isNot(AsmToken::Identifier))
4059 return MatchOperand_NoMatch;
4060
4061 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4062 return MatchOperand_NoMatch;
4063
4064 int MRSReg, MSRReg;
4065 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4066 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4067 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4068 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4069 } else
4070 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4071
4072 unsigned PStateImm = -1;
4073 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4074 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4075 PStateImm = PState15->Encoding;
4076 if (!PState15) {
4077 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4078 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4079 PStateImm = PState1->Encoding;
4080 }
4081
4082 Operands.push_back(
4083 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4084 PStateImm, getContext()));
4085 Lex(); // Eat identifier
4086
4087 return MatchOperand_Success;
4088}
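// [Editorial note, not part of the upstream source.] Examples:
// "mrs x0, midr_el1" (a read-only register), "msr ttbr0_el1, x0", or a
// generic encoded name such as "s3_0_c2_c0_0", which
// parseGenericRegister() resolves to the same encoding as ttbr0_el1.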
4089
4090/// tryParseNeonVectorRegister - Parse a vector register operand.
4091bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4092 if (getTok().isNot(AsmToken::Identifier))
4093 return true;
4094
4095 SMLoc S = getLoc();
4096 // Check for a vector register specifier first.
4097 StringRef Kind;
4098 MCRegister Reg;
4099 OperandMatchResultTy Res =
4100 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4101 if (Res != MatchOperand_Success)
4102 return true;
4103
4104 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4105 if (!KindRes)
4106 return true;
4107
4108 unsigned ElementWidth = KindRes->second;
4109 Operands.push_back(
4110 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4111 S, getLoc(), getContext()));
4112
4113 // If there was an explicit qualifier, that goes on as a literal text
4114 // operand.
4115 if (!Kind.empty())
4116 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4117
4118 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4119}
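// [Editorial note, not part of the upstream source.] For example, "v0.8b"
// yields a vector-register operand plus a ".8b" token, and "v1.s[2]"
// additionally parses the lane index via tryParseVectorIndex().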
4120
4121OperandMatchResultTy
4122AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4123 SMLoc SIdx = getLoc();
4124 if (parseOptionalToken(AsmToken::LBrac)) {
4125 const MCExpr *ImmVal;
4126 if (getParser().parseExpression(ImmVal))
4127 return MatchOperand_NoMatch;
4128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4129 if (!MCE) {
4130 TokError("immediate value expected for vector index");
4131 return MatchOperand_ParseFail;
4132 }
4133
4134 SMLoc E = getLoc();
4135
4136 if (parseToken(AsmToken::RBrac, "']' expected"))
4137 return MatchOperand_ParseFail;
4138
4139 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4140 E, getContext()));
4141 return MatchOperand_Success;
4142 }
4143
4144 return MatchOperand_NoMatch;
4145}
4146
4147// tryParseVectorRegister - Try to parse a vector register name with
4148// optional kind specifier. If it is a register specifier, eat the token
4149// and return it.
4150OperandMatchResultTy
4151AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
4152 RegKind MatchKind) {
4153 const AsmToken &Tok = getTok();
4154
4155 if (Tok.isNot(AsmToken::Identifier))
4156 return MatchOperand_NoMatch;
4157
4158 StringRef Name = Tok.getString();
4159 // If there is a kind specifier, it's separated from the register name by
4160 // a '.'.
4161 size_t Start = 0, Next = Name.find('.');
4162 StringRef Head = Name.slice(Start, Next);
4163 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4164
4165 if (RegNum) {
4166 if (Next != StringRef::npos) {
4167 Kind = Name.slice(Next, StringRef::npos);
4168 if (!isValidVectorKind(Kind, MatchKind)) {
4169 TokError("invalid vector kind qualifier");
4170 return MatchOperand_ParseFail;
4171 }
4172 }
4173 Lex(); // Eat the register token.
4174
4175 Reg = RegNum;
4176 return MatchOperand_Success;
4177 }
4178
4179 return MatchOperand_NoMatch;
4180}
4181
4182/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4183template <RegKind RK> OperandMatchResultTy
4184AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4185 // Check for a SVE predicate register specifier first.
4186 const SMLoc S = getLoc();
4187 StringRef Kind;
4188 MCRegister RegNum;
4189 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4190 if (Res != MatchOperand_Success)
4191 return Res;
4192
4193 const auto &KindRes = parseVectorKind(Kind, RK);
4194 if (!KindRes)
4195 return MatchOperand_NoMatch;
4196
4197 unsigned ElementWidth = KindRes->second;
4198 Operands.push_back(AArch64Operand::CreateVectorReg(
4199 RegNum, RK, ElementWidth, S,
4200 getLoc(), getContext()));
4201
4202 if (getLexer().is(AsmToken::LBrac)) {
4203 if (RK == RegKind::SVEPredicateAsCounter) {
4204 OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands);
4205 if (ResIndex == MatchOperand_Success)
4206 return MatchOperand_Success;
4207 } else {
4208 // Indexed predicate: there's no comma, so try to parse the next
4209 // operand immediately.
4210 if (parseOperand(Operands, false, false))
4211 return MatchOperand_NoMatch;
4212 }
4213 }
4214
4215 // Not all predicates are followed by a '/m' or '/z'.
4216 if (getTok().isNot(AsmToken::Slash))
4217 return MatchOperand_Success;
4218
4219 // But when they do, they shouldn't have an element type suffix.
4220 if (!Kind.empty()) {
4221 Error(S, "not expecting size suffix");
4222 return MatchOperand_ParseFail;
4223 }
4224
4225 // Add a literal slash as operand
4226 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4227
4228 Lex(); // Eat the slash.
4229
4230 // Zeroing or merging?
4231 auto Pred = getTok().getString().lower();
4232 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") {
4233 Error(getLoc(), "expecting 'z' predication");
4234 return MatchOperand_ParseFail;
4235 }
4236
4237 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") {
4238 Error(getLoc(), "expecting 'm' or 'z' predication");
4239 return MatchOperand_ParseFail;
4240 }
4241
4242 // Add zero/merge token.
4243 const char *ZM = Pred == "z" ? "z" : "m";
4244 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4245
4246 Lex(); // Eat zero/merge token.
4247 return MatchOperand_Success;
4248}
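// [Editorial note, not part of the upstream source.] Examples: "p0.b" (a
// sized predicate), "p0/z" and "p3/m" (zeroing/merging qualifiers, which
// must not carry a size suffix), and "pn8.b" for the predicate-as-counter
// form, which only allows '/z'.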
4249
4250/// parseRegister - Parse a register operand.
4251bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4252 // Try for a Neon vector register.
4253 if (!tryParseNeonVectorRegister(Operands))
4254 return false;
4255
4256 if (tryParseZTOperand(Operands) == MatchOperand_Success)
4257 return false;
4258
4259 // Otherwise try for a scalar register.
4260 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
4261 return false;
4262
4263 return true;
4264}
4265
4266bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4267 bool HasELFModifier = false;
4268 AArch64MCExpr::VariantKind RefKind;
4269
4270 if (parseOptionalToken(AsmToken::Colon)) {
4271 HasELFModifier = true;
4272
4273 if (getTok().isNot(AsmToken::Identifier))
4274 return TokError("expect relocation specifier in operand after ':'");
4275
4276 std::string LowerCase = getTok().getIdentifier().lower();
4277 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4279 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4280 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4281 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4282 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4283 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4284 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4285 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4286 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4287 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4288 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4289 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4290 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4291 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4292 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4293 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4294 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4295 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4296 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4297 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4298 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4299 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4300 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4301 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4302 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4303 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4304 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4305 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4306 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4307 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4308 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4309 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4310 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4311 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4312 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4313 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4315 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4316 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4318 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4319 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4320 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4322 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4323 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4324 .Default(AArch64MCExpr::VK_INVALID);
4325
4326 if (RefKind == AArch64MCExpr::VK_INVALID)
4327 return TokError("expect relocation specifier in operand after ':'");
4328
4329 Lex(); // Eat identifier
4330
4331 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4332 return true;
4333 }
4334
4335 if (getParser().parseExpression(ImmVal))
4336 return true;
4337
4338 if (HasELFModifier)
4339 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4340
4341 return false;
4342}
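// [Editorial note, not part of the upstream source.] This handles operands
// such as "add x0, x0, #:lo12:sym" or "movz x1, #:abs_g1:sym", wrapping
// the parsed expression in an AArch64MCExpr that carries the relocation
// specifier.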
4343
4344OperandMatchResultTy
4345AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4346 if (getTok().isNot(AsmToken::LCurly))
4347 return MatchOperand_NoMatch;
4348
4349 auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
4350 StringRef Name = getTok().getString();
4351 size_t DotPosition = Name.find('.');
4352 if (DotPosition == StringRef::npos)
4353 return MatchOperand_NoMatch;
4354
4355 unsigned RegNum = matchMatrixTileListRegName(Name);
4356 if (!RegNum)
4357 return MatchOperand_NoMatch;
4358
4359 StringRef Tail = Name.drop_front(DotPosition);
4360 const std::optional<std::pair<int, int>> &KindRes =
4361 parseVectorKind(Tail, RegKind::Matrix);
4362 if (!KindRes) {
4363 TokError("Expected the register to be followed by element width suffix");
4364 return MatchOperand_ParseFail;
4365 }
4366 ElementWidth = KindRes->second;
4367 Reg = RegNum;
4368 Lex(); // Eat the register.
4369 return MatchOperand_Success;
4370 };
4371
4372 SMLoc S = getLoc();
4373 auto LCurly = getTok();
4374 Lex(); // Eat the left curly brace.
4375
4376 // Empty matrix list
4377 if (parseOptionalToken(AsmToken::RCurly)) {
4378 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4379 /*RegMask=*/0, S, getLoc(), getContext()));
4380 return MatchOperand_Success;
4381 }
4382
4383 // Try to parse the {za} alias early.
4384 if (getTok().getString().equals_insensitive("za")) {
4385 Lex(); // Eat 'za'
4386
4387 if (parseToken(AsmToken::RCurly, "'}' expected"))
4388 return MatchOperand_ParseFail;
4389
4390 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4391 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4392 return MatchOperand_Success;
4393 }
4394
4395 SMLoc TileLoc = getLoc();
4396
4397 unsigned FirstReg, ElementWidth;
4398 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4399 if (ParseRes != MatchOperand_Success) {
4400 getLexer().UnLex(LCurly);
4401 return ParseRes;
4402 }
4403
4404 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4405
4406 unsigned PrevReg = FirstReg;
4407
4408 SmallSet<unsigned, 8> DRegs;
4409 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4410
4411 SmallSet<unsigned, 8> SeenRegs;
4412 SeenRegs.insert(FirstReg);
4413
4414 while (parseOptionalToken(AsmToken::Comma)) {
4415 TileLoc = getLoc();
4416 unsigned Reg, NextElementWidth;
4417 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4418 if (ParseRes != MatchOperand_Success)
4419 return ParseRes;
4420
4421 // Element size must match on all regs in the list.
4422 if (ElementWidth != NextElementWidth) {
4423 Error(TileLoc, "mismatched register size suffix");
4424 return MatchOperand_ParseFail;
4425 }
4426
4427 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4428 Warning(TileLoc, "tile list not in ascending order");
4429
4430 if (SeenRegs.contains(Reg))
4431 Warning(TileLoc, "duplicate tile in list");
4432 else {
4433 SeenRegs.insert(Reg);
4434 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4435 }
4436
4437 PrevReg = Reg;
4438 }
4439
4440 if (parseToken(AsmToken::RCurly, "'}' expected"))
4441 return MatchOperand_ParseFail;
4442
4443 unsigned RegMask = 0;
4444 for (auto Reg : DRegs)
4445 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4446 RI->getEncodingValue(AArch64::ZAD0));
4447 Operands.push_back(
4448 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4449
4450 return MatchOperand_Success;
4451}
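// [Editorial note, not part of the upstream source.] For example,
// "zero {za0.d, za2.d}" sets the mask bits for those two 64-bit tiles,
// while the "{za}" shorthand above expands to the full 0xFF mask.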
4452
4453template <RegKind VectorKind>
4454OperandMatchResultTy
4455AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4456 bool ExpectMatch) {
4457 MCAsmParser &Parser = getParser();
4458 if (!getTok().is(AsmToken::LCurly))
4459 return MatchOperand_NoMatch;
4460
4461 // Wrapper around parse function
4462 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4463 bool NoMatchIsError) {
4464 auto RegTok = getTok();
4465 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4466 if (ParseRes == MatchOperand_Success) {
4467 if (parseVectorKind(Kind, VectorKind))
4468 return ParseRes;
4469 llvm_unreachable("Expected a valid vector kind");
4470 }
4471
4472 if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch &&
4473 RegTok.getString().equals_insensitive("zt0"))
4474 return MatchOperand_NoMatch;
4475
4476 if (RegTok.isNot(AsmToken::Identifier) ||
4477 ParseRes == MatchOperand_ParseFail ||
4478 (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
4479 !RegTok.getString().starts_with_insensitive("za"))) {
4480 Error(Loc, "vector register expected");
4481 return MatchOperand_ParseFail;
4482 }
4483
4484 return MatchOperand_NoMatch;
4485 };
4486
4487 int NumRegs = getNumRegsForRegKind(VectorKind);
4488 SMLoc S = getLoc();
4489 auto LCurly = getTok();
4490 Lex(); // Eat the left curly brace.
4491
4493 MCRegister FirstReg;
4494 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4495
4496 // Put back the original left curly brace if there was no match, so that
4497 // different types of list-operands can be matched (e.g. SVE, Neon).
4498 if (ParseRes == MatchOperand_NoMatch)
4499 Parser.getLexer().UnLex(LCurly);
4500
4501 if (ParseRes != MatchOperand_Success)
4502 return ParseRes;
4503
4504 int64_t PrevReg = FirstReg;
4505 unsigned Count = 1;
4506
4507 int Stride = 1;
4508 if (parseOptionalToken(AsmToken::Minus)) {
4509 SMLoc Loc = getLoc();
4510 StringRef NextKind;
4511
4512 MCRegister Reg;
4513 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4514 if (ParseRes != MatchOperand_Success)
4515 return ParseRes;
4516
4517 // Any kind suffixes must match on all regs in the list.
4518 if (Kind != NextKind) {
4519 Error(Loc, "mismatched register size suffix");
4520 return MatchOperand_ParseFail;
4521 }
4522
4523 unsigned Space =
4524 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4525
4526 if (Space == 0 || Space > 3) {
4527 Error(Loc, "invalid number of vectors");
4528 return MatchOperand_ParseFail;
4529 }
4530
4531 Count += Space;
4532 }
4533 else {
4534 bool HasCalculatedStride = false;
4535 while (parseOptionalToken(AsmToken::Comma)) {
4536 SMLoc Loc = getLoc();
4537 StringRef NextKind;
4538 MCRegister Reg;
4539 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4540 if (ParseRes != MatchOperand_Success)
4541 return ParseRes;
4542
4543 // Any kind suffixes must match on all regs in the list.
4544 if (Kind != NextKind) {
4545 Error(Loc, "mismatched register size suffix");
4546 return MatchOperand_ParseFail;
4547 }
4548
4549 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4550 unsigned PrevRegVal =
4551 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4552 if (!HasCalculatedStride) {
4553 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4554 : (RegVal + NumRegs - PrevRegVal);
4555 HasCalculatedStride = true;
4556 }
4557
4558 // Register must be incremental (with a wraparound at last register).
4559 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
4560 Error(Loc, "registers must have the same sequential stride");
4561 return MatchOperand_ParseFail;
4562 }
4563
4564 PrevReg = Reg;
4565 ++Count;
4566 }
4567 }
4568
4569 if (parseToken(AsmToken::RCurly, "'}' expected"))
4570 return MatchOperand_ParseFail;
4571
4572 if (Count > 4) {
4573 Error(S, "invalid number of vectors");
4574 return MatchOperand_ParseFail;
4575 }
4576
4577 unsigned NumElements = 0;
4578 unsigned ElementWidth = 0;
4579 if (!Kind.empty()) {
4580 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4581 std::tie(NumElements, ElementWidth) = *VK;
4582 }
4583
4584 Operands.push_back(AArch64Operand::CreateVectorList(
4585 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4586 getLoc(), getContext()));
4587
4588 return MatchOperand_Success;
4589}
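// [Editorial note, not part of the upstream source.] Accepted lists include
// "{ v0.4s, v1.4s }", the range form "{ v0.16b - v3.16b }" (wrapping past
// v31), and strided SME2 lists such as "{ z0.s, z8.s }", where the stride
// is inferred from the first pair of registers.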
4590
4591/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4592bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4593 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4594 if (ParseRes != MatchOperand_Success)
4595 return true;
4596
4597 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4598}
4599
4600OperandMatchResultTy
4601AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4602 SMLoc StartLoc = getLoc();
4603
4604 MCRegister RegNum;
4605 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4606 if (Res != MatchOperand_Success)
4607 return Res;
4608
4609 if (!parseOptionalToken(AsmToken::Comma)) {
4610 Operands.push_back(AArch64Operand::CreateReg(
4611 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4612 return MatchOperand_Success;
4613 }
4614
4615 parseOptionalToken(AsmToken::Hash);
4616
4617 if (getTok().isNot(AsmToken::Integer)) {
4618 Error(getLoc(), "index must be absent or #0");
4619 return MatchOperand_ParseFail;
4620 }
4621
4622 const MCExpr *ImmVal;
4623 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4624 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4625 Error(getLoc(), "index must be absent or #0");
4626 return MatchOperand_ParseFail;
4627 }
4628
4629 Operands.push_back(AArch64Operand::CreateReg(
4630 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4631 return MatchOperand_Success;
4632}
4633
4634OperandMatchResultTy
4635AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4636 SMLoc StartLoc = getLoc();
4637 const AsmToken &Tok = getTok();
4638 std::string Name = Tok.getString().lower();
4639
4640 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4641
4642 if (RegNum == 0)
4643 return MatchOperand_NoMatch;
4644
4645 Operands.push_back(AArch64Operand::CreateReg(
4646 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4647 Lex(); // Eat identifier token.
4648
4649 // Check if register is followed by an index
4650 if (parseOptionalToken(AsmToken::LBrac)) {
4651 const MCExpr *ImmVal;
4652 if (getParser().parseExpression(ImmVal))
4653 return MatchOperand_NoMatch;
4654 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4655 if (!MCE) {
4656 TokError("immediate value expected for vector index");
4657 return MatchOperand_ParseFail;
4658 }
4659 if (parseToken(AsmToken::RBrac, "']' expected"))
4660 return MatchOperand_ParseFail;
4661
4662 Operands.push_back(AArch64Operand::CreateImm(
4663 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4664 getLoc(), getContext()));
4665 }
4666
4667 return MatchOperand_Success;
4668}
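// [Editorial note, not part of the upstream source.] ZT0 is the SME2
// lookup-table register, as in "ldr zt0, [x0]"; the optional "[<imm>]"
// parsed above covers the forms that take an element selector.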
4669
4670template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4671OperandMatchResultTy
4672AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4673 SMLoc StartLoc = getLoc();
4674
4675 MCRegister RegNum;
4676 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4677 if (Res != MatchOperand_Success)
4678 return Res;
4679
4680 // No shift/extend is the default.
4681 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4682 Operands.push_back(AArch64Operand::CreateReg(
4683 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4684 return MatchOperand_Success;
4685 }
4686
4687 // Eat the comma
4688 Lex();
4689
4690 // Match the shift
4691 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4692 Res = tryParseOptionalShiftExtend(ExtOpnd);
4693 if (Res != MatchOperand_Success)
4694 return Res;
4695
4696 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4697 Operands.push_back(AArch64Operand::CreateReg(
4698 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4699 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4700 Ext->hasShiftExtendAmount()));
4701
4702 return MatchOperand_Success;
4703}
4704
4705bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4706 MCAsmParser &Parser = getParser();
4707
4708 // Some SVE instructions have a decoration after the immediate, e.g.
4709 // "mul vl". We parse them here and add tokens, which must be present in
4710 // the asm string in the tablegen instruction.
4711 bool NextIsVL =
4712 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4713 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4714 if (!getTok().getString().equals_insensitive("mul") ||
4715 !(NextIsVL || NextIsHash))
4716 return true;
4717
4718 Operands.push_back(
4719 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4720 Lex(); // Eat the "mul"
4721
4722 if (NextIsVL) {
4723 Operands.push_back(
4724 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4725 Lex(); // Eat the "vl"
4726 return false;
4727 }
4728
4729 if (NextIsHash) {
4730 Lex(); // Eat the #
4731 SMLoc S = getLoc();
4732
4733 // Parse immediate operand.
4734 const MCExpr *ImmVal;
4735 if (!Parser.parseExpression(ImmVal))
4736 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4737 Operands.push_back(AArch64Operand::CreateImm(
4738 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4739 getContext()));
4740 return MatchOperand_Success;
4741 }
4742 }
4743
4744 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4745}
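// [Editorial note, not part of the upstream source.] This covers SVE
// decorations such as "mul vl" in "ld1b { z0.b }, p0/z, [x0, #1, mul vl]"
// and multipliers such as "mul #4" in "cntb x0, all, mul #4".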
4746
4747bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4748 StringRef &VecGroup) {
4749 MCAsmParser &Parser = getParser();
4750 auto Tok = Parser.getTok();
4751 if (Tok.isNot(AsmToken::Identifier))
4752 return true;
4753
4755 .Case("vgx2", "vgx2")
4756 .Case("vgx4", "vgx4")
4757 .Default("");
4758
4759 if (VG.empty())
4760 return true;
4761
4762 VecGroup = VG;
4763 Parser.Lex(); // Eat vgx[2|4]
4764 return false;
4765}
4766
4767bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4768 auto Tok = getTok();
4769 if (Tok.isNot(AsmToken::Identifier))
4770 return true;
4771
4772 auto Keyword = Tok.getString();
4774 .Case("sm", "sm")
4775 .Case("za", "za")
4776 .Default(Keyword);
4777 Operands.push_back(
4778 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4779
4780 Lex();
4781 return false;
4782}
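// [Editorial note, not part of the upstream source.] This supplies the
// keyword operand of two-word mnemonics, e.g. "smstart sm", "smstop za"
// or "brb iall".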
4783
4784/// parseOperand - Parse an AArch64 instruction operand. For now this parses
4785/// the operand regardless of the mnemonic.
4786bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4787 bool invertCondCode) {
4788 MCAsmParser &Parser = getParser();
4789
4790 OperandMatchResultTy ResTy =
4791 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
4792
4793 // Check if the current operand has a custom associated parser, if so, try to
4794 // custom parse the operand, or fallback to the general approach.
4795 if (ResTy == MatchOperand_Success)
4796 return false;
4797 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4798 // there was a match, but an error occurred, in which case, just return that
4799 // the operand parsing failed.
4800 if (ResTy == MatchOperand_ParseFail)
4801 return true;
4802
4803 // Nothing custom, so do general case parsing.
4804 SMLoc S, E;
4805 switch (getLexer().getKind()) {
4806 default: {
4807 SMLoc S = getLoc();
4808 const MCExpr *Expr;
4809 if (parseSymbolicImmVal(Expr))
4810 return Error(S, "invalid operand");
4811
4812 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4813 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4814 return false;
4815 }
4816 case AsmToken::LBrac: {
4817 Operands.push_back(
4818 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4819 Lex(); // Eat '['
4820
4821 // There's no comma after a '[', so we can parse the next operand
4822 // immediately.
4823 return parseOperand(Operands, false, false);
4824 }
4825 case AsmToken::LCurly: {
4826 if (!parseNeonVectorList(Operands))
4827 return false;
4828
4829 Operands.push_back(
4830 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4831 Lex(); // Eat '{'
4832
4833 // There's no comma after a '{', so we can parse the next operand
4834 // immediately.
4835 return parseOperand(Operands, false, false);
4836 }
4837 case AsmToken::Identifier: {
4838 // See if this is a "VG" decoration used by SME instructions.
4839 StringRef VecGroup;
4840 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4841 Operands.push_back(
4842 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4843 return false;
4844 }
4845 // If we're expecting a Condition Code operand, then just parse that.
4846 if (isCondCode)
4847 return parseCondCode(Operands, invertCondCode);
4848
4849 // If it's a register name, parse it.
4850 if (!parseRegister(Operands))
4851 return false;
4852
4853 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4854 // by SVE instructions.
4855 if (!parseOptionalMulOperand(Operands))
4856 return false;
4857
4858 // This could be an optional "shift" or "extend" operand.
4859 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
4860 // We can only continue if no tokens were eaten.
4861 if (GotShift != MatchOperand_NoMatch)
4862 return GotShift;
4863
4864 // If this is a two-word mnemonic, parse its special keyword
4865 // operand as an identifier.
4866 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4867 Mnemonic == "gcsb")
4868 return parseKeywordOperand(Operands);
4869
4870 // This was not a register so parse other operands that start with an
4871 // identifier (like labels) as expressions and create them as immediates.
4872 const MCExpr *IdVal;
4873 S = getLoc();
4874 if (getParser().parseExpression(IdVal))
4875 return true;
4876 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4877 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4878 return false;
4879 }
4880 case AsmToken::Integer:
4881 case AsmToken::Real:
4882 case AsmToken::Hash: {
4883 // #42 -> immediate.
4884 S = getLoc();
4885