//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

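// Rough examples of the assembly syntax each kind corresponds to (comment
// added by the editor for exposition): "x0"/"w0" -> Scalar, "v0.8h" ->
// NeonVector, "z3.s" -> SVEDataVector, "pn8.b" -> SVEPredicateAsCounter,
// "p1.d" -> SVEPredicateVector, "za0.s" -> Matrix, "zt0" -> LookupTable.
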
enum class MatrixKind { Array, Tile, Row, Col };

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

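  // Worked example (comment added by the editor; standard SVE assembly
  // assumed): for
  //   movprfx z0.s, p0/m, z1.s
  // CreateFromInst() yields Active = true, Predicated = true, Dst = z0,
  // Pg = p0, and ElementSize = the 32-bit element size from TSFlags, which
  // instruction validation can later check against the prefixed instruction.
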
  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

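  // For example (comment added by the editor): after the aliasing above,
  // ".word 0x1234" in AArch64 assembly is handled exactly like
  // ".4byte 0x1234".
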
  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

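  // Example (comment added by the editor): in "ldr x0, [x1, w2, sxtw #3]"
  // the index is parsed as a single RegOp for w2 whose ShiftExtend records
  // SXTW with an explicit amount of 3, letting the matcher verify that the
  // extend scales by the 8-byte access size of the load.
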
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around, as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicateTy::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicateTy::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

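  // Worked example (comment added by the editor): isImmScaled<4, 16>(true)
  // accepts signed multiples of 16 in [-128, 112], since
  // MinVal = (1 << 3) * -16 = -128 and MaxVal = ((1 << 3) - 1) * 16 = 112;
  // any other constant in that range is reported as a near-match so the
  // matcher can emit a targeted diagnostic.
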
  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }

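  // Worked example (comment added by the editor): with T = int32_t, Upper
  // masks the top 32 bits. A value such as 0x00000000F0F0F0F0 (top bits all
  // zero) or its sign-extension 0xFFFFFFFFF0F0F0F0 (top bits all one) is
  // tested as the 32-bit logical immediate 0xF0F0F0F0, while a value with
  // mixed top bits is rejected outright.
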
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }

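  // Worked example (comment added by the editor): getShiftedVal<12>() on the
  // plain immediate 0x3000 yields (3, 12) because 0x3000 == 3 << 12, while
  // the immediate 5 yields (5, 0) since it cannot absorb the shift; an
  // operand already written as "#3, lsl #12" also yields (3, 12).
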
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

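  // Example (comment added by the editor): "add x0, x1, #4096" is accepted
  // because getShiftedVal<12>() folds 4096 to (1, 12), i.e. "#1, lsl #12",
  // whereas "#4097" folds to (4097, 0) and is rejected since it exceeds
  // 0xfff.
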
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
  }

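  // Worked example (comment added by the editor): for a 19-bit conditional
  // branch target, N = 19, so the word-aligned offset must lie in
  // [-(1 << 18) << 2, ((1 << 18) - 1) << 2] = [-1048576, 1048572] bytes,
  // i.e. roughly +/-1 MiB around the branch.
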
  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }

  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

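  // Example (comment added by the editor): "uxtw #2" parses as an extend
  // operand (amount 2 <= 4), so "add x0, x1, w2, uxtw #2" is accepted,
  // whereas "uxtw #5" fails the amount check and is rejected here.
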
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift left by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

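  // Example (comment added by the editor): with Width = 64,
  // "ldr x0, [x1, #-8]" matches here (-8 is a legal 9-bit unscaled offset
  // but not a legal scaled one), so the alias assembles to LDUR, while
  // "ldr x0, [x1, #8]" is a legal scaled offset and keeps the LDR encoding.
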
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates, as the signing
    // instruction is expected to be at a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0b11)
      return false;
    return (Val <= 0) && (Val > -(1 << 18));
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

1803 enum VecListIndexType {
1804 VecListIdx_DReg = 0,
1805 VecListIdx_QReg = 1,
1806 VecListIdx_ZReg = 2,
1807 VecListIdx_PReg = 3,
1808 };
1809
1810 template <VecListIndexType RegTy, unsigned NumRegs>
1811 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1812 assert(N == 1 && "Invalid number of operands!");
1813 static const unsigned FirstRegs[][5] = {
1814 /* DReg */ { AArch64::Q0,
1815 AArch64::D0, AArch64::D0_D1,
1816 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1817 /* QReg */ { AArch64::Q0,
1818 AArch64::Q0, AArch64::Q0_Q1,
1819 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1820 /* ZReg */ { AArch64::Z0,
1821 AArch64::Z0, AArch64::Z0_Z1,
1822 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1823 /* PReg */ { AArch64::P0,
1824 AArch64::P0, AArch64::P0_P1 }
1825 };
1826
1827 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1828 " NumRegs must be <= 4 for ZRegs");
1829
1830 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1831 "NumRegs must be <= 2 for PRegs");
1832
1833 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1834 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1835 FirstRegs[(unsigned)RegTy][0]));
1836 }
1837
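// Strided lists use a fixed stride of eight for two-register lists and four
// for four-register lists, so e.g. { z1.s, z9.s } is emitted as the tuple
// register Z1_Z9 (Z0_Z8 plus the start offset).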
1838 template <unsigned NumRegs>
1839 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841 assert((NumRegs == 2 || NumRegs == 4) && "NumRegs must be 2 or 4");
1842
1843 switch (NumRegs) {
1844 case 2:
1845 if (getVectorListStart() < AArch64::Z16) {
1846 assert((getVectorListStart() < AArch64::Z8) &&
1847 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1848 Inst.addOperand(MCOperand::createReg(
1849 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1850 } else {
1851 assert((getVectorListStart() < AArch64::Z24) &&
1852 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1853 Inst.addOperand(MCOperand::createReg(
1854 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1855 }
1856 break;
1857 case 4:
1858 if (getVectorListStart() < AArch64::Z16) {
1859 assert((getVectorListStart() < AArch64::Z4) &&
1860 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1861 Inst.addOperand(MCOperand::createReg(
1862 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1863 } else {
1864 assert((getVectorListStart() < AArch64::Z20) &&
1865 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1866 Inst.addOperand(MCOperand::createReg(
1867 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1868 }
1869 break;
1870 default:
1871 llvm_unreachable("Unsupported number of registers for strided vec list");
1872 }
1873 }
1874
1875 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1876 assert(N == 1 && "Invalid number of operands!");
1877 unsigned RegMask = getMatrixTileListRegMask();
1878 assert(RegMask <= 0xFF && "Invalid mask!");
1879 Inst.addOperand(MCOperand::createImm(RegMask));
1880 }
1881
1882 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1883 assert(N == 1 && "Invalid number of operands!");
1884 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1885 }
1886
1887 template <unsigned ImmIs0, unsigned ImmIs1>
1888 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1889 assert(N == 1 && "Invalid number of operands!");
1890 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1891 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1892 }
1893
1894 void addImmOperands(MCInst &Inst, unsigned N) const {
1895 assert(N == 1 && "Invalid number of operands!");
1896 // If this is a pageoff symrefexpr with an addend, adjust the addend
1897 // to be only the page-offset portion. Otherwise, just add the expr
1898 // as-is.
1899 addExpr(Inst, getImm());
1900 }
1901
1902 template <int Shift>
1903 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1904 assert(N == 2 && "Invalid number of operands!");
1905 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1906 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1907 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1908 } else if (isShiftedImm()) {
1909 addExpr(Inst, getShiftedImmVal());
1910 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1911 } else {
1912 addExpr(Inst, getImm());
1913 Inst.addOperand(MCOperand::createImm(0));
1914 }
1915 }
1916
1917 template <int Shift>
1918 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1919 assert(N == 2 && "Invalid number of operands!");
1920 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1921 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1922 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1923 } else
1924 llvm_unreachable("Not a shifted negative immediate");
1925 }
1926
1927 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1928 assert(N == 1 && "Invalid number of operands!");
1929 Inst.addOperand(MCOperand::createImm(getCondCode()));
1930 }
1931
1932 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1933 assert(N == 1 && "Invalid number of operands!");
1934 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1935 if (!MCE)
1936 addExpr(Inst, getImm());
1937 else
1938 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1939 }
1940
1941 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1942 addImmOperands(Inst, N);
1943 }
1944
1945 template<int Scale>
1946 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1947 assert(N == 1 && "Invalid number of operands!");
1948 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1949
1950 if (!MCE) {
1951 Inst.addOperand(MCOperand::createExpr(getImm()));
1952 return;
1953 }
1954 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1955 }
1956
1957 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1960 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1961 }
1962
1963 template <int Scale>
1964 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1965 assert(N == 1 && "Invalid number of operands!");
1966 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1967 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1968 }
1969
1970 template <int Scale>
1971 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1972 assert(N == 1 && "Invalid number of operands!");
1973 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1974 }
1975
1976 template <typename T>
1977 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1978 assert(N == 1 && "Invalid number of operands!");
1979 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1980 std::make_unsigned_t<T> Val = MCE->getValue();
1981 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1982 Inst.addOperand(MCOperand::createImm(encoding));
1983 }
1984
1985 template <typename T>
1986 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1989 std::make_unsigned_t<T> Val = ~MCE->getValue();
1990 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1991 Inst.addOperand(MCOperand::createImm(encoding));
1992 }
1993
1994 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1995 assert(N == 1 && "Invalid number of operands!");
1996 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1997 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1998 Inst.addOperand(MCOperand::createImm(encoding));
1999 }
2000
2001 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2002 // Branch operands don't encode the low bits, so shift them off
2003 // here. If it's a label, however, just put it on directly as there's
2004 // not enough information now to do anything.
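// For example, a constant byte offset of 8 is emitted as the raw value 2.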
2005 assert(N == 1 && "Invalid number of operands!");
2006 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2007 if (!MCE) {
2008 addExpr(Inst, getImm());
2009 return;
2010 }
2011 assert(MCE && "Invalid constant immediate operand!");
2012 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2013 }
2014
2015 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2016 // PC-relative operands don't encode the low bits, so shift them off
2017 // here. If it's a label, however, just put it on directly as there's
2018 // not enough information now to do anything.
2019 assert(N == 1 && "Invalid number of operands!");
2020 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2021 if (!MCE) {
2022 addExpr(Inst, getImm());
2023 return;
2024 }
2025 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2026 }
2027
2028 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2029 // Branch operands don't encode the low bits, so shift them off
2030 // here. If it's a label, however, just put it on directly as there's
2031 // not enough information now to do anything.
2032 assert(N == 1 && "Invalid number of operands!");
2033 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2034 if (!MCE) {
2035 addExpr(Inst, getImm());
2036 return;
2037 }
2038 assert(MCE && "Invalid constant immediate operand!");
2039 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2040 }
2041
2042 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2043 // Branch operands don't encode the low bits, so shift them off
2044 // here. If it's a label, however, just put it on directly as there's
2045 // not enough information now to do anything.
2046 assert(N == 1 && "Invalid number of operands!");
2047 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2048 if (!MCE) {
2049 addExpr(Inst, getImm());
2050 return;
2051 }
2052 assert(MCE && "Invalid constant immediate operand!");
2053 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2054 }
2055
2056 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2057 assert(N == 1 && "Invalid number of operands!");
2058 Inst.addOperand(MCOperand::createImm(
2059 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2060 }
2061
2062 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2063 assert(N == 1 && "Invalid number of operands!");
2064 Inst.addOperand(MCOperand::createImm(getBarrier()));
2065 }
2066
2067 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2068 assert(N == 1 && "Invalid number of operands!");
2069 Inst.addOperand(MCOperand::createImm(getBarrier()));
2070 }
2071
2072 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2073 assert(N == 1 && "Invalid number of operands!");
2074
2075 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2076 }
2077
2078 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2079 assert(N == 1 && "Invalid number of operands!");
2080
2081 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2082 }
2083
2084 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2085 assert(N == 1 && "Invalid number of operands!");
2086
2087 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2088 }
2089
2090 void addSVCROperands(MCInst &Inst, unsigned N) const {
2091 assert(N == 1 && "Invalid number of operands!");
2092
2093 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2094 }
2095
2096 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2097 assert(N == 1 && "Invalid number of operands!");
2098
2099 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2100 }
2101
2102 void addSysCROperands(MCInst &Inst, unsigned N) const {
2103 assert(N == 1 && "Invalid number of operands!");
2104 Inst.addOperand(MCOperand::createImm(getSysCR()));
2105 }
2106
2107 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2108 assert(N == 1 && "Invalid number of operands!");
2109 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2110 }
2111
2112 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2113 assert(N == 1 && "Invalid number of operands!");
2114 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2115 }
2116
2117 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2118 assert(N == 1 && "Invalid number of operands!");
2119 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2120 }
2121
2122 void addShifterOperands(MCInst &Inst, unsigned N) const {
2123 assert(N == 1 && "Invalid number of operands!");
2124 unsigned Imm =
2125 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2126 Inst.addOperand(MCOperand::createImm(Imm));
2127 }
2128
2129 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2130 assert(N == 1 && "Invalid number of operands!");
2131 unsigned Imm = getShiftExtendAmount();
2132 Inst.addOperand(MCOperand::createImm(Imm));
2133 }
2134
2135 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2136 assert(N == 1 && "Invalid number of operands!");
2137
2138 if (!isScalarReg())
2139 return;
2140
2141 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2142 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2143 .getRegister(RI->getEncodingValue(getReg()));
2144 if (Reg != AArch64::XZR)
2145 llvm_unreachable("wrong register");
2146
2147 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2148 }
2149
2150 void addExtendOperands(MCInst &Inst, unsigned N) const {
2151 assert(N == 1 && "Invalid number of operands!");
2152 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2153 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2154 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2155 Inst.addOperand(MCOperand::createImm(Imm));
2156 }
2157
2158 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2159 assert(N == 1 && "Invalid number of operands!");
2160 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2161 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2162 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2163 Inst.addOperand(MCOperand::createImm(Imm));
2164 }
2165
2166 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2167 assert(N == 2 && "Invalid number of operands!");
2168 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2169 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2170 Inst.addOperand(MCOperand::createImm(IsSigned));
2171 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2172 }
2173
2174 // For 8-bit load/store instructions with a register offset, both the
2175 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2176 // they're disambiguated by whether the shift was explicit or implicit rather
2177 // than its size.
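// For example, "ldrb w0, [x1, x2, lsl #0]" selects the "DoShift" form while
// "ldrb w0, [x1, x2]" selects "NoShift", although both encode a zero shift.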
2178 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2179 assert(N == 2 && "Invalid number of operands!");
2180 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2181 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2182 Inst.addOperand(MCOperand::createImm(IsSigned));
2183 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2184 }
2185
2186 template<int Shift>
2187 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2188 assert(N == 1 && "Invalid number of operands!");
2189
2190 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2191 if (CE) {
2192 uint64_t Value = CE->getValue();
2193 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2194 } else {
2195 addExpr(Inst, getImm());
2196 }
2197 }
2198
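// MOVN aliases store the bitwise NOT of the requested value. For example,
// "mov w0, #0xfffffffe" becomes MOVN w0, #1, since (~0xfffffffe) & 0xffff
// is 1.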
2199 template<int Shift>
2200 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2201 assert(N == 1 && "Invalid number of operands!");
2202
2203 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2204 uint64_t Value = CE->getValue();
2205 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2206 }
2207
2208 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2209 assert(N == 1 && "Invalid number of operands!");
2210 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2211 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2212 }
2213
2214 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2215 assert(N == 1 && "Invalid number of operands!");
2216 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2217 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2218 }
2219
2220 void print(raw_ostream &OS) const override;
2221
2222 static std::unique_ptr<AArch64Operand>
2223 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2224 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2225 Op->Tok.Data = Str.data();
2226 Op->Tok.Length = Str.size();
2227 Op->Tok.IsSuffix = IsSuffix;
2228 Op->StartLoc = S;
2229 Op->EndLoc = S;
2230 return Op;
2231 }
2232
2233 static std::unique_ptr<AArch64Operand>
2234 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2235 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2236 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2237 unsigned ShiftAmount = 0,
2238 unsigned HasExplicitAmount = false) {
2239 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2240 Op->Reg.RegNum = RegNum;
2241 Op->Reg.Kind = Kind;
2242 Op->Reg.ElementWidth = 0;
2243 Op->Reg.EqualityTy = EqTy;
2244 Op->Reg.ShiftExtend.Type = ExtTy;
2245 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2246 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2247 Op->StartLoc = S;
2248 Op->EndLoc = E;
2249 return Op;
2250 }
2251
2252 static std::unique_ptr<AArch64Operand>
2253 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2254 SMLoc S, SMLoc E, MCContext &Ctx,
2255 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2256 unsigned ShiftAmount = 0,
2257 unsigned HasExplicitAmount = false) {
2258 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2259 Kind == RegKind::SVEPredicateVector ||
2260 Kind == RegKind::SVEPredicateAsCounter) &&
2261 "Invalid vector kind");
2262 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2263 HasExplicitAmount);
2264 Op->Reg.ElementWidth = ElementWidth;
2265 return Op;
2266 }
2267
2268 static std::unique_ptr<AArch64Operand>
2269 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2270 unsigned NumElements, unsigned ElementWidth,
2271 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2272 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2273 Op->VectorList.RegNum = RegNum;
2274 Op->VectorList.Count = Count;
2275 Op->VectorList.Stride = Stride;
2276 Op->VectorList.NumElements = NumElements;
2277 Op->VectorList.ElementWidth = ElementWidth;
2278 Op->VectorList.RegisterKind = RegisterKind;
2279 Op->StartLoc = S;
2280 Op->EndLoc = E;
2281 return Op;
2282 }
2283
2284 static std::unique_ptr<AArch64Operand>
2285 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2286 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2287 Op->VectorIndex.Val = Idx;
2288 Op->StartLoc = S;
2289 Op->EndLoc = E;
2290 return Op;
2291 }
2292
2293 static std::unique_ptr<AArch64Operand>
2294 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2295 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2296 Op->MatrixTileList.RegMask = RegMask;
2297 Op->StartLoc = S;
2298 Op->EndLoc = E;
2299 return Op;
2300 }
2301
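// Expand an SME tile register into the set of 64-bit tiles (ZAD0..ZAD7) it
// overlaps, keyed by element width; this is used to detect aliasing between
// tiles of different element types (e.g. ZAS1 overlaps ZAD1 and ZAD5).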
2302 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2303 const unsigned ElementWidth) {
2304 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2305 RegMap = {
2306 {{0, AArch64::ZAB0},
2307 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2308 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2309 {{8, AArch64::ZAB0},
2310 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2311 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2312 {{16, AArch64::ZAH0},
2313 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2314 {{16, AArch64::ZAH1},
2315 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2316 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2317 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2318 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2319 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2320 };
2321
2322 if (ElementWidth == 64)
2323 OutRegs.insert(Reg);
2324 else {
2325 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2326 assert(!Regs.empty() && "Invalid tile or element width!");
2327 for (auto OutReg : Regs)
2328 OutRegs.insert(OutReg);
2329 }
2330 }
2331
2332 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2333 SMLoc E, MCContext &Ctx) {
2334 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2335 Op->Imm.Val = Val;
2336 Op->StartLoc = S;
2337 Op->EndLoc = E;
2338 return Op;
2339 }
2340
2341 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2342 unsigned ShiftAmount,
2343 SMLoc S, SMLoc E,
2344 MCContext &Ctx) {
2345 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2346 Op->ShiftedImm.Val = Val;
2347 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2348 Op->StartLoc = S;
2349 Op->EndLoc = E;
2350 return Op;
2351 }
2352
2353 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2354 unsigned Last, SMLoc S,
2355 SMLoc E,
2356 MCContext &Ctx) {
2357 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2358 Op->ImmRange.First = First;
2359 Op->ImmRange.Last = Last;
2360 Op->EndLoc = E;
2361 return Op;
2362 }
2363
2364 static std::unique_ptr<AArch64Operand>
2365 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2366 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2367 Op->CondCode.Code = Code;
2368 Op->StartLoc = S;
2369 Op->EndLoc = E;
2370 return Op;
2371 }
2372
2373 static std::unique_ptr<AArch64Operand>
2374 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2375 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2376 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2377 Op->FPImm.IsExact = IsExact;
2378 Op->StartLoc = S;
2379 Op->EndLoc = S;
2380 return Op;
2381 }
2382
2383 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2384 StringRef Str,
2385 SMLoc S,
2386 MCContext &Ctx,
2387 bool HasnXSModifier) {
2388 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2389 Op->Barrier.Val = Val;
2390 Op->Barrier.Data = Str.data();
2391 Op->Barrier.Length = Str.size();
2392 Op->Barrier.HasnXSModifier = HasnXSModifier;
2393 Op->StartLoc = S;
2394 Op->EndLoc = S;
2395 return Op;
2396 }
2397
2398 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2399 uint32_t MRSReg,
2400 uint32_t MSRReg,
2401 uint32_t PStateField,
2402 MCContext &Ctx) {
2403 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2404 Op->SysReg.Data = Str.data();
2405 Op->SysReg.Length = Str.size();
2406 Op->SysReg.MRSReg = MRSReg;
2407 Op->SysReg.MSRReg = MSRReg;
2408 Op->SysReg.PStateField = PStateField;
2409 Op->StartLoc = S;
2410 Op->EndLoc = S;
2411 return Op;
2412 }
2413
2414 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2415 SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2417 Op->SysCRImm.Val = Val;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2424 StringRef Str,
2425 SMLoc S,
2426 MCContext &Ctx) {
2427 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2428 Op->Prefetch.Val = Val;
2429 Op->Prefetch.Data = Str.data();
2430 Op->Prefetch.Length = Str.size();
2431 Op->StartLoc = S;
2432 Op->EndLoc = S;
2433 return Op;
2434 }
2435
2436 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2437 StringRef Str,
2438 SMLoc S,
2439 MCContext &Ctx) {
2440 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2441 Op->PSBHint.Val = Val;
2442 Op->PSBHint.Data = Str.data();
2443 Op->PSBHint.Length = Str.size();
2444 Op->StartLoc = S;
2445 Op->EndLoc = S;
2446 return Op;
2447 }
2448
2449 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2450 StringRef Str,
2451 SMLoc S,
2452 MCContext &Ctx) {
2453 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
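// BTI hint operands are encoded in the HINT immediate space as
// HINT #(32 | Val), e.g. "bti c" is HINT #34.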
2454 Op->BTIHint.Val = Val | 32;
2455 Op->BTIHint.Data = Str.data();
2456 Op->BTIHint.Length = Str.size();
2457 Op->StartLoc = S;
2458 Op->EndLoc = S;
2459 return Op;
2460 }
2461
2462 static std::unique_ptr<AArch64Operand>
2463 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2464 SMLoc S, SMLoc E, MCContext &Ctx) {
2465 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2466 Op->MatrixReg.RegNum = RegNum;
2467 Op->MatrixReg.ElementWidth = ElementWidth;
2468 Op->MatrixReg.Kind = Kind;
2469 Op->StartLoc = S;
2470 Op->EndLoc = E;
2471 return Op;
2472 }
2473
2474 static std::unique_ptr<AArch64Operand>
2475 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2476 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2477 Op->SVCR.PStateField = PStateField;
2478 Op->SVCR.Data = Str.data();
2479 Op->SVCR.Length = Str.size();
2480 Op->StartLoc = S;
2481 Op->EndLoc = S;
2482 return Op;
2483 }
2484
2485 static std::unique_ptr<AArch64Operand>
2486 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2487 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2488 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2489 Op->ShiftExtend.Type = ShOp;
2490 Op->ShiftExtend.Amount = Val;
2491 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2492 Op->StartLoc = S;
2493 Op->EndLoc = E;
2494 return Op;
2495 }
2496};
2497
2498} // end anonymous namespace.
2499
2500void AArch64Operand::print(raw_ostream &OS) const {
2501 switch (Kind) {
2502 case k_FPImm:
2503 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2504 if (!getFPImmIsExact())
2505 OS << " (inexact)";
2506 OS << ">";
2507 break;
2508 case k_Barrier: {
2509 StringRef Name = getBarrierName();
2510 if (!Name.empty())
2511 OS << "<barrier " << Name << ">";
2512 else
2513 OS << "<barrier invalid #" << getBarrier() << ">";
2514 break;
2515 }
2516 case k_Immediate:
2517 OS << *getImm();
2518 break;
2519 case k_ShiftedImm: {
2520 unsigned Shift = getShiftedImmShift();
2521 OS << "<shiftedimm ";
2522 OS << *getShiftedImmVal();
2523 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2524 break;
2525 }
2526 case k_ImmRange: {
2527 OS << "<immrange ";
2528 OS << getFirstImmVal();
2529 OS << ":" << getLastImmVal() << ">";
2530 break;
2531 }
2532 case k_CondCode:
2533 OS << "<condcode " << getCondCode() << ">";
2534 break;
2535 case k_VectorList: {
2536 OS << "<vectorlist ";
2537 unsigned Reg = getVectorListStart();
2538 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2539 OS << Reg + i * getVectorListStride() << " ";
2540 OS << ">";
2541 break;
2542 }
2543 case k_VectorIndex:
2544 OS << "<vectorindex " << getVectorIndex() << ">";
2545 break;
2546 case k_SysReg:
2547 OS << "<sysreg: " << getSysReg() << '>';
2548 break;
2549 case k_Token:
2550 OS << "'" << getToken() << "'";
2551 break;
2552 case k_SysCR:
2553 OS << "c" << getSysCR();
2554 break;
2555 case k_Prefetch: {
2556 StringRef Name = getPrefetchName();
2557 if (!Name.empty())
2558 OS << "<prfop " << Name << ">";
2559 else
2560 OS << "<prfop invalid #" << getPrefetch() << ">";
2561 break;
2562 }
2563 case k_PSBHint:
2564 OS << getPSBHintName();
2565 break;
2566 case k_BTIHint:
2567 OS << getBTIHintName();
2568 break;
2569 case k_MatrixRegister:
2570 OS << "<matrix " << getMatrixReg() << ">";
2571 break;
2572 case k_MatrixTileList: {
2573 OS << "<matrixlist ";
2574 unsigned RegMask = getMatrixTileListRegMask();
2575 unsigned MaxBits = 8;
2576 for (unsigned I = MaxBits; I > 0; --I)
2577 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2578 OS << '>';
2579 break;
2580 }
2581 case k_SVCR: {
2582 OS << getSVCR();
2583 break;
2584 }
2585 case k_Register:
2586 OS << "<register " << getReg() << ">";
2587 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2588 break;
2589 [[fallthrough]];
2590 case k_ShiftExtend:
2591 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2592 << getShiftExtendAmount();
2593 if (!hasShiftExtendAmount())
2594 OS << "<imp>";
2595 OS << '>';
2596 break;
2597 }
2598}
2599
2600/// @name Auto-generated Match Functions
2601/// {
2602
2603 static unsigned MatchRegisterName(StringRef Name);
2604
2605/// }
2606
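// Neon "vN" names are matched to the corresponding 128-bit QN registers; the
// element-type suffix (e.g. ".4s") is parsed separately by parseVectorKind.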
2607 static unsigned MatchNeonVectorRegName(StringRef Name) {
2608 return StringSwitch<unsigned>(Name.lower())
2609 .Case("v0", AArch64::Q0)
2610 .Case("v1", AArch64::Q1)
2611 .Case("v2", AArch64::Q2)
2612 .Case("v3", AArch64::Q3)
2613 .Case("v4", AArch64::Q4)
2614 .Case("v5", AArch64::Q5)
2615 .Case("v6", AArch64::Q6)
2616 .Case("v7", AArch64::Q7)
2617 .Case("v8", AArch64::Q8)
2618 .Case("v9", AArch64::Q9)
2619 .Case("v10", AArch64::Q10)
2620 .Case("v11", AArch64::Q11)
2621 .Case("v12", AArch64::Q12)
2622 .Case("v13", AArch64::Q13)
2623 .Case("v14", AArch64::Q14)
2624 .Case("v15", AArch64::Q15)
2625 .Case("v16", AArch64::Q16)
2626 .Case("v17", AArch64::Q17)
2627 .Case("v18", AArch64::Q18)
2628 .Case("v19", AArch64::Q19)
2629 .Case("v20", AArch64::Q20)
2630 .Case("v21", AArch64::Q21)
2631 .Case("v22", AArch64::Q22)
2632 .Case("v23", AArch64::Q23)
2633 .Case("v24", AArch64::Q24)
2634 .Case("v25", AArch64::Q25)
2635 .Case("v26", AArch64::Q26)
2636 .Case("v27", AArch64::Q27)
2637 .Case("v28", AArch64::Q28)
2638 .Case("v29", AArch64::Q29)
2639 .Case("v30", AArch64::Q30)
2640 .Case("v31", AArch64::Q31)
2641 .Default(0);
2642}
2643
2644/// Returns an optional pair of (#elements, element-width) if Suffix
2645/// is a valid vector kind. Where the number of elements in a vector
2646/// or the vector width is implicit or explicitly unknown (but still a
2647/// valid suffix kind), 0 is used.
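/// For example, ".4s" on a Neon vector yields {4, 32}, while ".s" on an SVE
/// data vector yields {0, 32} because the element count is implicit.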
2648static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2649 RegKind VectorKind) {
2650 std::pair<int, int> Res = {-1, -1};
2651
2652 switch (VectorKind) {
2653 case RegKind::NeonVector:
2654 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2655 .Case("", {0, 0})
2656 .Case(".1d", {1, 64})
2657 .Case(".1q", {1, 128})
2658 // '.2h' needed for fp16 scalar pairwise reductions
2659 .Case(".2h", {2, 16})
2660 .Case(".2b", {2, 8})
2661 .Case(".2s", {2, 32})
2662 .Case(".2d", {2, 64})
2663 // '.4b' is another special case for the ARMv8.2a dot product
2664 // operand
2665 .Case(".4b", {4, 8})
2666 .Case(".4h", {4, 16})
2667 .Case(".4s", {4, 32})
2668 .Case(".8b", {8, 8})
2669 .Case(".8h", {8, 16})
2670 .Case(".16b", {16, 8})
2671 // Accept the width neutral ones, too, for verbose syntax. If
2672 // those aren't used in the right places, the token operand won't
2673 // match so all will work out.
2674 .Case(".b", {0, 8})
2675 .Case(".h", {0, 16})
2676 .Case(".s", {0, 32})
2677 .Case(".d", {0, 64})
2678 .Default({-1, -1});
2679 break;
2680 case RegKind::SVEPredicateAsCounter:
2681 case RegKind::SVEPredicateVector:
2682 case RegKind::SVEDataVector:
2683 case RegKind::Matrix:
2684 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2685 .Case("", {0, 0})
2686 .Case(".b", {0, 8})
2687 .Case(".h", {0, 16})
2688 .Case(".s", {0, 32})
2689 .Case(".d", {0, 64})
2690 .Case(".q", {0, 128})
2691 .Default({-1, -1});
2692 break;
2693 default:
2694 llvm_unreachable("Unsupported RegKind");
2695 }
2696
2697 if (Res == std::make_pair(-1, -1))
2698 return std::nullopt;
2699
2700 return std::optional<std::pair<int, int>>(Res);
2701}
2702
2703static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2704 return parseVectorKind(Suffix, VectorKind).has_value();
2705}
2706
2707 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2708 return StringSwitch<unsigned>(Name.lower())
2709 .Case("z0", AArch64::Z0)
2710 .Case("z1", AArch64::Z1)
2711 .Case("z2", AArch64::Z2)
2712 .Case("z3", AArch64::Z3)
2713 .Case("z4", AArch64::Z4)
2714 .Case("z5", AArch64::Z5)
2715 .Case("z6", AArch64::Z6)
2716 .Case("z7", AArch64::Z7)
2717 .Case("z8", AArch64::Z8)
2718 .Case("z9", AArch64::Z9)
2719 .Case("z10", AArch64::Z10)
2720 .Case("z11", AArch64::Z11)
2721 .Case("z12", AArch64::Z12)
2722 .Case("z13", AArch64::Z13)
2723 .Case("z14", AArch64::Z14)
2724 .Case("z15", AArch64::Z15)
2725 .Case("z16", AArch64::Z16)
2726 .Case("z17", AArch64::Z17)
2727 .Case("z18", AArch64::Z18)
2728 .Case("z19", AArch64::Z19)
2729 .Case("z20", AArch64::Z20)
2730 .Case("z21", AArch64::Z21)
2731 .Case("z22", AArch64::Z22)
2732 .Case("z23", AArch64::Z23)
2733 .Case("z24", AArch64::Z24)
2734 .Case("z25", AArch64::Z25)
2735 .Case("z26", AArch64::Z26)
2736 .Case("z27", AArch64::Z27)
2737 .Case("z28", AArch64::Z28)
2738 .Case("z29", AArch64::Z29)
2739 .Case("z30", AArch64::Z30)
2740 .Case("z31", AArch64::Z31)
2741 .Default(0);
2742}
2743
2744 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2745 return StringSwitch<unsigned>(Name.lower())
2746 .Case("p0", AArch64::P0)
2747 .Case("p1", AArch64::P1)
2748 .Case("p2", AArch64::P2)
2749 .Case("p3", AArch64::P3)
2750 .Case("p4", AArch64::P4)
2751 .Case("p5", AArch64::P5)
2752 .Case("p6", AArch64::P6)
2753 .Case("p7", AArch64::P7)
2754 .Case("p8", AArch64::P8)
2755 .Case("p9", AArch64::P9)
2756 .Case("p10", AArch64::P10)
2757 .Case("p11", AArch64::P11)
2758 .Case("p12", AArch64::P12)
2759 .Case("p13", AArch64::P13)
2760 .Case("p14", AArch64::P14)
2761 .Case("p15", AArch64::P15)
2762 .Default(0);
2763}
2764
2765 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2766 return StringSwitch<unsigned>(Name.lower())
2767 .Case("pn0", AArch64::PN0)
2768 .Case("pn1", AArch64::PN1)
2769 .Case("pn2", AArch64::PN2)
2770 .Case("pn3", AArch64::PN3)
2771 .Case("pn4", AArch64::PN4)
2772 .Case("pn5", AArch64::PN5)
2773 .Case("pn6", AArch64::PN6)
2774 .Case("pn7", AArch64::PN7)
2775 .Case("pn8", AArch64::PN8)
2776 .Case("pn9", AArch64::PN9)
2777 .Case("pn10", AArch64::PN10)
2778 .Case("pn11", AArch64::PN11)
2779 .Case("pn12", AArch64::PN12)
2780 .Case("pn13", AArch64::PN13)
2781 .Case("pn14", AArch64::PN14)
2782 .Case("pn15", AArch64::PN15)
2783 .Default(0);
2784}
2785
2786 static unsigned matchMatrixTileListRegName(StringRef Name) {
2787 return StringSwitch<unsigned>(Name.lower())
2788 .Case("za0.d", AArch64::ZAD0)
2789 .Case("za1.d", AArch64::ZAD1)
2790 .Case("za2.d", AArch64::ZAD2)
2791 .Case("za3.d", AArch64::ZAD3)
2792 .Case("za4.d", AArch64::ZAD4)
2793 .Case("za5.d", AArch64::ZAD5)
2794 .Case("za6.d", AArch64::ZAD6)
2795 .Case("za7.d", AArch64::ZAD7)
2796 .Case("za0.s", AArch64::ZAS0)
2797 .Case("za1.s", AArch64::ZAS1)
2798 .Case("za2.s", AArch64::ZAS2)
2799 .Case("za3.s", AArch64::ZAS3)
2800 .Case("za0.h", AArch64::ZAH0)
2801 .Case("za1.h", AArch64::ZAH1)
2802 .Case("za0.b", AArch64::ZAB0)
2803 .Default(0);
2804}
2805
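// Note that the row/column forms ("zaNh.t" / "zaNv.t") map to the same tile
// registers as the plain "zaN.t" names; whether the operand is a row or a
// column is recovered from the name in tryParseMatrixRegister.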
2806 static unsigned matchMatrixRegName(StringRef Name) {
2807 return StringSwitch<unsigned>(Name.lower())
2808 .Case("za", AArch64::ZA)
2809 .Case("za0.q", AArch64::ZAQ0)
2810 .Case("za1.q", AArch64::ZAQ1)
2811 .Case("za2.q", AArch64::ZAQ2)
2812 .Case("za3.q", AArch64::ZAQ3)
2813 .Case("za4.q", AArch64::ZAQ4)
2814 .Case("za5.q", AArch64::ZAQ5)
2815 .Case("za6.q", AArch64::ZAQ6)
2816 .Case("za7.q", AArch64::ZAQ7)
2817 .Case("za8.q", AArch64::ZAQ8)
2818 .Case("za9.q", AArch64::ZAQ9)
2819 .Case("za10.q", AArch64::ZAQ10)
2820 .Case("za11.q", AArch64::ZAQ11)
2821 .Case("za12.q", AArch64::ZAQ12)
2822 .Case("za13.q", AArch64::ZAQ13)
2823 .Case("za14.q", AArch64::ZAQ14)
2824 .Case("za15.q", AArch64::ZAQ15)
2825 .Case("za0.d", AArch64::ZAD0)
2826 .Case("za1.d", AArch64::ZAD1)
2827 .Case("za2.d", AArch64::ZAD2)
2828 .Case("za3.d", AArch64::ZAD3)
2829 .Case("za4.d", AArch64::ZAD4)
2830 .Case("za5.d", AArch64::ZAD5)
2831 .Case("za6.d", AArch64::ZAD6)
2832 .Case("za7.d", AArch64::ZAD7)
2833 .Case("za0.s", AArch64::ZAS0)
2834 .Case("za1.s", AArch64::ZAS1)
2835 .Case("za2.s", AArch64::ZAS2)
2836 .Case("za3.s", AArch64::ZAS3)
2837 .Case("za0.h", AArch64::ZAH0)
2838 .Case("za1.h", AArch64::ZAH1)
2839 .Case("za0.b", AArch64::ZAB0)
2840 .Case("za0h.q", AArch64::ZAQ0)
2841 .Case("za1h.q", AArch64::ZAQ1)
2842 .Case("za2h.q", AArch64::ZAQ2)
2843 .Case("za3h.q", AArch64::ZAQ3)
2844 .Case("za4h.q", AArch64::ZAQ4)
2845 .Case("za5h.q", AArch64::ZAQ5)
2846 .Case("za6h.q", AArch64::ZAQ6)
2847 .Case("za7h.q", AArch64::ZAQ7)
2848 .Case("za8h.q", AArch64::ZAQ8)
2849 .Case("za9h.q", AArch64::ZAQ9)
2850 .Case("za10h.q", AArch64::ZAQ10)
2851 .Case("za11h.q", AArch64::ZAQ11)
2852 .Case("za12h.q", AArch64::ZAQ12)
2853 .Case("za13h.q", AArch64::ZAQ13)
2854 .Case("za14h.q", AArch64::ZAQ14)
2855 .Case("za15h.q", AArch64::ZAQ15)
2856 .Case("za0h.d", AArch64::ZAD0)
2857 .Case("za1h.d", AArch64::ZAD1)
2858 .Case("za2h.d", AArch64::ZAD2)
2859 .Case("za3h.d", AArch64::ZAD3)
2860 .Case("za4h.d", AArch64::ZAD4)
2861 .Case("za5h.d", AArch64::ZAD5)
2862 .Case("za6h.d", AArch64::ZAD6)
2863 .Case("za7h.d", AArch64::ZAD7)
2864 .Case("za0h.s", AArch64::ZAS0)
2865 .Case("za1h.s", AArch64::ZAS1)
2866 .Case("za2h.s", AArch64::ZAS2)
2867 .Case("za3h.s", AArch64::ZAS3)
2868 .Case("za0h.h", AArch64::ZAH0)
2869 .Case("za1h.h", AArch64::ZAH1)
2870 .Case("za0h.b", AArch64::ZAB0)
2871 .Case("za0v.q", AArch64::ZAQ0)
2872 .Case("za1v.q", AArch64::ZAQ1)
2873 .Case("za2v.q", AArch64::ZAQ2)
2874 .Case("za3v.q", AArch64::ZAQ3)
2875 .Case("za4v.q", AArch64::ZAQ4)
2876 .Case("za5v.q", AArch64::ZAQ5)
2877 .Case("za6v.q", AArch64::ZAQ6)
2878 .Case("za7v.q", AArch64::ZAQ7)
2879 .Case("za8v.q", AArch64::ZAQ8)
2880 .Case("za9v.q", AArch64::ZAQ9)
2881 .Case("za10v.q", AArch64::ZAQ10)
2882 .Case("za11v.q", AArch64::ZAQ11)
2883 .Case("za12v.q", AArch64::ZAQ12)
2884 .Case("za13v.q", AArch64::ZAQ13)
2885 .Case("za14v.q", AArch64::ZAQ14)
2886 .Case("za15v.q", AArch64::ZAQ15)
2887 .Case("za0v.d", AArch64::ZAD0)
2888 .Case("za1v.d", AArch64::ZAD1)
2889 .Case("za2v.d", AArch64::ZAD2)
2890 .Case("za3v.d", AArch64::ZAD3)
2891 .Case("za4v.d", AArch64::ZAD4)
2892 .Case("za5v.d", AArch64::ZAD5)
2893 .Case("za6v.d", AArch64::ZAD6)
2894 .Case("za7v.d", AArch64::ZAD7)
2895 .Case("za0v.s", AArch64::ZAS0)
2896 .Case("za1v.s", AArch64::ZAS1)
2897 .Case("za2v.s", AArch64::ZAS2)
2898 .Case("za3v.s", AArch64::ZAS3)
2899 .Case("za0v.h", AArch64::ZAH0)
2900 .Case("za1v.h", AArch64::ZAH1)
2901 .Case("za0v.b", AArch64::ZAB0)
2902 .Default(0);
2903}
2904
2905bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2906 SMLoc &EndLoc) {
2907 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2908}
2909
2910ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2911 SMLoc &EndLoc) {
2912 StartLoc = getLoc();
2913 ParseStatus Res = tryParseScalarRegister(Reg);
2914 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2915 return Res;
2916}
2917
2918// Matches a register name or register alias previously defined by '.req'
2919unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2920 RegKind Kind) {
2921 unsigned RegNum = 0;
2922 if ((RegNum = matchSVEDataVectorRegName(Name)))
2923 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2924
2925 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2926 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2927
2928 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2929 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2930
2931 if ((RegNum = MatchNeonVectorRegName(Name)))
2932 return Kind == RegKind::NeonVector ? RegNum : 0;
2933
2934 if ((RegNum = matchMatrixRegName(Name)))
2935 return Kind == RegKind::Matrix ? RegNum : 0;
2936
2937 if (Name.equals_insensitive("zt0"))
2938 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2939
2940 // The parsed register must be of RegKind Scalar
2941 if ((RegNum = MatchRegisterName(Name)))
2942 return (Kind == RegKind::Scalar) ? RegNum : 0;
2943
2944 if (!RegNum) {
2945 // Handle a few common aliases of registers.
2946 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2947 .Case("fp", AArch64::FP)
2948 .Case("lr", AArch64::LR)
2949 .Case("x31", AArch64::XZR)
2950 .Case("w31", AArch64::WZR)
2951 .Default(0))
2952 return Kind == RegKind::Scalar ? RegNum : 0;
2953
2954 // Check for aliases registered via .req. Canonicalize to lower case.
2955 // That's more consistent since register names are case insensitive, and
2956 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2957 auto Entry = RegisterReqs.find(Name.lower());
2958 if (Entry == RegisterReqs.end())
2959 return 0;
2960
2961 // set RegNum if the match is the right kind of register
2962 if (Kind == Entry->getValue().first)
2963 RegNum = Entry->getValue().second;
2964 }
2965 return RegNum;
2966}
2967
2968unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2969 switch (K) {
2970 case RegKind::Scalar:
2971 case RegKind::NeonVector:
2972 case RegKind::SVEDataVector:
2973 return 32;
2974 case RegKind::Matrix:
2975 case RegKind::SVEPredicateVector:
2976 case RegKind::SVEPredicateAsCounter:
2977 return 16;
2978 case RegKind::LookupTable:
2979 return 1;
2980 }
2981 llvm_unreachable("Unsupported RegKind");
2982}
2983
2984/// tryParseScalarRegister - Try to parse a register name. The token must be an
2985/// Identifier when called, and if it is a register name the token is eaten and
2986 /// the register number is returned in \p RegNum.
2987ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2988 const AsmToken &Tok = getTok();
2989 if (Tok.isNot(AsmToken::Identifier))
2990 return ParseStatus::NoMatch;
2991
2992 std::string lowerCase = Tok.getString().lower();
2993 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2994 if (Reg == 0)
2995 return ParseStatus::NoMatch;
2996
2997 RegNum = Reg;
2998 Lex(); // Eat identifier token.
2999 return ParseStatus::Success;
3000}
3001
3002/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3003ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3004 SMLoc S = getLoc();
3005
3006 if (getTok().isNot(AsmToken::Identifier))
3007 return Error(S, "Expected cN operand where 0 <= N <= 15");
3008
3009 StringRef Tok = getTok().getIdentifier();
3010 if (Tok[0] != 'c' && Tok[0] != 'C')
3011 return Error(S, "Expected cN operand where 0 <= N <= 15");
3012
3013 uint32_t CRNum;
3014 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3015 if (BadNum || CRNum > 15)
3016 return Error(S, "Expected cN operand where 0 <= N <= 15");
3017
3018 Lex(); // Eat identifier token.
3019 Operands.push_back(
3020 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3021 return ParseStatus::Success;
3022}
3023
3024// Either an identifier for named values or a 6-bit immediate.
3025ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3026 SMLoc S = getLoc();
3027 const AsmToken &Tok = getTok();
3028
3029 unsigned MaxVal = 63;
3030
3031 // Immediate case, with optional leading hash:
3032 if (parseOptionalToken(AsmToken::Hash) ||
3033 Tok.is(AsmToken::Integer)) {
3034 const MCExpr *ImmVal;
3035 if (getParser().parseExpression(ImmVal))
3036 return ParseStatus::Failure;
3037
3038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3039 if (!MCE)
3040 return TokError("immediate value expected for prefetch operand");
3041 unsigned prfop = MCE->getValue();
3042 if (prfop > MaxVal)
3043 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3044 "] expected");
3045
3046 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3047 Operands.push_back(AArch64Operand::CreatePrefetch(
3048 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3049 return ParseStatus::Success;
3050 }
3051
3052 if (Tok.isNot(AsmToken::Identifier))
3053 return TokError("prefetch hint expected");
3054
3055 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3056 if (!RPRFM)
3057 return TokError("prefetch hint expected");
3058
3059 Operands.push_back(AArch64Operand::CreatePrefetch(
3060 RPRFM->Encoding, Tok.getString(), S, getContext()));
3061 Lex(); // Eat identifier token.
3062 return ParseStatus::Success;
3063}
3064
3065/// tryParsePrefetch - Try to parse a prefetch operand.
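/// Accepts a named hint or a raw immediate; e.g. "prfm pldl1keep, [x0]" and
/// "prfm #0, [x0]" produce the same operand, since PLDL1KEEP encodes as 0.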
3066template <bool IsSVEPrefetch>
3067ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3068 SMLoc S = getLoc();
3069 const AsmToken &Tok = getTok();
3070
3071 auto LookupByName = [](StringRef N) {
3072 if (IsSVEPrefetch) {
3073 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3074 return std::optional<unsigned>(Res->Encoding);
3075 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3076 return std::optional<unsigned>(Res->Encoding);
3077 return std::optional<unsigned>();
3078 };
3079
3080 auto LookupByEncoding = [](unsigned E) {
3081 if (IsSVEPrefetch) {
3082 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3083 return std::optional<StringRef>(Res->Name);
3084 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3085 return std::optional<StringRef>(Res->Name);
3086 return std::optional<StringRef>();
3087 };
3088 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3089
3090 // Either an identifier for named values or a 5-bit immediate.
3091 // Eat optional hash.
3092 if (parseOptionalToken(AsmToken::Hash) ||
3093 Tok.is(AsmToken::Integer)) {
3094 const MCExpr *ImmVal;
3095 if (getParser().parseExpression(ImmVal))
3096 return ParseStatus::Failure;
3097
3098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3099 if (!MCE)
3100 return TokError("immediate value expected for prefetch operand");
3101 unsigned prfop = MCE->getValue();
3102 if (prfop > MaxVal)
3103 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3104 "] expected");
3105
3106 auto PRFM = LookupByEncoding(MCE->getValue());
3107 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3108 S, getContext()));
3109 return ParseStatus::Success;
3110 }
3111
3112 if (Tok.isNot(AsmToken::Identifier))
3113 return TokError("prefetch hint expected");
3114
3115 auto PRFM = LookupByName(Tok.getString());
3116 if (!PRFM)
3117 return TokError("prefetch hint expected");
3118
3119 Operands.push_back(AArch64Operand::CreatePrefetch(
3120 *PRFM, Tok.getString(), S, getContext()));
3121 Lex(); // Eat identifier token.
3122 return ParseStatus::Success;
3123}
3124
3125/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3126ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3127 SMLoc S = getLoc();
3128 const AsmToken &Tok = getTok();
3129 if (Tok.isNot(AsmToken::Identifier))
3130 return TokError("invalid operand for instruction");
3131
3132 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3133 if (!PSB)
3134 return TokError("invalid operand for instruction");
3135
3136 Operands.push_back(AArch64Operand::CreatePSBHint(
3137 PSB->Encoding, Tok.getString(), S, getContext()));
3138 Lex(); // Eat identifier token.
3139 return ParseStatus::Success;
3140}
3141
3142ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3143 SMLoc StartLoc = getLoc();
3144
3145 MCRegister RegNum;
3146
3147 // The case where xzr, xzr is not present is handled by an InstAlias.
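// For example, this parses the trailing pair in
// "sysp #0, c2, c0, #0, xzr, xzr"; XZR is the only register accepted here.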
3148
3149 auto RegTok = getTok(); // in case we need to backtrack
3150 if (!tryParseScalarRegister(RegNum).isSuccess())
3151 return ParseStatus::NoMatch;
3152
3153 if (RegNum != AArch64::XZR) {
3154 getLexer().UnLex(RegTok);
3155 return ParseStatus::NoMatch;
3156 }
3157
3158 if (parseComma())
3159 return ParseStatus::Failure;
3160
3161 if (!tryParseScalarRegister(RegNum).isSuccess())
3162 return TokError("expected register operand");
3163
3164 if (RegNum != AArch64::XZR)
3165 return TokError("xzr must be followed by xzr");
3166
3167 // We need to push something, since we claim this is an operand in .td.
3168 // See also AArch64AsmParser::parseKeywordOperand.
3169 Operands.push_back(AArch64Operand::CreateReg(
3170 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3171
3172 return ParseStatus::Success;
3173}
3174
3175/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3176ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3177 SMLoc S = getLoc();
3178 const AsmToken &Tok = getTok();
3179 if (Tok.isNot(AsmToken::Identifier))
3180 return TokError("invalid operand for instruction");
3181
3182 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3183 if (!BTI)
3184 return TokError("invalid operand for instruction");
3185
3186 Operands.push_back(AArch64Operand::CreateBTIHint(
3187 BTI->Encoding, Tok.getString(), S, getContext()));
3188 Lex(); // Eat identifier token.
3189 return ParseStatus::Success;
3190}
3191
3192/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3193/// instruction.
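/// For example, "adrp x0, symbol", "adrp x0, :got:symbol" and
/// "adrp x0, symbol+16" are all accepted; a bare symbol is treated as an
/// ABS_PAGE reference.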
3194ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3195 SMLoc S = getLoc();
3196 const MCExpr *Expr = nullptr;
3197
3198 if (getTok().is(AsmToken::Hash)) {
3199 Lex(); // Eat hash token.
3200 }
3201
3202 if (parseSymbolicImmVal(Expr))
3203 return ParseStatus::Failure;
3204
3205 AArch64MCExpr::VariantKind ELFRefKind;
3206 MCSymbolRefExpr::VariantKind DarwinRefKind;
3207 int64_t Addend;
3208 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3209 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3210 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3211 // No modifier was specified at all; this is the syntax for an ELF basic
3212 // ADRP relocation (unfortunately).
3213 Expr =
3214 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3215 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3216 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3217 Addend != 0) {
3218 return Error(S, "gotpage label reference not allowed an addend");
3219 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3220 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3221 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3222 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3223 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3224 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3225 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3226 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3227 // The operand must be an @page or @gotpage qualified symbolref.
3228 return Error(S, "page or gotpage label reference expected");
3229 }
3230 }
3231
3232 // We have either a label reference possibly with addend or an immediate. The
3233 // addend is a raw value here. The linker will adjust it to only reference the
3234 // page.
3235 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3236 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3237
3238 return ParseStatus::Success;
3239}
3240
3241/// tryParseAdrLabel - Parse and validate a source label for the ADR
3242/// instruction.
3243ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3244 SMLoc S = getLoc();
3245 const MCExpr *Expr = nullptr;
3246
3247 // Leave anything with a bracket to the default for SVE
3248 if (getTok().is(AsmToken::LBrac))
3249 return ParseStatus::NoMatch;
3250
3251 if (getTok().is(AsmToken::Hash))
3252 Lex(); // Eat hash token.
3253
3254 if (parseSymbolicImmVal(Expr))
3255 return ParseStatus::Failure;
3256
3257 AArch64MCExpr::VariantKind ELFRefKind;
3258 MCSymbolRefExpr::VariantKind DarwinRefKind;
3259 int64_t Addend;
3260 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3261 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3262 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3263 // No modifier was specified at all; this is the syntax for an ELF basic
3264 // ADR relocation (unfortunately).
3265 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3266 } else {
3267 return Error(S, "unexpected adr label");
3268 }
3269 }
3270
3271 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3272 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3273 return ParseStatus::Success;
3274}
3275
3276/// tryParseFPImm - A floating point immediate expression operand.
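/// Accepts a literal such as "fmov d0, #1.0" or, for exact encodings, the
/// raw 8-bit value, e.g. "fmov d0, #0x70" (also 1.0).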
3277template <bool AddFPZeroAsLiteral>
3278ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3279 SMLoc S = getLoc();
3280
3281 bool Hash = parseOptionalToken(AsmToken::Hash);
3282
3283 // Handle negation, as that still comes through as a separate token.
3284 bool isNegative = parseOptionalToken(AsmToken::Minus);
3285
3286 const AsmToken &Tok = getTok();
3287 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3288 if (!Hash)
3289 return ParseStatus::NoMatch;
3290 return TokError("invalid floating point immediate");
3291 }
3292
3293 // Parse hexadecimal representation.
3294 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3295 if (Tok.getIntVal() > 255 || isNegative)
3296 return TokError("encoded floating point value out of range");
3297
3298 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3299 Operands.push_back(
3300 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3301 } else {
3302 // Parse FP representation.
3303 APFloat RealVal(APFloat::IEEEdouble());
3304 auto StatusOrErr =
3305 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3306 if (errorToBool(StatusOrErr.takeError()))
3307 return TokError("invalid floating point representation");
3308
3309 if (isNegative)
3310 RealVal.changeSign();
3311
3312 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3313 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3314 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3315 } else
3316 Operands.push_back(AArch64Operand::CreateFPImm(
3317 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3318 }
3319
3320 Lex(); // Eat the token.
3321
3322 return ParseStatus::Success;
3323}
3324
3325/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3326/// a shift suffix, for example '#1, lsl #12'.
3327 ParseStatus
3328 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3329 SMLoc S = getLoc();
3330
3331 if (getTok().is(AsmToken::Hash))
3332 Lex(); // Eat '#'
3333 else if (getTok().isNot(AsmToken::Integer))
3334 // Operand should start from # or should be integer, emit error otherwise.
3335 return ParseStatus::NoMatch;
3336
3337 if (getTok().is(AsmToken::Integer) &&
3338 getLexer().peekTok().is(AsmToken::Colon))
3339 return tryParseImmRange(Operands);
3340
3341 const MCExpr *Imm = nullptr;
3342 if (parseSymbolicImmVal(Imm))
3343 return ParseStatus::Failure;
3344 else if (getTok().isNot(AsmToken::Comma)) {
3345 Operands.push_back(
3346 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3347 return ParseStatus::Success;
3348 }
3349
3350 // Eat ','
3351 Lex();
3352 StringRef VecGroup;
3353 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3354 Operands.push_back(
3355 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3356 Operands.push_back(
3357 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3358 return ParseStatus::Success;
3359 }
3360
3361 // The optional operand must be "lsl #N" where N is non-negative.
3362 if (!getTok().is(AsmToken::Identifier) ||
3363 !getTok().getIdentifier().equals_insensitive("lsl"))
3364 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3365
3366 // Eat 'lsl'
3367 Lex();
3368
3369 parseOptionalToken(AsmToken::Hash);
3370
3371 if (getTok().isNot(AsmToken::Integer))
3372 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3373
3374 int64_t ShiftAmount = getTok().getIntVal();
3375
3376 if (ShiftAmount < 0)
3377 return Error(getLoc(), "positive shift amount required");
3378 Lex(); // Eat the number
3379
3380 // Just in case the optional lsl #0 is used for immediates other than zero.
3381 if (ShiftAmount == 0 && Imm != nullptr) {
3382 Operands.push_back(
3383 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3384 return ParseStatus::Success;
3385 }
3386
3387 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3388 getLoc(), getContext()));
3389 return ParseStatus::Success;
3390}
3391
3392/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3393/// suggestion to help common typos.
3394 AArch64CC::CondCode
3395 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3396 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3397 .Case("eq", AArch64CC::EQ)
3398 .Case("ne", AArch64CC::NE)
3399 .Case("cs", AArch64CC::HS)
3400 .Case("hs", AArch64CC::HS)
3401 .Case("cc", AArch64CC::LO)
3402 .Case("lo", AArch64CC::LO)
3403 .Case("mi", AArch64CC::MI)
3404 .Case("pl", AArch64CC::PL)
3405 .Case("vs", AArch64CC::VS)
3406 .Case("vc", AArch64CC::VC)
3407 .Case("hi", AArch64CC::HI)
3408 .Case("ls", AArch64CC::LS)
3409 .Case("ge", AArch64CC::GE)
3410 .Case("lt", AArch64CC::LT)
3411 .Case("gt", AArch64CC::GT)
3412 .Case("le", AArch64CC::LE)
3413 .Case("al", AArch64CC::AL)
3414 .Case("nv", AArch64CC::NV)
3415 .Default(AArch64CC::Invalid);
3416
3417 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3418 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3419 .Case("none", AArch64CC::EQ)
3420 .Case("any", AArch64CC::NE)
3421 .Case("nlast", AArch64CC::HS)
3422 .Case("last", AArch64CC::LO)
3423 .Case("first", AArch64CC::MI)
3424 .Case("nfrst", AArch64CC::PL)
3425 .Case("pmore", AArch64CC::HI)
3426 .Case("plast", AArch64CC::LS)
3427 .Case("tcont", AArch64CC::GE)
3428 .Case("tstop", AArch64CC::LT)
3429 .Default(AArch64CC::Invalid);
3430
3431 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3432 Suggestion = "nfrst";
3433 }
3434 return CC;
3435}
3436
3437/// parseCondCode - Parse a Condition Code operand.
3438bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3439 bool invertCondCode) {
3440 SMLoc S = getLoc();
3441 const AsmToken &Tok = getTok();
3442 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3443
3444 StringRef Cond = Tok.getString();
3445 std::string Suggestion;
3446 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3447 if (CC == AArch64CC::Invalid) {
3448 std::string Msg = "invalid condition code";
3449 if (!Suggestion.empty())
3450 Msg += ", did you mean " + Suggestion + "?";
3451 return TokError(Msg);
3452 }
3453 Lex(); // Eat identifier token.
3454
3455 if (invertCondCode) {
3456 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3457 return TokError("condition codes AL and NV are invalid for this instruction");
3458 CC = AArch64CC::getInvertedCondCode(CC);
3459 }
3460
3461 Operands.push_back(
3462 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3463 return false;
3464}
3465
3466ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3467 const AsmToken &Tok = getTok();
3468 SMLoc S = getLoc();
3469
3470 if (Tok.isNot(AsmToken::Identifier))
3471 return TokError("invalid operand for instruction");
3472
3473 unsigned PStateImm = -1;
3474 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3475 if (!SVCR)
3476 return ParseStatus::NoMatch;
3477 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3478 PStateImm = SVCR->Encoding;
3479
3480 Operands.push_back(
3481 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3482 Lex(); // Eat identifier token.
3483 return ParseStatus::Success;
3484}
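// Illustrative uses (not from the source), assuming SME MSR syntax:
//   msr svcrsm, #1    // enter streaming mode
//   msr svcrza, #0    // disable ZA storage
//   msr svcrsmza, #1  // set both at once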
3485
3486ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3487 const AsmToken &Tok = getTok();
3488 SMLoc S = getLoc();
3489
3490 StringRef Name = Tok.getString();
3491
3492 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3493 Lex(); // eat "za[.(b|h|s|d)]"
3494 unsigned ElementWidth = 0;
3495 auto DotPosition = Name.find('.');
3496 if (DotPosition != StringRef::npos) {
3497 const auto &KindRes =
3498 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3499 if (!KindRes)
3500 return TokError(
3501 "Expected the register to be followed by element width suffix");
3502 ElementWidth = KindRes->second;
3503 }
3504 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3505 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3506 getContext()));
3507 if (getLexer().is(AsmToken::LBrac)) {
3508 // There's no comma after a matrix operand, so we can parse the next operand
3509 // immediately.
3510 if (parseOperand(Operands, false, false))
3511 return ParseStatus::NoMatch;
3512 }
3513 return ParseStatus::Success;
3514 }
3515
3516 // Try to parse matrix register.
3517 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3518 if (!Reg)
3519 return ParseStatus::NoMatch;
3520
3521 size_t DotPosition = Name.find('.');
3522 assert(DotPosition != StringRef::npos && "Unexpected register");
3523
3524 StringRef Head = Name.take_front(DotPosition);
3525 StringRef Tail = Name.drop_front(DotPosition);
3526 StringRef RowOrColumn = Head.take_back();
3527
3528 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3529 .Case("h", MatrixKind::Row)
3530 .Case("v", MatrixKind::Col)
3531 .Default(MatrixKind::Tile);
3532
3533 // Next up, parse the suffix.
3534 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3535 if (!KindRes)
3536 return TokError(
3537 "Expected the register to be followed by element width suffix");
3538 unsigned ElementWidth = KindRes->second;
3539
3540 Lex();
3541
3542 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3543 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3544
3545 if (getLexer().is(AsmToken::LBrac)) {
3546 // There's no comma after a matrix operand, so we can parse the next operand
3547 // immediately.
3548 if (parseOperand(Operands, false, false))
3549 return ParseStatus::NoMatch;
3550 }
3551 return ParseStatus::Success;
3552}
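// Illustrative operands (not from the source), assuming SME syntax:
//   za       // whole array (MatrixKind::Array); may carry a .b/.h/.s/.d suffix
//   za0.s    // tile (MatrixKind::Tile)
//   za1h.s   // tile row (MatrixKind::Row)
//   za2v.d   // tile column (MatrixKind::Col)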
3553
3554/// tryParseOptionalShiftExtend - Some operands take an optional shift or
3555/// extend argument. Parse it if present.
3556ParseStatus
3557AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3558 const AsmToken &Tok = getTok();
3559 std::string LowerID = Tok.getString().lower();
3560 AArch64_AM::ShiftExtendType ShOp =
3561 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3562 .Case("lsl", AArch64_AM::LSL)
3563 .Case("lsr", AArch64_AM::LSR)
3564 .Case("asr", AArch64_AM::ASR)
3565 .Case("ror", AArch64_AM::ROR)
3566 .Case("msl", AArch64_AM::MSL)
3567 .Case("uxtb", AArch64_AM::UXTB)
3568 .Case("uxth", AArch64_AM::UXTH)
3569 .Case("uxtw", AArch64_AM::UXTW)
3570 .Case("uxtx", AArch64_AM::UXTX)
3571 .Case("sxtb", AArch64_AM::SXTB)
3572 .Case("sxth", AArch64_AM::SXTH)
3573 .Case("sxtw", AArch64_AM::SXTW)
3574 .Case("sxtx", AArch64_AM::SXTX)
3575 .Default(AArch64_AM::InvalidShiftExtend);
3576
3577 if (ShOp == AArch64_AM::InvalidShiftExtend)
3578 return ParseStatus::NoMatch;
3579
3580 SMLoc S = Tok.getLoc();
3581 Lex();
3582
3583 bool Hash = parseOptionalToken(AsmToken::Hash);
3584
3585 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3586 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3587 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3588 ShOp == AArch64_AM::MSL) {
3589 // We expect a number here.
3590 return TokError("expected #imm after shift specifier");
3591 }
3592
3593 // "extend" type operations don't need an immediate, #0 is implicit.
3594 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3595 Operands.push_back(
3596 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3597 return ParseStatus::Success;
3598 }
3599
3600 // Make sure we do actually have a number, identifier or a parenthesized
3601 // expression.
3602 SMLoc E = getLoc();
3603 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3604 !getTok().is(AsmToken::Identifier))
3605 return Error(E, "expected integer shift amount");
3606
3607 const MCExpr *ImmVal;
3608 if (getParser().parseExpression(ImmVal))
3609 return ParseStatus::Failure;
3610
3611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3612 if (!MCE)
3613 return Error(E, "expected constant '#imm' after shift specifier");
3614
3615 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3616 Operands.push_back(AArch64Operand::CreateShiftExtend(
3617 ShOp, MCE->getValue(), true, S, E, getContext()));
3618 return ParseStatus::Success;
3619}
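// Illustrative operands (not from the source):
//   add x0, x1, x2, lsl #3    // shift: immediate is mandatory
//   add x0, x1, w2, sxtw      // extend: immediate omitted, #0 implied
//   ldr w0, [x1, w2, uxtw #2] // extend with an explicit amount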
3620
3621static const struct Extension {
3622 const char *Name;
3623 const FeatureBitset Features;
3624} ExtensionMap[] = {
3625 {"crc", {AArch64::FeatureCRC}},
3626 {"sm4", {AArch64::FeatureSM4}},
3627 {"sha3", {AArch64::FeatureSHA3}},
3628 {"sha2", {AArch64::FeatureSHA2}},
3629 {"aes", {AArch64::FeatureAES}},
3630 {"crypto", {AArch64::FeatureCrypto}},
3631 {"fp", {AArch64::FeatureFPARMv8}},
3632 {"simd", {AArch64::FeatureNEON}},
3633 {"ras", {AArch64::FeatureRAS}},
3634 {"rasv2", {AArch64::FeatureRASv2}},
3635 {"lse", {AArch64::FeatureLSE}},
3636 {"predres", {AArch64::FeaturePredRes}},
3637 {"predres2", {AArch64::FeatureSPECRES2}},
3638 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3639 {"mte", {AArch64::FeatureMTE}},
3640 {"memtag", {AArch64::FeatureMTE}},
3641 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3642 {"pan", {AArch64::FeaturePAN}},
3643 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3644 {"ccpp", {AArch64::FeatureCCPP}},
3645 {"rcpc", {AArch64::FeatureRCPC}},
3646 {"rng", {AArch64::FeatureRandGen}},
3647 {"sve", {AArch64::FeatureSVE}},
3648 {"sve2", {AArch64::FeatureSVE2}},
3649 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3650 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3651 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3652 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3653 {"sve2p1", {AArch64::FeatureSVE2p1}},
3654 {"b16b16", {AArch64::FeatureB16B16}},
3655 {"ls64", {AArch64::FeatureLS64}},
3656 {"xs", {AArch64::FeatureXS}},
3657 {"pauth", {AArch64::FeaturePAuth}},
3658 {"flagm", {AArch64::FeatureFlagM}},
3659 {"rme", {AArch64::FeatureRME}},
3660 {"sme", {AArch64::FeatureSME}},
3661 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3662 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3663 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3664 {"sme2", {AArch64::FeatureSME2}},
3665 {"sme2p1", {AArch64::FeatureSME2p1}},
3666 {"hbc", {AArch64::FeatureHBC}},
3667 {"mops", {AArch64::FeatureMOPS}},
3668 {"mec", {AArch64::FeatureMEC}},
3669 {"the", {AArch64::FeatureTHE}},
3670 {"d128", {AArch64::FeatureD128}},
3671 {"lse128", {AArch64::FeatureLSE128}},
3672 {"ite", {AArch64::FeatureITE}},
3673 {"cssc", {AArch64::FeatureCSSC}},
3674 {"rcpc3", {AArch64::FeatureRCPC3}},
3675 {"gcs", {AArch64::FeatureGCS}},
3676 {"bf16", {AArch64::FeatureBF16}},
3677 {"compnum", {AArch64::FeatureComplxNum}},
3678 {"dotprod", {AArch64::FeatureDotProd}},
3679 {"f32mm", {AArch64::FeatureMatMulFP32}},
3680 {"f64mm", {AArch64::FeatureMatMulFP64}},
3681 {"fp16", {AArch64::FeatureFullFP16}},
3682 {"fp16fml", {AArch64::FeatureFP16FML}},
3683 {"i8mm", {AArch64::FeatureMatMulInt8}},
3684 {"lor", {AArch64::FeatureLOR}},
3685 {"profile", {AArch64::FeatureSPE}},
3686 // "rdma" is the name documented by binutils for the feature, but
3687 // binutils also accepts incomplete prefixes of features, so "rdm"
3688 // works too. Support both spellings here.
3689 {"rdm", {AArch64::FeatureRDM}},
3690 {"rdma", {AArch64::FeatureRDM}},
3691 {"sb", {AArch64::FeatureSB}},
3692 {"ssbs", {AArch64::FeatureSSBS}},
3693 {"tme", {AArch64::FeatureTME}},
3694 {"fpmr", {AArch64::FeatureFPMR}},
3695 {"fp8", {AArch64::FeatureFP8}},
3696 {"faminmax", {AArch64::FeatureFAMINMAX}},
3697 {"fp8fma", {AArch64::FeatureFP8FMA}},
3698 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3699 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3700 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3701 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3702 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3703 {"lut", {AArch64::FeatureLUT}},
3704 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3705 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3706 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3707 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3708 {"cpa", {AArch64::FeatureCPA}},
3709 {"tlbiw", {AArch64::FeatureTLBIW}},
3710};
3711
3712static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3713 if (FBS[AArch64::HasV8_0aOps])
3714 Str += "ARMv8a";
3715 if (FBS[AArch64::HasV8_1aOps])
3716 Str += "ARMv8.1a";
3717 else if (FBS[AArch64::HasV8_2aOps])
3718 Str += "ARMv8.2a";
3719 else if (FBS[AArch64::HasV8_3aOps])
3720 Str += "ARMv8.3a";
3721 else if (FBS[AArch64::HasV8_4aOps])
3722 Str += "ARMv8.4a";
3723 else if (FBS[AArch64::HasV8_5aOps])
3724 Str += "ARMv8.5a";
3725 else if (FBS[AArch64::HasV8_6aOps])
3726 Str += "ARMv8.6a";
3727 else if (FBS[AArch64::HasV8_7aOps])
3728 Str += "ARMv8.7a";
3729 else if (FBS[AArch64::HasV8_8aOps])
3730 Str += "ARMv8.8a";
3731 else if (FBS[AArch64::HasV8_9aOps])
3732 Str += "ARMv8.9a";
3733 else if (FBS[AArch64::HasV9_0aOps])
3734 Str += "ARMv9-a";
3735 else if (FBS[AArch64::HasV9_1aOps])
3736 Str += "ARMv9.1a";
3737 else if (FBS[AArch64::HasV9_2aOps])
3738 Str += "ARMv9.2a";
3739 else if (FBS[AArch64::HasV9_3aOps])
3740 Str += "ARMv9.3a";
3741 else if (FBS[AArch64::HasV9_4aOps])
3742 Str += "ARMv9.4a";
3743 else if (FBS[AArch64::HasV9_5aOps])
3744 Str += "ARMv9.5a";
3745 else if (FBS[AArch64::HasV8_0rOps])
3746 Str += "ARMv8r";
3747 else {
3748 SmallVector<std::string, 2> ExtMatches;
3749 for (const auto& Ext : ExtensionMap) {
3750 // Use & in case multiple features are enabled
3751 if ((FBS & Ext.Features) != FeatureBitset())
3752 ExtMatches.push_back(Ext.Name);
3753 }
3754 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3755 }
3756}
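// The names in ExtensionMap are the spellings accepted by directives such
// as ".arch_extension memtag". setRequiredFeatureString renders a missing
// feature set as diagnostic text, producing messages like (illustrative
// wording, not an exact message) "DC CVAP requires: ccpp".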
3757
3758void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3759 SMLoc S) {
3760 const uint16_t Op2 = Encoding & 7;
3761 const uint16_t Cm = (Encoding & 0x78) >> 3;
3762 const uint16_t Cn = (Encoding & 0x780) >> 7;
3763 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3764
3765 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3766
3767 Operands.push_back(
3768 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3769 Operands.push_back(
3770 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3771 Operands.push_back(
3772 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3773 Expr = MCConstantExpr::create(Op2, getContext());
3774 Operands.push_back(
3775 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3776}
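// Layout implied by the masks above: Op1 in bits [13:11], CRn in [10:7],
// CRm in [6:3], Op2 in [2:0]. For example (illustrative, not from the
// source), "ic iallu" (op1=0, CRn=7, CRm=5, op2=0) is emitted as the alias
// "sys #0, c7, c5, #0".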
3777
3778/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3779/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3780bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3781 OperandVector &Operands) {
3782 if (Name.contains('.'))
3783 return TokError("invalid operand");
3784
3785 Mnemonic = Name;
3786 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3787
3788 const AsmToken &Tok = getTok();
3789 StringRef Op = Tok.getString();
3790 SMLoc S = Tok.getLoc();
3791
3792 if (Mnemonic == "ic") {
3793 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3794 if (!IC)
3795 return TokError("invalid operand for IC instruction");
3796 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3797 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3798 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3799 return TokError(Str);
3800 }
3801 createSysAlias(IC->Encoding, Operands, S);
3802 } else if (Mnemonic == "dc") {
3803 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3804 if (!DC)
3805 return TokError("invalid operand for DC instruction");
3806 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3807 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3808 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3809 return TokError(Str);
3810 }
3811 createSysAlias(DC->Encoding, Operands, S);
3812 } else if (Mnemonic == "at") {
3813 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3814 if (!AT)
3815 return TokError("invalid operand for AT instruction");
3816 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3817 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3818 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3819 return TokError(Str);
3820 }
3821 createSysAlias(AT->Encoding, Operands, S);
3822 } else if (Mnemonic == "tlbi") {
3823 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3824 if (!TLBI)
3825 return TokError("invalid operand for TLBI instruction");
3826 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3827 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3828 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3829 return TokError(Str);
3830 }
3831 createSysAlias(TLBI->Encoding, Operands, S);
3832 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3833
3834 if (Op.lower() != "rctx")
3835 return TokError("invalid operand for prediction restriction instruction");
3836
3837 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3838 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3839 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3840
3841 if (Mnemonic == "cosp" && !hasSpecres2)
3842 return TokError("COSP requires: predres2");
3843 if (!hasPredres)
3844 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3845
3846 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3847 : Mnemonic == "dvp" ? 0b101
3848 : Mnemonic == "cosp" ? 0b110
3849 : Mnemonic == "cpp" ? 0b111
3850 : 0;
3851 assert(PRCTX_Op2 &&
3852 "Invalid mnemonic for prediction restriction instruction");
3853 const auto SYS_3_7_3 = 0b01101110011; // op1=3, CRn=7, CRm=3
3854 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3855
3856 createSysAlias(Encoding, Operands, S);
3857 }
3858
3859 Lex(); // Eat operand.
3860
3861 bool ExpectRegister = !Op.contains_insensitive("all");
3862 bool HasRegister = false;
3863
3864 // Check for the optional register operand.
3865 if (parseOptionalToken(AsmToken::Comma)) {
3866 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3867 return TokError("expected register operand");
3868 HasRegister = true;
3869 }
3870
3871 if (ExpectRegister && !HasRegister)
3872 return TokError("specified " + Mnemonic + " op requires a register");
3873 else if (!ExpectRegister && HasRegister)
3874 return TokError("specified " + Mnemonic + " op does not use a register");
3875
3876 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3877 return true;
3878
3879 return false;
3880}
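// Illustrative spellings (not from the source):
//   ic ialluis     // name contains "all", so no register is expected
//   dc zva, x0     // register operand required
//   cfp rctx, x0   // prediction restriction alias (requires predres)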
3881
3882/// parseSyspAlias - The TLBIP instructions are simple aliases for
3883/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3884bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3885 OperandVector &Operands) {
3886 if (Name.contains('.'))
3887 return TokError("invalid operand");
3888
3889 Mnemonic = Name;
3890 Operands.push_back(
3891 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3892
3893 const AsmToken &Tok = getTok();
3894 StringRef Op = Tok.getString();
3895 SMLoc S = Tok.getLoc();
3896
3897 if (Mnemonic == "tlbip") {
3898 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3899 if (HasnXSQualifier) {
3900 Op = Op.drop_back(3);
3901 }
3902 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3903 if (!TLBIorig)
3904 return TokError("invalid operand for TLBIP instruction");
3905 const AArch64TLBI::TLBI TLBI(
3906 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3907 TLBIorig->NeedsReg,
3908 HasnXSQualifier
3909 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3910 : TLBIorig->FeaturesRequired);
3911 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3912 std::string Name =
3913 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3914 std::string Str("TLBIP " + Name + " requires: ");
3915 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
3916 return TokError(Str);
3917 }
3918 createSysAlias(TLBI.Encoding, Operands, S);
3919 }
3920
3921 Lex(); // Eat operand.
3922
3923 if (parseComma())
3924 return true;
3925
3926 if (Tok.isNot(AsmToken::Identifier))
3927 return TokError("expected register identifier");
3928 auto Result = tryParseSyspXzrPair(Operands);
3929 if (Result.isNoMatch())
3930 Result = tryParseGPRSeqPair(Operands);
3931 if (!Result.isSuccess())
3932 return TokError("specified " + Mnemonic +
3933 " op requires a pair of registers");
3934
3935 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3936 return true;
3937
3938 return false;
3939}
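// Illustrative spellings (not from the source), assuming FEAT_D128 syntax:
//   tlbip vae1, x0, x1      // takes a sequential register pair
//   tlbip vae1nxs, x2, x3   // nXS qualifier sets encoding bit 7 (FEAT_XS)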
3940
3941ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3942 MCAsmParser &Parser = getParser();
3943 const AsmToken &Tok = getTok();
3944
3945 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
3946 return TokError("'csync' operand expected");
3947 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3948 // Immediate operand.
3949 const MCExpr *ImmVal;
3950 SMLoc ExprLoc = getLoc();
3951 AsmToken IntTok = Tok;
3952 if (getParser().parseExpression(ImmVal))
3953 return ParseStatus::Failure;
3954 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3955 if (!MCE)
3956 return Error(ExprLoc, "immediate value expected for barrier operand");
3957 int64_t Value = MCE->getValue();
3958 if (Mnemonic == "dsb" && Value > 15) {
3959 // This case is a no match here, but it might be matched by the nXS
3960 // variant. We deliberately do not unlex the optional '#', as it is not
3961 // needed to characterize an integer immediate.
3962 Parser.getLexer().UnLex(IntTok);
3963 return ParseStatus::NoMatch;
3964 }
3965 if (Value < 0 || Value > 15)
3966 return Error(ExprLoc, "barrier operand out of range");
3967 auto DB = AArch64DB::lookupDBByEncoding(Value);
3968 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3969 ExprLoc, getContext(),
3970 false /*hasnXSModifier*/));
3971 return ParseStatus::Success;
3972 }
3973
3974 if (Tok.isNot(AsmToken::Identifier))
3975 return TokError("invalid operand for instruction");
3976
3977 StringRef Operand = Tok.getString();
3978 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3979 auto DB = AArch64DB::lookupDBByName(Operand);
3980 // The only valid named option for ISB is 'sy'
3981 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
3982 return TokError("'sy' or #imm operand expected");
3983 // The only valid named option for TSB is 'csync'
3984 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
3985 return TokError("'csync' operand expected");
3986 if (!DB && !TSB) {
3987 if (Mnemonic == "dsb") {
3988 // This case is a no match here, but it might be matched by the nXS
3989 // variant.
3990 return ParseStatus::NoMatch;
3991 }
3992 return TokError("invalid barrier option name");
3993 }
3994
3995 Operands.push_back(AArch64Operand::CreateBarrier(
3996 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3997 getContext(), false /*hasnXSModifier*/));
3998 Lex(); // Consume the option
3999
4000 return ParseStatus::Success;
4001}
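// Illustrative operands (not from the source): "dsb sy", "dsb #15" (the
// encoding of sy), "isb" or "isb sy", and "tsb csync". For dsb, immediates
// above 15 return NoMatch so the nXS variant below can claim them.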
4002
4003ParseStatus
4004AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4005 const AsmToken &Tok = getTok();
4006
4007 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4008 if (Mnemonic != "dsb")
4009 return ParseStatus::Failure;
4010
4011 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4012 // Immediate operand.
4013 const MCExpr *ImmVal;
4014 SMLoc ExprLoc = getLoc();
4015 if (getParser().parseExpression(ImmVal))
4016 return ParseStatus::Failure;
4017 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4018 if (!MCE)
4019 return Error(ExprLoc, "immediate value expected for barrier operand");
4020 int64_t Value = MCE->getValue();
4021 // v8.7-A DSB in the nXS variant accepts only the following immediate
4022 // values: 16, 20, 24, 28.
4023 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4024 return Error(ExprLoc, "barrier operand out of range");
4025 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4026 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4027 ExprLoc, getContext(),
4028 true /*hasnXSModifier*/));
4029 return ParseStatus::Success;
4030 }
4031
4032 if (Tok.isNot(AsmToken::Identifier))
4033 return TokError("invalid operand for instruction");
4034
4035 StringRef Operand = Tok.getString();
4036 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4037
4038 if (!DB)
4039 return TokError("invalid barrier option name");
4040
4041 Operands.push_back(
4042 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4043 getContext(), true /*hasnXSModifier*/));
4044 Lex(); // Consume the option
4045
4046 return ParseStatus::Success;
4047}
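// Illustrative operands (not from the source), assuming FEAT_XS spellings:
//   dsb synxs    // equivalently "dsb #28"
//   dsb ishnxs   // equivalently "dsb #24"; only 16/20/24/28 are accepted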
4048
4049ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4050 const AsmToken &Tok = getTok();
4051
4052 if (Tok.isNot(AsmToken::Identifier))
4053 return ParseStatus::NoMatch;
4054
4055 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4056 return ParseStatus::NoMatch;
4057
4058 int MRSReg, MSRReg;
4059 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4060 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4061 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4062 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4063 } else
4064 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4065
4066 unsigned PStateImm = -1;
4067 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4068 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4069 PStateImm = PState15->Encoding;
4070 if (!PState15) {
4071 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4072 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4073 PStateImm = PState1->Encoding;
4074 }
4075
4076 Operands.push_back(
4077 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4078 PStateImm, getContext()));
4079 Lex(); // Eat identifier
4080
4081 return ParseStatus::Success;
4082}
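// Illustrative operands (not from the source):
//   mrs x0, midr_el1       // named read-only register (MSRReg stays -1)
//   msr spsel, #1          // PSTATE field taking an immediate
//   mrs x0, s3_0_c4_c2_2   // generic S<op0>_<op1>_<Cn>_<Cm>_<op2> form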
4083
4084/// tryParseNeonVectorRegister - Parse a vector register operand.
4085bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4086 if (getTok().isNot(AsmToken::Identifier))
4087 return true;
4088
4089 SMLoc S = getLoc();
4090 // Check for a vector register specifier first.
4091 StringRef Kind;
4092 MCRegister Reg;
4093 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4094 if (!Res.isSuccess())
4095 return true;
4096
4097 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4098 if (!KindRes)
4099 return true;
4100
4101 unsigned ElementWidth = KindRes->second;
4102 Operands.push_back(
4103 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4104 S, getLoc(), getContext()));
4105
4106 // If there was an explicit qualifier, that goes on as a literal text
4107 // operand.
4108 if (!Kind.empty())
4109 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4110
4111 return tryParseVectorIndex(Operands).isFailure();
4112}
4113
4114ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4115 SMLoc SIdx = getLoc();
4116 if (parseOptionalToken(AsmToken::LBrac)) {
4117 const MCExpr *ImmVal;
4118 if (getParser().parseExpression(ImmVal))
4119 return ParseStatus::NoMatch;
4120 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4121 if (!MCE)
4122 return TokError("immediate value expected for vector index");
4123
4124 SMLoc E = getLoc();
4125
4126 if (parseToken(AsmToken::RBrac, "']' expected"))
4127 return ParseStatus::Failure;
4128
4129 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4130 E, getContext()));
4131 return ParseStatus::Success;
4132 }
4133
4134 return ParseStatus::NoMatch;
4135}
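// Illustrative operands (not from the source): "v0.8b" or "v1.4s" for the
// register itself, and lane forms such as "v2.s[1]" (e.g. "mov s0, v2.s[1]"),
// where the bracketed constant becomes a vector-index operand.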
4136
4137// tryParseVectorRegister - Try to parse a vector register name with
4138// optional kind specifier. If it is a register specifier, eat the token
4139// and return it.
4140ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4141 StringRef &Kind,
4142 RegKind MatchKind) {
4143 const AsmToken &Tok = getTok();
4144
4145 if (Tok.isNot(AsmToken::Identifier))
4146 return ParseStatus::NoMatch;
4147
4148 StringRef Name = Tok.getString();
4149 // If there is a kind specifier, it's separated from the register name by
4150 // a '.'.
4151 size_t Start = 0, Next = Name.find('.');
4152 StringRef Head = Name.slice(Start, Next);
4153 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4154
4155 if (RegNum) {
4156 if (Next != StringRef::npos) {
4157 Kind = Name.slice(Next, StringRef::npos);
4158 if (!isValidVectorKind(Kind, MatchKind))
4159 return TokError("invalid vector kind qualifier");
4160 }
4161 Lex(); // Eat the register token.
4162
4163 Reg = RegNum;
4164 return ParseStatus::Success;
4165 }
4166
4167 return ParseStatus::NoMatch;
4168}
4169
4170/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4171template <RegKind RK>
4172ParseStatus
4173AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4174 // Check for a SVE predicate register specifier first.
4175 const SMLoc S = getLoc();
4176 StringRef Kind;
4177 MCRegister RegNum;
4178 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4179 if (!Res.isSuccess())
4180 return Res;
4181
4182 const auto &KindRes = parseVectorKind(Kind, RK);
4183 if (!KindRes)
4184 return ParseStatus::NoMatch;
4185
4186 unsigned ElementWidth = KindRes->second;
4187 Operands.push_back(AArch64Operand::CreateVectorReg(
4188 RegNum, RK, ElementWidth, S,
4189 getLoc(), getContext()));
4190
4191 if (getLexer().is(AsmToken::LBrac)) {
4192 if (RK == RegKind::SVEPredicateAsCounter) {
4193 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4194 if (ResIndex.isSuccess())
4195 return ParseStatus::Success;
4196 } else {
4197 // Indexed predicate; there's no comma, so try parsing the next operand
4198 // immediately.
4199 if (parseOperand(Operands, false, false))
4200 return ParseStatus::NoMatch;
4201 }
4202 }
4203
4204 // Not all predicates are followed by a '/m' or '/z'.
4205 if (getTok().isNot(AsmToken::Slash))
4206 return ParseStatus::Success;
4207
4208 // But when they do they shouldn't have an element type suffix.
4209 if (!Kind.empty())
4210 return Error(S, "not expecting size suffix");
4211
4212 // Add a literal slash as operand
4213 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4214
4215 Lex(); // Eat the slash.
4216
4217 // Zeroing or merging?
4218 auto Pred = getTok().getString().lower();
4219 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4220 return Error(getLoc(), "expecting 'z' predication");
4221
4222 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4223 return Error(getLoc(), "expecting 'm' or 'z' predication");
4224
4225 // Add zero/merge token.
4226 const char *ZM = Pred == "z" ? "z" : "m";
4227 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4228
4229 Lex(); // Eat zero/merge token.
4230 return ParseStatus::Success;
4231}
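// Illustrative operands (not from the source): "p0.b" (element suffix),
// bare "p0/m" or "p0/z" qualifiers (no suffix allowed before the slash),
// and SME predicate-as-counter registers such as "pn8", which accept only
// "/z" predication.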
4232
4233/// parseRegister - Parse a register operand.
4234bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4235 // Try for a Neon vector register.
4236 if (!tryParseNeonVectorRegister(Operands))
4237 return false;
4238
4239 if (tryParseZTOperand(Operands).isSuccess())
4240 return false;
4241
4242 // Otherwise try for a scalar register.
4243 if (tryParseGPROperand<false>(Operands).isSuccess())
4244 return false;
4245
4246 return true;
4247}
4248
4249bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4250 bool HasELFModifier = false;
4251 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4252
4253 if (parseOptionalToken(AsmToken::Colon)) {
4254 HasELFModifier = true;
4255
4256 if (getTok().isNot(AsmToken::Identifier))
4257 return TokError("expect relocation specifier in operand after ':'");
4258
4259 std::string LowerCase = getTok().getIdentifier().lower();
4260 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4261 .Case("lo12", AArch64MCExpr::VK_LO12)
4262 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4263 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4264 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4265 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4266 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4267 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4268 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4269 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4270 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4271 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4272 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4273 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4274 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4275 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4276 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4277 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4278 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4279 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4280 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4281 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4282 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4283 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4284 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4285 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4286 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4287 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4288 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4289 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4290 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4291 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4292 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4293 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4294 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4295 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4296 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4297 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4298 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4299 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4300 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4301 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4302 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4303 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4304 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4305 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4306 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4307 .Default(AArch64MCExpr::VK_INVALID);
4308
4309 if (RefKind == AArch64MCExpr::VK_INVALID)
4310 return TokError("expect relocation specifier in operand after ':'");
4311
4312 Lex(); // Eat identifier
4313
4314 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4315 return true;
4316 }
4317
4318 if (getParser().parseExpression(ImmVal))
4319 return true;
4320
4321 if (HasELFModifier)
4322 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4323
4324 return false;
4325}
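// Illustrative relocation specifiers (not from the source):
//   add  x0, x0, :lo12:sym     // VK_LO12 page offset
//   movz x0, #:abs_g1_nc:sym   // MOVW group relocation
//   adrp x8, :got:sym          // VK_GOT_PAGE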
4326
4327ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4328 if (getTok().isNot(AsmToken::LCurly))
4329 return ParseStatus::NoMatch;
4330
4331 auto ParseMatrixTile = [this](unsigned &Reg,
4332 unsigned &ElementWidth) -> ParseStatus {
4333 StringRef Name = getTok().getString();
4334 size_t DotPosition = Name.find('.');
4335 if (DotPosition == StringRef::npos)
4336 return ParseStatus::NoMatch;
4337
4338 unsigned RegNum = matchMatrixTileListRegName(Name);
4339 if (!RegNum)
4340 return ParseStatus::NoMatch;
4341
4342 StringRef Tail = Name.drop_front(DotPosition);
4343 const std::optional<std::pair<int, int>> &KindRes =
4344 parseVectorKind(Tail, RegKind::Matrix);
4345 if (!KindRes)
4346 return TokError(
4347 "Expected the register to be followed by element width suffix");
4348 ElementWidth = KindRes->second;
4349 Reg = RegNum;
4350 Lex(); // Eat the register.
4351 return ParseStatus::Success;
4352 };
4353
4354 SMLoc S = getLoc();
4355 auto LCurly = getTok();
4356 Lex(); // Eat left bracket token.
4357
4358 // Empty matrix list
4359 if (parseOptionalToken(AsmToken::RCurly)) {
4360 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4361 /*RegMask=*/0, S, getLoc(), getContext()));
4362 return ParseStatus::Success;
4363 }
4364
4365 // Try parse {za} alias early
4366 if (getTok().getString().equals_insensitive("za")) {
4367 Lex(); // Eat 'za'
4368
4369 if (parseToken(AsmToken::RCurly, "'}' expected"))
4370 return ParseStatus::Failure;
4371
4372 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4373 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4374 return ParseStatus::Success;
4375 }
4376
4377 SMLoc TileLoc = getLoc();
4378
4379 unsigned FirstReg, ElementWidth;
4380 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4381 if (!ParseRes.isSuccess()) {
4382 getLexer().UnLex(LCurly);
4383 return ParseRes;
4384 }
4385
4386 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4387
4388 unsigned PrevReg = FirstReg;
4389
4390 SmallSet<unsigned, 8> DRegs;
4391 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4392
4393 SmallSet<unsigned, 8> SeenRegs;
4394 SeenRegs.insert(FirstReg);
4395
4396 while (parseOptionalToken(AsmToken::Comma)) {
4397 TileLoc = getLoc();
4398 unsigned Reg, NextElementWidth;
4399 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4400 if (!ParseRes.isSuccess())
4401 return ParseRes;
4402
4403 // Element size must match on all regs in the list.
4404 if (ElementWidth != NextElementWidth)
4405 return Error(TileLoc, "mismatched register size suffix");
4406
4407 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4408 Warning(TileLoc, "tile list not in ascending order");
4409
4410 if (SeenRegs.contains(Reg))
4411 Warning(TileLoc, "duplicate tile in list");
4412 else {
4413 SeenRegs.insert(Reg);
4414 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4415 }
4416
4417 PrevReg = Reg;
4418 }
4419
4420 if (parseToken(AsmToken::RCurly, "'}' expected"))
4421 return ParseStatus::Failure;
4422
4423 unsigned RegMask = 0;
4424 for (auto Reg : DRegs)
4425 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4426 RI->getEncodingValue(AArch64::ZAD0));
4427 Operands.push_back(
4428 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4429
4430 return ParseStatus::Success;
4431}
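// Illustrative tile lists (not from the source), e.g. with the SME "zero"
// instruction: "zero {za}" expands to the full 0xFF mask, "zero {za0.d,
// za2.d}" builds the mask from the ZAD encodings, and an empty "{}" yields
// a zero mask.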
4432
4433template <RegKind VectorKind>
4434ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4435 bool ExpectMatch) {
4436 MCAsmParser &Parser = getParser();
4437 if (!getTok().is(AsmToken::LCurly))
4438 return ParseStatus::NoMatch;
4439
4440 // Wrapper around parse function
4441 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4442 bool NoMatchIsError) -> ParseStatus {
4443 auto RegTok = getTok();
4444 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4445 if (ParseRes.isSuccess()) {
4446 if (parseVectorKind(Kind, VectorKind))
4447 return ParseRes;
4448 llvm_unreachable("Expected a valid vector kind");
4449 }
4450
4451 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4452 RegTok.getString().equals_insensitive("zt0"))
4453 return ParseStatus::NoMatch;
4454
4455 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4456 (ParseRes.isNoMatch() && NoMatchIsError &&
4457 !RegTok.getString().starts_with_insensitive("za")))
4458 return Error(Loc, "vector register expected");
4459
4460 return ParseStatus::NoMatch;
4461 };
4462
4463 int NumRegs = getNumRegsForRegKind(VectorKind);
4464 SMLoc S = getLoc();
4465 auto LCurly = getTok();
4466 Lex(); // Eat left bracket token.
4467
4468 StringRef Kind;
4469 MCRegister FirstReg;
4470 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4471
4472 // Put back the original left bracket if there was no match, so that
4473 // different types of list-operands can be matched (e.g. SVE, Neon).
4474 if (ParseRes.isNoMatch())
4475 Parser.getLexer().UnLex(LCurly);
4476
4477 if (!ParseRes.isSuccess())
4478 return ParseRes;
4479
4480 int64_t PrevReg = FirstReg;
4481 unsigned Count = 1;
4482
4483 int Stride = 1;
4484 if (parseOptionalToken(AsmToken::Minus)) {
4485 SMLoc Loc = getLoc();
4486 StringRef NextKind;
4487
4488 MCRegister Reg;
4489 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4490 if (!ParseRes.isSuccess())
4491 return ParseRes;
4492
4493 // Any kind suffixes must match on all regs in the list.
4494 if (Kind != NextKind)
4495 return Error(Loc, "mismatched register size suffix");
4496
4497 unsigned Space =
4498 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4499
4500 if (Space == 0 || Space > 3)
4501 return Error(Loc, "invalid number of vectors");
4502
4503 Count += Space;
4504 }
4505 else {
4506 bool HasCalculatedStride = false;
4507 while (parseOptionalToken(AsmToken::Comma)) {
4508 SMLoc Loc = getLoc();
4509 StringRef NextKind;
4510 MCRegister Reg;
4511 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4512 if (!ParseRes.isSuccess())
4513 return ParseRes;
4514
4515 // Any kind suffixes must match on all regs in the list.
4516 if (Kind != NextKind)
4517 return Error(Loc, "mismatched register size suffix");
4518
4519 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4520 unsigned PrevRegVal =
4521 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4522 if (!HasCalculatedStride) {
4523 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4524 : (RegVal + NumRegs - PrevRegVal);
4525 HasCalculatedStride = true;
4526 }
4527
4528 // Registers must be incremental (with wraparound at the last register).
4529 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4530 return Error(Loc, "registers must have the same sequential stride");
4531
4532 PrevReg = Reg;
4533 ++Count;
4534 }
4535 }
4536
4537 if (parseToken(AsmToken::RCurly, "'}' expected"))
4538 return ParseStatus::Failure;
4539
4540 if (Count > 4)
4541 return Error(S, "invalid number of vectors");
4542
4543 unsigned NumElements = 0;
4544 unsigned ElementWidth = 0;
4545 if (!Kind.empty()) {
4546 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4547 std::tie(NumElements, ElementWidth) = *VK;
4548 }
4549
4550 Operands.push_back(AArch64Operand::CreateVectorList(
4551 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4552 getLoc(), getContext()));
4553
4554 return ParseStatus::Success;
4555}
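// Illustrative lists (not from the source):
//   { v0.16b, v1.16b }           // consecutive Neon registers (stride 1)
//   { z0.d - z3.d }              // range form, at most four registers
//   { z0.s, z4.s, z8.s, z12.s }  // SME2 strided list (constant stride 4)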
4556
4557/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4558bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4559 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4560 if (!ParseRes.isSuccess())
4561 return true;
4562
4563 return tryParseVectorIndex(Operands).isFailure();
4564}
4565
4566ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4567 SMLoc StartLoc = getLoc();
4568
4569 MCRegister RegNum;
4570 ParseStatus Res = tryParseScalarRegister(RegNum);
4571 if (!Res.isSuccess())
4572 return Res;
4573
4574 if (!parseOptionalToken(AsmToken::Comma)) {
4575 Operands.push_back(AArch64Operand::CreateReg(
4576 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4577 return ParseStatus::Success;
4578 }
4579
4580 parseOptionalToken(AsmToken::Hash);
4581
4582 if (getTok().isNot(AsmToken::Integer))
4583 return Error(getLoc(), "index must be absent or #0");
4584
4585 const MCExpr *ImmVal;
4586 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4587 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4588 return Error(getLoc(), "index must be absent or #0");
4589
4590 Operands.push_back(AArch64Operand::CreateReg(
4591 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4592 return ParseStatus::Success;
4593}
4594
4595ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4596 SMLoc StartLoc = getLoc();
4597 const AsmToken &Tok = getTok();
4598 std::string Name = Tok.getString().lower();
4599
4600 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4601
4602 if (RegNum == 0)
4603 return ParseStatus::NoMatch;
4604
4605 Operands.push_back(AArch64Operand::CreateReg(
4606 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4607 Lex(); // Eat register.
4608
4609 // Check if register is followed by an index
4610 if (parseOptionalToken(AsmToken::LBrac)) {
4611 Operands.push_back(
4612 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4613 const MCExpr *ImmVal;
4614 if (getParser().parseExpression(ImmVal))
4615 return ParseStatus::NoMatch;
4616 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4617 if (!MCE)
4618 return TokError("immediate value expected for vector index");
4619 Operands.push_back(AArch64Operand::CreateImm(
4620 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4621 getLoc(), getContext()));
4622 if (parseOptionalToken(AsmToken::Comma))
4623 if (parseOptionalMulOperand(Operands))
4624 return ParseStatus::Failure;
4625 if (parseToken(AsmToken::RBrac, "']' expected"))
4626 return ParseStatus::Failure;
4627 Operands.push_back(
4628 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4629 }
4630 return ParseStatus::Success;
4631}
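// Illustrative operands (not from the source), based on what this parser
// accepts: "zt0" alone, or indexed forms such as "zt0[0]" and
// "zt0[2, mul vl]" (the "mul" part is handled by parseOptionalMulOperand).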
4632
4633template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4634ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4635 SMLoc StartLoc = getLoc();
4636
4637 MCRegister RegNum;
4638 ParseStatus Res = tryParseScalarRegister(RegNum);
4639 if (!Res.isSuccess())
4640 return Res;
4641
4642 // No shift/extend is the default.
4643 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4644 Operands.push_back(AArch64Operand::CreateReg(
4645 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4646 return ParseStatus::Success;
4647 }
4648
4649 // Eat the comma
4650 Lex();
4651
4652 // Match the shift
4653 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4654 Res = tryParseOptionalShiftExtend(ExtOpnd);
4655 if (!Res.isSuccess())
4656 return Res;
4657
4658 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4659 Operands.push_back(AArch64Operand::CreateReg(
4660 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4661 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4662 Ext->hasShiftExtendAmount()));
4663
4664 return ParseStatus::Success;
4665}
4666
4667bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4668 MCAsmParser &Parser = getParser();
4669
4670 // Some SVE instructions have a decoration after the immediate, i.e.
4671 // "mul vl". We parse them here and add tokens, which must be present in the
4672 // asm string in the tablegen instruction.
4673 bool NextIsVL =
4674 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4675 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4676 if (!getTok().getString().equals_insensitive("mul") ||
4677 !(NextIsVL || NextIsHash))
4678 return true;
4679
4680 Operands.push_back(
4681 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4682 Lex(); // Eat the "mul"
4683
4684 if (NextIsVL) {
4685 Operands.push_back(
4686 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4687 Lex(); // Eat the "vl"
4688 return false;
4689 }
4690
4691 if (NextIsHash) {
4692 Lex(); // Eat the #
4693 SMLoc S = getLoc();
4694
4695 // Parse immediate operand.
4696 const MCExpr *ImmVal;
4697 if (!Parser.parseExpression(ImmVal))
4698 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4699 Operands.push_back(AArch64Operand::CreateImm(
4700 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4701 getContext()));
4702 return false;
4703 }
4704 }
4705
4706 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4707}
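// Illustrative uses (not from the source):
//   ld1b { z0.b }, p0/z, [x0, #1, mul vl]  // vector-length multiplier
//   cntd x0, all, mul #4                   // immediate multiplier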
4708
4709bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4710 StringRef &VecGroup) {
4711 MCAsmParser &Parser = getParser();
4712 auto Tok = Parser.getTok();
4713 if (Tok.isNot(AsmToken::Identifier))
4714 return true;
4715
4716 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4717 .Case("vgx2", "vgx2")
4718 .Case("vgx4", "vgx4")
4719 .Default("");
4720
4721 if (VG.empty())
4722 return true;
4723
4724 VecGroup = VG;
4725 Parser.Lex(); // Eat vgx[2|4]
4726 return false;
4727}
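// Illustrative use (not from the source): SME2 multi-vector forms carry a
// vgx2/vgx4 group marker inside the ZA index, e.g.
//   add za.s[w8, 0, vgx2], { z0.s, z1.s }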
4728
4729bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4730 auto Tok = getTok();
4731 if (Tok.isNot(AsmToken::Identifier))
4732 return true;
4733
4734 auto Keyword = Tok.getString();
4735 Keyword = StringSwitch<StringRef>(Keyword)
4736 .Case("sm", "sm")
4737 .Case("za", "za")
4738 .Default(Keyword);
4739 Operands.push_back(
4740 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4741
4742 Lex();
4743 return false;
4744}
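// Illustrative two-word mnemonics (not from the source): "smstart sm",
// "smstop za" (SME) and "brb iall" (FEAT_BRBE); the keyword is pushed as a
// plain token operand.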
4745
4746/// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4747/// operand regardless of the mnemonic.
4748bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4749 bool invertCondCode) {
4750 MCAsmParser &Parser = getParser();
4751
4752 ParseStatus ResTy =
4753 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4754
4755 // Check if the current operand has a custom associated parser, if so, try to
4756 // custom parse the operand, or fallback to the general approach.
4757 if (ResTy.isSuccess())
4758 return false;
4759 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4760 // there was a match, but an error occurred, in which case, just return that
4761 // the operand parsing failed.
4762 if (ResTy.isFailure())
4763 return true;
4764
4765 // Nothing custom, so do general case parsing.
4766 SMLoc S, E;
4767 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4768 if (parseOptionalToken(AsmToken::Comma)) {
4769 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4770 if (!Res.isNoMatch())
4771 return Res.isFailure();
4772 getLexer().UnLex(SavedTok);
4773 }
4774 return false;
4775 };
4776 switch (getLexer().getKind()) {
4777 default: {
4778 SMLoc S = getLoc();
4779 const MCExpr *Expr;
4780 if (parseSymbolicImmVal(Expr))
4781 return Error(S, "invalid operand");
4782
4783 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4784 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4785 return parseOptionalShiftExtend(getTok());
4786 }
4787 case AsmToken::LBrac: {
4788 Operands.push_back(
4789 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4790 Lex(); // Eat '['
4791
4792 // There's no comma after a '[', so we can parse the next operand
4793 // immediately.
4794 return parseOperand(Operands, false, false);
4795 }
4796 case AsmToken::LCurly: {
4797 if (!parseNeonVectorList(Operands))
4798 return false;
4799
4800 Operands.push_back(
4801 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4802 Lex(); // Eat '{'
4803
4804 // There's no comma after a '{', so we can parse the next operand
4805 // immediately.
4806 return parseOperand(Operands, false, false);
4807 }
4808 case AsmToken::Identifier: {
4809 // See if this is a "VG" decoration used by SME instructions.
4810 StringRef VecGroup;
4811 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4812 Operands.push_back(
4813 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4814 return false;
4815 }
4816 // If we're expecting a Condition Code operand, then just parse that.
4817 if (isCondCode)
4818 return parseCondCode(Operands, invertCondCode);
4819
4820 // If it's a register name, parse it.
4821 if (!parseRegister(Operands)) {
4822 // Parse an optional shift/extend modifier.
4823 AsmToken SavedTok = getTok();
4824 if (parseOptionalToken(AsmToken::Comma)) {
4825 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4826 // such cases and don't report an error when <label> happens to match a
4827 // shift/extend modifier.
4828 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4829 /*ParseForAllFeatures=*/true);
4830 if (!Res.isNoMatch())
4831 return Res.isFailure();
4832 Res = tryParseOptionalShiftExtend(Operands);
4833 if (!Res.isNoMatch())
4834 return Res.isFailure();
4835 getLexer().UnLex(SavedTok);
4836 }
4837 return false;
4838 }
4839
4840 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4841 // by SVE instructions.
4842 if (!parseOptionalMulOperand(Operands))
4843 return false;
4844
4845 // If this is a two-word mnemonic, parse its special keyword
4846 // operand as an identifier.
4847 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4848 Mnemonic == "gcsb")
4849 return parseKeywordOperand(Operands);
4850
4851 // This was not a register so parse other operands that start with an
4852 // identifier (like labels) as expressions and create them as immediates.
4853 const MCExpr *IdVal;
4854 S = getLoc();
4855 if (getParser().parseExpression(IdVal))
4856 return true;
4857 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4858 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4859 return false;
4860 }
4861 case AsmToken::Integer:
4862 case AsmToken::Real:
4863 case AsmToken::Hash: {
4864 // #42 -> immediate.
4865 S = getLoc();
4866
4867 parseOptionalToken(AsmToken::Hash);
4868
4869 // Parse a negative sign
4870 bool isNegative = false;
4871 if (getTok().is(AsmToken::Minus)) {
4872 isNegative = true;
4873 // We need to consume this token only when we have a Real, otherwise
4874 // we let parseSymbolicImmVal take care of it
4875 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4876 Lex();
4877 }
4878
4879 // The only Real that should come through here is a literal #0.0 for
4880 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4881 // so convert the value.
4882 const AsmToken &Tok = getTok();
4883 if (Tok.is(AsmToken::Real)) {
4884 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4885 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4886 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4887 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4888 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4889 return TokError("unexpected floating point literal");
4890 else if (IntVal != 0 || isNegative)
4891 return TokError("expected floating-point constant #0.0");
4892 Lex(); // Eat the token.
4893
4894 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4895 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4896 return false;
4897 }
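 // Illustrative inputs (not from the source) for the block above:
 // "fcmp s0, #0.0" and "fcmeq v0.4s, v1.4s, #0.0"; the literal is split
 // into "#0" and ".0" tokens to match the tablegen asm strings.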
4898
4899 const MCExpr *ImmVal;
4900 if (parseSymbolicImmVal(ImmVal))
4901 return true;
4902
4903 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4904 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4905
4906 // Parse an optional shift/extend modifier.
4907 return parseOptionalShiftExtend(Tok);
4908 }
4909 case AsmToken::Equal: {
4910