//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};

enum class MatrixKind { Array, Tile, Row, Col };

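// Describes how a register parsed for an operand must relate to the register
// class the matcher expects: the same register, its super-register, or its
// sub-register (e.g. a W register standing in for its X super-register, as
// with the GPR32as64/GPR64as32 operands below).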
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;

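  // Tracks an active MOVPRFX prefix so that the instruction following it can
  // be checked against the prefix's destination register and governing
  // predicate (see validateInstruction).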
  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    MCRegister getDstReg() const { return Dst; }
    MCRegister getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize;
    MCRegister Dst;
    MCRegister Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDataExpr(const MCExpr *&Res) override;
  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFINegateRAStateWithPC();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
  bool parseDirectiveSEHAllocZ(SMLoc L);
  bool parseDirectiveSEHSaveZReg(SMLoc L);
  bool parseDirectiveSEHSavePReg(SMLoc L);
  bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
  bool parseDirectiveAeabiAArch64Attr(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseCMHPriorityHint(OperandVector &Operands);
  ParseStatus tryParseTIndexHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  ParseStatus
  tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);
  template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
  ParseStatus tryParsePHintInstOperand(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;
  bool IsWindowsArm64EC;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

334
335 bool areEqualRegs(const MCParsedAsmOperand &Op1,
336 const MCParsedAsmOperand &Op2) const override;
337 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
338 SMLoc NameLoc, OperandVector &Operands) override;
339 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
340 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
341 SMLoc &EndLoc) override;
342 bool ParseDirective(AsmToken DirectiveID) override;
343 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
344 unsigned Kind) override;
345
346 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
347 AArch64::Specifier &DarwinSpec,
348 int64_t &Addend);
349};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_PHint,
    k_BTIHint,
    k_CMHPriorityHint,
    k_TIndexHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    MCRegister Reg;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
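    // Illustrative example: in "ldr x0, [x1, w2, sxtw #3]" the "w2, sxtw #3"
    // portion is parsed as a single register operand whose ShiftExtend
    // records the SXTW extend and the #3 amount.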
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    MCRegister Reg;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  struct VectorListOp {
    MCRegister Reg;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct PHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct CMHPriorityHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };
  struct TIndexHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct CMHPriorityHintOp CMHPriorityHint;
    struct TIndexHintOp TIndexHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_PHint:
      PHint = o.PHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      break;
    case k_TIndexHint:
      TIndexHint = o.TIndexHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.Reg;
  }

  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");
    return PHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;
  }

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);
  }

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;
  }

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const {
    return bool(isSImmScaled<Width, 1>());
  }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicate::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      return DiagnosticPredicate::NoMatch;

    int64_t Val;
    if (isImmRange())
      Val = getFirstImmVal();
    else {
      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
      if (!MCE)
        return DiagnosticPredicate::NoMatch;
      Val = MCE->getValue();
    }

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicate::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicate::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicate::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicate::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
                               ELFSpec)) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
               DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
      return Addend == 0;
    }

    return false;
  }

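  // A UImm12 offset is scaled by the access size: e.g. with Scale == 8 the
  // accepted byte offsets are 0, 8, ..., 32760 (0xFFF * 8).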
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
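  // A logical immediate is a repeated, rotated run of set bits, e.g.
  // 0x00ff00ff00ff00ff or 0x0003000300030003 (illustrative examples).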
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
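    // E.g. Upper == 0xffffffff00000000 when T is a 32-bit type, and 0 when T
    // is a 64-bit type (two 32-bit shifts instead of one undefined 64-bit
    // shift).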
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
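  /// For example (illustrative), getShiftedVal<12>() yields (3, 12) for the
  /// plain immediate #0x3000 and (5, 0) for #5.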
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
                                            Addend)) {
      return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
             DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
             (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
                 ELFSpec);
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
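  // E.g. for .h elements, #127, #-128 and #-32768 (i.e. -128, lsl #8) are all
  // accepted, while #257 is not (illustrative examples).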
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicate::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be an unsigned multiple of 256 in the
  // range 0 to 65280.
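  // E.g. "add z0.h, z0.h, #512" is accepted because 512 == 2, lsl #8
  // (illustrative example).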
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicate::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

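  // Branch targets are byte offsets that must be word (4-byte) aligned; N is
  // the width in bits of the word-scaled field encoded in the instruction.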
  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
  }

  bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64::Specifier ELFSpec;
    AArch64::Specifier DarwinSpec;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
                                             Addend)) {
      return false;
    }
    if (DarwinSpec != AArch64::S_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFSpec);
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
                         AArch64::S_ABS_G2_NC, AArch64::S_PREL_G2,
                         AArch64::S_PREL_G2_NC, AArch64::S_TPREL_G2,
                         AArch64::S_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
                         AArch64::S_ABS_G1_NC, AArch64::S_PREL_G1,
                         AArch64::S_PREL_G1_NC, AArch64::S_GOTTPREL_G1,
                         AArch64::S_TPREL_G1, AArch64::S_TPREL_G1_NC,
                         AArch64::S_DTPREL_G1, AArch64::S_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
                         AArch64::S_ABS_G0_NC, AArch64::S_PREL_G0,
                         AArch64::S_PREL_G0_NC, AArch64::S_GOTTPREL_G0_NC,
                         AArch64::S_TPREL_G0, AArch64::S_TPREL_G0_NC,
                         AArch64::S_DTPREL_G0, AArch64::S_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.Reg) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.Reg));
  }

  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.Reg));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }

  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
    case AArch64::ZPRMul2_LoRegClassID:
    case AArch64::ZPRMul2_HiRegClassID:
    case AArch64::ZPR_KRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicate::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicate::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicate::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicate::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicate::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicate::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicate::Match;

    return DiagnosticPredicate::NoMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
  }

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.Reg);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.Reg);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.Reg);
  }

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm())
      return DiagnosticPredicate::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return DiagnosticPredicate::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicate::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NoMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind &&
           (!IsConsecutive || (VectorList.Stride == 1));
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned RegClass>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicate::NoMatch;
    if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
      return DiagnosticPredicate::NearMatch;
    return DiagnosticPredicate::Match;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicate::NoMatch;
    if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
        ((VectorList.Reg >= AArch64::Z16) &&
         (VectorList.Reg < (AArch64::Z16 + Stride))))
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NoMatch;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicate::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicate::Match;
    return DiagnosticPredicate::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isPHint() const { return Kind == k_PHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
  bool isTIndexHint() const { return Kind == k_TIndexHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicate::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicate::Match;
    }

    return DiagnosticPredicate::NoMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicate::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicate::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
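  // E.g. "ldr x0, [x1, #1]" assembles as LDUR because #1 is not a multiple of
  // 8, while "ldr x0, [x1, #8]" stays LDR (illustrative examples).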
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicate::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicate::NearMatch;
    return DiagnosticPredicate::Match;
  }

  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates, as the signing
    // instruction should be at a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0b11)
      return false;
    return (Val <= 0) && (Val > -(1 << 18));
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
                         .getRegister(RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
                         .getRegister(RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister Reg = getReg();
    // Normalise to PPR
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };

1910 template <VecListIndexType RegTy, unsigned NumRegs,
1911 bool IsConsecutive = false>
1912 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1913 assert(N == 1 && "Invalid number of operands!");
1914 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1915 "Expected consecutive registers");
1916 static const unsigned FirstRegs[][5] = {
1917 /* DReg */ { AArch64::Q0,
1918 AArch64::D0, AArch64::D0_D1,
1919 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1920 /* QReg */ { AArch64::Q0,
1921 AArch64::Q0, AArch64::Q0_Q1,
1922 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1923 /* ZReg */ { AArch64::Z0,
1924 AArch64::Z0, AArch64::Z0_Z1,
1925 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1926 /* PReg */ { AArch64::P0,
1927 AArch64::P0, AArch64::P0_P1 }
1928 };
1929
1930 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1931 " NumRegs must be <= 4 for ZRegs");
1932
1933 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1934 " NumRegs must be <= 2 for PRegs");
1935
1936 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1937 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1938 FirstRegs[(unsigned)RegTy][0]));
1939 }
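// Illustrative example: a NEON list such as "{ v2.16b, v3.16b }" is stored
// with its start register as Q2. With RegTy == VecListIdx_QReg and
// NumRegs == 2, FirstReg is Q0_Q1, so the operand added is
// Q0_Q1 + (Q2 - Q0), i.e. the Q2_Q3 tuple register. D, Z and P lists use the
// same offset arithmetic against their respective first tuple.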
1940
1941 template <unsigned NumRegs>
1942 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1943 assert(N == 1 && "Invalid number of operands!");
1944 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1945
1946 switch (NumRegs) {
1947 case 2:
1948 if (getVectorListStart() < AArch64::Z16) {
1949 assert((getVectorListStart() < AArch64::Z8) &&
1950 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1951 Inst.addOperand(MCOperand::createReg(
1952 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1953 } else {
1954 assert((getVectorListStart() < AArch64::Z24) &&
1955 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1956 Inst.addOperand(MCOperand::createReg(
1957 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1958 }
1959 break;
1960 case 4:
1961 if (getVectorListStart() < AArch64::Z16) {
1962 assert((getVectorListStart() < AArch64::Z4) &&
1963 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1964 Inst.addOperand(MCOperand::createReg(
1965 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1966 } else {
1967 assert((getVectorListStart() < AArch64::Z20) &&
1968 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1969 Inst.addOperand(MCOperand::createReg(
1970 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1971 }
1972 break;
1973 default:
1974 llvm_unreachable("Unsupported number of registers for strided vec list");
1975 }
1976 }
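// Illustrative example: a strided SME list such as "{ z3.s, z11.s }" starts
// at Z3 with a stride of 8, so the operand added is Z0_Z8 + (Z3 - Z0), i.e.
// the Z3_Z11 tuple. Four-register strided lists use the Z0_Z4_Z8_Z12 or
// Z16_Z20_Z24_Z28 based tuples depending on which half the start register
// falls into.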
1977
1978 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1979 assert(N == 1 && "Invalid number of operands!");
1980 unsigned RegMask = getMatrixTileListRegMask();
1981 assert(RegMask <= 0xFF && "Invalid mask!");
1982 Inst.addOperand(MCOperand::createImm(RegMask));
1983 }
1984
1985 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1986 assert(N == 1 && "Invalid number of operands!");
1987 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1988 }
1989
1990 template <unsigned ImmIs0, unsigned ImmIs1>
1991 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 1 && "Invalid number of operands!");
1993 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1994 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1995 }
1996
1997 void addImmOperands(MCInst &Inst, unsigned N) const {
1998 assert(N == 1 && "Invalid number of operands!");
1999 // If this is a pageoff symrefexpr with an addend, adjust the addend
2000 // to be only the page-offset portion. Otherwise, just add the expr
2001 // as-is.
2002 addExpr(Inst, getImm());
2003 }
2004
2005 template <int Shift>
2006 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2007 assert(N == 2 && "Invalid number of operands!");
2008 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2009 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
2010 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2011 } else if (isShiftedImm()) {
2012 addExpr(Inst, getShiftedImmVal());
2013 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
2014 } else {
2015 addExpr(Inst, getImm());
2016 Inst.addOperand(MCOperand::createImm(0));
2017 }
2018 }
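// Illustrative example (Shift == 12, as used for ADD/SUB immediates): a plain
// "#4096" can be folded into the value/shift pair (1, lsl #12), an explicit
// "#1, lsl #12" keeps its value and shift, and a small "#3" is added with an
// implied shift of 0.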
2019
2020 template <int Shift>
2021 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
2022 assert(N == 2 && "Invalid number of operands!");
2023 if (auto ShiftedVal = getShiftedVal<Shift>()) {
2024 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
2025 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
2026 } else
2027 llvm_unreachable("Not a shifted negative immediate");
2028 }
2029
2030 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2031 assert(N == 1 && "Invalid number of operands!");
2032 Inst.addOperand(MCOperand::createImm(getCondCode()));
2033 }
2034
2035 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
2036 assert(N == 1 && "Invalid number of operands!");
2037 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2038 if (!MCE)
2039 addExpr(Inst, getImm());
2040 else
2041 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2042 }
2043
2044 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2045 addImmOperands(Inst, N);
2046 }
2047
2048 template<int Scale>
2049 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!");
2051 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2052
2053 if (!MCE) {
2054 addExpr(Inst, getImm());
2055 return;
2056 }
2057 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2058 }
2059
2060 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2061 assert(N == 1 && "Invalid number of operands!");
2062 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2063 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2064 }
2065
2066 template <int Scale>
2067 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2070 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2071 }
2072
2073 template <int Scale>
2074 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2075 assert(N == 1 && "Invalid number of operands!");
2076 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2077 }
2078
2079 template <typename T>
2080 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2081 assert(N == 1 && "Invalid number of operands!");
2082 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2083 std::make_unsigned_t<T> Val = MCE->getValue();
2084 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2085 Inst.addOperand(MCOperand::createImm(encoding));
2086 }
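// Illustrative example: in "and x0, x1, #0xff" the value 0xff is a run of
// eight consecutive ones and therefore a valid 64-bit logical immediate; the
// operand added to the MCInst is the packed (N:immr:imms) encoding produced
// by encodeLogicalImmediate, not the raw value.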
2087
2088 template <typename T>
2089 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2090 assert(N == 1 && "Invalid number of operands!");
2091 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2092 std::make_unsigned_t<T> Val = ~MCE->getValue();
2093 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2094 Inst.addOperand(MCOperand::createImm(encoding));
2095 }
2096
2097 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2098 assert(N == 1 && "Invalid number of operands!");
2099 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2100 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2101 Inst.addOperand(MCOperand::createImm(encoding));
2102 }
2103
2104 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2105 // Branch operands don't encode the low bits, so shift them off
2106 // here. If it's a label, however, just put it on directly as there's
2107 // not enough information now to do anything.
2108 assert(N == 1 && "Invalid number of operands!");
2109 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2110 if (!MCE) {
2111 addExpr(Inst, getImm());
2112 return;
2113 }
2114 assert(MCE && "Invalid constant immediate operand!");
2115 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2116 }
2117
2118 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2119 // PC-relative operands don't encode the low bits, so shift them off
2120 // here. If it's a label, however, just put it on directly as there's
2121 // not enough information now to do anything.
2122 assert(N == 1 && "Invalid number of operands!");
2123 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2124 if (!MCE) {
2125 addExpr(Inst, getImm());
2126 return;
2127 }
2128 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2129 }
2130
2131 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2132 // Branch operands don't encode the low bits, so shift them off
2133 // here. If it's a label, however, just put it on directly as there's
2134 // not enough information now to do anything.
2135 assert(N == 1 && "Invalid number of operands!");
2136 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2137 if (!MCE) {
2138 addExpr(Inst, getImm());
2139 return;
2140 }
2141 assert(MCE && "Invalid constant immediate operand!");
2142 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2143 }
2144
2145 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2146 // Branch operands don't encode the low bits, so shift them off
2147 // here. If it's a label, however, just put it on directly as there's
2148 // not enough information now to do anything.
2149 assert(N == 1 && "Invalid number of operands!");
2150 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2151 if (!MCE) {
2152 addExpr(Inst, getImm());
2153 return;
2154 }
2155 assert(MCE && "Invalid constant immediate operand!");
2156 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2157 }
2158
2159 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2160 // Branch operands don't encode the low bits, so shift them off
2161 // here. If it's a label, however, just put it on directly as there's
2162 // not enough information now to do anything.
2163 assert(N == 1 && "Invalid number of operands!");
2164 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2165 if (!MCE) {
2166 addExpr(Inst, getImm());
2167 return;
2168 }
2169 assert(MCE && "Invalid constant immediate operand!");
2170 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2171 }
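// Illustrative example: these label operands are word-scaled, so a resolved
// constant of #8 is added as the immediate 2 (8 >> 2), while a symbolic label
// is added as an expression and left for the fixup/relocation machinery.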
2172
2173 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175 Inst.addOperand(MCOperand::createImm(
2176 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2177 }
2178
2179 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(MCOperand::createImm(getBarrier()));
2182 }
2183
2184 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(MCOperand::createImm(getBarrier()));
2187 }
2188
2189 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191
2192 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2193 }
2194
2195 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 1 && "Invalid number of operands!");
2197
2198 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2199 }
2200
2201 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2202 assert(N == 1 && "Invalid number of operands!");
2203
2204 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2205 }
2206
2207 void addSVCROperands(MCInst &Inst, unsigned N) const {
2208 assert(N == 1 && "Invalid number of operands!");
2209
2210 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2211 }
2212
2213 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2214 assert(N == 1 && "Invalid number of operands!");
2215
2216 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2217 }
2218
2219 void addSysCROperands(MCInst &Inst, unsigned N) const {
2220 assert(N == 1 && "Invalid number of operands!");
2221 Inst.addOperand(MCOperand::createImm(getSysCR()));
2222 }
2223
2224 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2225 assert(N == 1 && "Invalid number of operands!");
2226 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2227 }
2228
2229 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 1 && "Invalid number of operands!");
2231 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2232 }
2233
2234 void addPHintOperands(MCInst &Inst, unsigned N) const {
2235 assert(N == 1 && "Invalid number of operands!");
2236 Inst.addOperand(MCOperand::createImm(getPHint()));
2237 }
2238
2239 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2240 assert(N == 1 && "Invalid number of operands!");
2241 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2242 }
2243
2244 void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
2245 assert(N == 1 && "Invalid number of operands!");
2246 Inst.addOperand(MCOperand::createImm(getCMHPriorityHint()));
2247 }
2248
2249 void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
2250 assert(N == 1 && "Invalid number of operands!");
2251 Inst.addOperand(MCOperand::createImm(getTIndexHint()));
2252 }
2253
2254 void addShifterOperands(MCInst &Inst, unsigned N) const {
2255 assert(N == 1 && "Invalid number of operands!");
2256 unsigned Imm =
2257 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2258 Inst.addOperand(MCOperand::createImm(Imm));
2259 }
2260
2261 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2262 assert(N == 1 && "Invalid number of operands!");
2263 unsigned Imm = getShiftExtendAmount();
2264 Inst.addOperand(MCOperand::createImm(Imm));
2265 }
2266
2267 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2268 assert(N == 1 && "Invalid number of operands!");
2269
2270 if (!isScalarReg())
2271 return;
2272
2273 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2274 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2275 .getRegister(RI->getEncodingValue(getReg()));
2276 if (Reg != AArch64::XZR)
2277 llvm_unreachable("wrong register");
2278
2279 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2280 }
2281
2282 void addExtendOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2285 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2286 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2287 Inst.addOperand(MCOperand::createImm(Imm));
2288 }
2289
2290 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2293 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2294 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2295 Inst.addOperand(MCOperand::createImm(Imm));
2296 }
2297
2298 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2299 assert(N == 2 && "Invalid number of operands!");
2300 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2301 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2302 Inst.addOperand(MCOperand::createImm(IsSigned));
2303 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2304 }
2305
2306 // For 8-bit load/store instructions with a register offset, both the
2307 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2308 // they're disambiguated by whether the shift was explicit or implicit rather
2309 // than its size.
2310 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2311 assert(N == 2 && "Invalid number of operands!");
2312 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2313 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2314 Inst.addOperand(MCOperand::createImm(IsSigned));
2315 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2316 }
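// Illustrative example: "ldrb w0, [x1, x2]" and "ldrb w0, [x1, x2, lsl #0]"
// both have a shift amount of 0, but the first matches the "NoShift" variant
// and the second the "DoShift" variant, because only the explicit form has
// hasShiftExtendAmount() set.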
2317
2318 template<int Shift>
2319 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2320 assert(N == 1 && "Invalid number of operands!");
2321
2322 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2323 if (CE) {
2324 uint64_t Value = CE->getValue();
2325 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2326 } else {
2327 addExpr(Inst, getImm());
2328 }
2329 }
2330
2331 template<int Shift>
2332 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2333 assert(N == 1 && "Invalid number of operands!");
2334
2335 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2336 uint64_t Value = CE->getValue();
2337 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2338 }
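// Illustrative example: "mov x0, #-2" can be matched to the MOVN alias; with
// Shift == 0 the operand added is (~(-2) >> 0) & 0xffff == 1, i.e. the
// instruction is encoded as "movn x0, #1".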
2339
2340 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2341 assert(N == 1 && "Invalid number of operands!");
2342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2343 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2344 }
2345
2346 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2347 assert(N == 1 && "Invalid number of operands!");
2348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2349 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2350 }
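// Illustrative example: FCMLA-style rotations {0, 90, 180, 270} use the
// "even" form and encode as value/90 -> {0, 1, 2, 3}; FCADD-style rotations
// {90, 270} use the "odd" form and encode as (value - 90)/180 -> {0, 1}.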
2351
2352 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2353
2354 static std::unique_ptr<AArch64Operand>
2355 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2356 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2357 Op->Tok.Data = Str.data();
2358 Op->Tok.Length = Str.size();
2359 Op->Tok.IsSuffix = IsSuffix;
2360 Op->StartLoc = S;
2361 Op->EndLoc = S;
2362 return Op;
2363 }
2364
2365 static std::unique_ptr<AArch64Operand>
2366 CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2367 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2368 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2369 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2370 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2371 Op->Reg.Reg = Reg;
2372 Op->Reg.Kind = Kind;
2373 Op->Reg.ElementWidth = 0;
2374 Op->Reg.EqualityTy = EqTy;
2375 Op->Reg.ShiftExtend.Type = ExtTy;
2376 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2377 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2378 Op->StartLoc = S;
2379 Op->EndLoc = E;
2380 return Op;
2381 }
2382
2383 static std::unique_ptr<AArch64Operand> CreateVectorReg(
2384 MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
2385 MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2386 unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
2387 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2388 Kind == RegKind::SVEPredicateVector ||
2389 Kind == RegKind::SVEPredicateAsCounter) &&
2390 "Invalid vector kind");
2391 auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2392 HasExplicitAmount);
2393 Op->Reg.ElementWidth = ElementWidth;
2394 return Op;
2395 }
2396
2397 static std::unique_ptr<AArch64Operand>
2398 CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
2399 unsigned NumElements, unsigned ElementWidth,
2400 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2401 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2402 Op->VectorList.Reg = Reg;
2403 Op->VectorList.Count = Count;
2404 Op->VectorList.Stride = Stride;
2405 Op->VectorList.NumElements = NumElements;
2406 Op->VectorList.ElementWidth = ElementWidth;
2407 Op->VectorList.RegisterKind = RegisterKind;
2408 Op->StartLoc = S;
2409 Op->EndLoc = E;
2410 return Op;
2411 }
2412
2413 static std::unique_ptr<AArch64Operand>
2414 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2415 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2416 Op->VectorIndex.Val = Idx;
2417 Op->StartLoc = S;
2418 Op->EndLoc = E;
2419 return Op;
2420 }
2421
2422 static std::unique_ptr<AArch64Operand>
2423 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2424 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2425 Op->MatrixTileList.RegMask = RegMask;
2426 Op->StartLoc = S;
2427 Op->EndLoc = E;
2428 return Op;
2429 }
2430
2431 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2432 const unsigned ElementWidth) {
2433 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2434 RegMap = {
2435 {{0, AArch64::ZAB0},
2436 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2437 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2438 {{8, AArch64::ZAB0},
2439 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2440 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2441 {{16, AArch64::ZAH0},
2442 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2443 {{16, AArch64::ZAH1},
2444 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2445 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2446 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2447 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2448 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2449 };
2450
2451 if (ElementWidth == 64)
2452 OutRegs.insert(Reg);
2453 else {
2454 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2455 assert(!Regs.empty() && "Invalid tile or element width!");
2456 OutRegs.insert_range(Regs);
2457 }
2458 }
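// Illustrative example: with 32-bit elements, tile ZAS1 ("za1.s") overlaps
// the 64-bit tiles ZAD1 and ZAD5, so both are inserted into OutRegs; with
// 64-bit elements a tile simply maps to itself.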
2459
2460 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2461 SMLoc E, MCContext &Ctx) {
2462 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2463 Op->Imm.Val = Val;
2464 Op->StartLoc = S;
2465 Op->EndLoc = E;
2466 return Op;
2467 }
2468
2469 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2470 unsigned ShiftAmount,
2471 SMLoc S, SMLoc E,
2472 MCContext &Ctx) {
2473 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2474 Op->ShiftedImm.Val = Val;
2475 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2476 Op->StartLoc = S;
2477 Op->EndLoc = E;
2478 return Op;
2479 }
2480
2481 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2482 unsigned Last, SMLoc S,
2483 SMLoc E,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2486 Op->ImmRange.First = First;
2487 Op->ImmRange.Last = Last;
2488 Op->EndLoc = E;
2489 return Op;
2490 }
2491
2492 static std::unique_ptr<AArch64Operand>
2493 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2495 Op->CondCode.Code = Code;
2496 Op->StartLoc = S;
2497 Op->EndLoc = E;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand>
2502 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2504 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2505 Op->FPImm.IsExact = IsExact;
2506 Op->StartLoc = S;
2507 Op->EndLoc = S;
2508 return Op;
2509 }
2510
2511 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2512 StringRef Str,
2513 SMLoc S,
2514 MCContext &Ctx,
2515 bool HasnXSModifier) {
2516 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2517 Op->Barrier.Val = Val;
2518 Op->Barrier.Data = Str.data();
2519 Op->Barrier.Length = Str.size();
2520 Op->Barrier.HasnXSModifier = HasnXSModifier;
2521 Op->StartLoc = S;
2522 Op->EndLoc = S;
2523 return Op;
2524 }
2525
2526 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2527 uint32_t MRSReg,
2528 uint32_t MSRReg,
2529 uint32_t PStateField,
2530 MCContext &Ctx) {
2531 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2532 Op->SysReg.Data = Str.data();
2533 Op->SysReg.Length = Str.size();
2534 Op->SysReg.MRSReg = MRSReg;
2535 Op->SysReg.MSRReg = MSRReg;
2536 Op->SysReg.PStateField = PStateField;
2537 Op->StartLoc = S;
2538 Op->EndLoc = S;
2539 return Op;
2540 }
2541
2542 static std::unique_ptr<AArch64Operand>
2543 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2544 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2545 Op->PHint.Val = Val;
2546 Op->PHint.Data = Str.data();
2547 Op->PHint.Length = Str.size();
2548 Op->StartLoc = S;
2549 Op->EndLoc = S;
2550 return Op;
2551 }
2552
2553 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2554 SMLoc E, MCContext &Ctx) {
2555 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2556 Op->SysCRImm.Val = Val;
2557 Op->StartLoc = S;
2558 Op->EndLoc = E;
2559 return Op;
2560 }
2561
2562 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2563 StringRef Str,
2564 SMLoc S,
2565 MCContext &Ctx) {
2566 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2567 Op->Prefetch.Val = Val;
2568 Op->Barrier.Data = Str.data();
2569 Op->Barrier.Length = Str.size();
2570 Op->StartLoc = S;
2571 Op->EndLoc = S;
2572 return Op;
2573 }
2574
2575 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2576 StringRef Str,
2577 SMLoc S,
2578 MCContext &Ctx) {
2579 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2580 Op->PSBHint.Val = Val;
2581 Op->PSBHint.Data = Str.data();
2582 Op->PSBHint.Length = Str.size();
2583 Op->StartLoc = S;
2584 Op->EndLoc = S;
2585 return Op;
2586 }
2587
2588 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2589 StringRef Str,
2590 SMLoc S,
2591 MCContext &Ctx) {
2592 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2593 Op->BTIHint.Val = Val | 32;
2594 Op->BTIHint.Data = Str.data();
2595 Op->BTIHint.Length = Str.size();
2596 Op->StartLoc = S;
2597 Op->EndLoc = S;
2598 return Op;
2599 }
2600
2601 static std::unique_ptr<AArch64Operand>
2602 CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2603 auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
2604 Op->CMHPriorityHint.Val = Val;
2605 Op->CMHPriorityHint.Data = Str.data();
2606 Op->CMHPriorityHint.Length = Str.size();
2607 Op->StartLoc = S;
2608 Op->EndLoc = S;
2609 return Op;
2610 }
2611
2612 static std::unique_ptr<AArch64Operand>
2613 CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2614 auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
2615 Op->TIndexHint.Val = Val;
2616 Op->TIndexHint.Data = Str.data();
2617 Op->TIndexHint.Length = Str.size();
2618 Op->StartLoc = S;
2619 Op->EndLoc = S;
2620 return Op;
2621 }
2622
2623 static std::unique_ptr<AArch64Operand>
2624 CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
2625 SMLoc S, SMLoc E, MCContext &Ctx) {
2626 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2627 Op->MatrixReg.Reg = Reg;
2628 Op->MatrixReg.ElementWidth = ElementWidth;
2629 Op->MatrixReg.Kind = Kind;
2630 Op->StartLoc = S;
2631 Op->EndLoc = E;
2632 return Op;
2633 }
2634
2635 static std::unique_ptr<AArch64Operand>
2636 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2637 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2638 Op->SVCR.PStateField = PStateField;
2639 Op->SVCR.Data = Str.data();
2640 Op->SVCR.Length = Str.size();
2641 Op->StartLoc = S;
2642 Op->EndLoc = S;
2643 return Op;
2644 }
2645
2646 static std::unique_ptr<AArch64Operand>
2647 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2648 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2649 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2650 Op->ShiftExtend.Type = ShOp;
2651 Op->ShiftExtend.Amount = Val;
2652 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2653 Op->StartLoc = S;
2654 Op->EndLoc = E;
2655 return Op;
2656 }
2657};
2658
2659} // end anonymous namespace.
2660
2661void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
2662 switch (Kind) {
2663 case k_FPImm:
2664 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2665 if (!getFPImmIsExact())
2666 OS << " (inexact)";
2667 OS << ">";
2668 break;
2669 case k_Barrier: {
2670 StringRef Name = getBarrierName();
2671 if (!Name.empty())
2672 OS << "<barrier " << Name << ">";
2673 else
2674 OS << "<barrier invalid #" << getBarrier() << ">";
2675 break;
2676 }
2677 case k_Immediate:
2678 MAI.printExpr(OS, *getImm());
2679 break;
2680 case k_ShiftedImm: {
2681 unsigned Shift = getShiftedImmShift();
2682 OS << "<shiftedimm ";
2683 MAI.printExpr(OS, *getShiftedImmVal());
2684 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2685 break;
2686 }
2687 case k_ImmRange: {
2688 OS << "<immrange ";
2689 OS << getFirstImmVal();
2690 OS << ":" << getLastImmVal() << ">";
2691 break;
2692 }
2693 case k_CondCode:
2694 OS << "<condcode " << getCondCode() << ">";
2695 break;
2696 case k_VectorList: {
2697 OS << "<vectorlist ";
2698 MCRegister Reg = getVectorListStart();
2699 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2700 OS << Reg.id() + i * getVectorListStride() << " ";
2701 OS << ">";
2702 break;
2703 }
2704 case k_VectorIndex:
2705 OS << "<vectorindex " << getVectorIndex() << ">";
2706 break;
2707 case k_SysReg:
2708 OS << "<sysreg: " << getSysReg() << '>';
2709 break;
2710 case k_Token:
2711 OS << "'" << getToken() << "'";
2712 break;
2713 case k_SysCR:
2714 OS << "c" << getSysCR();
2715 break;
2716 case k_Prefetch: {
2717 StringRef Name = getPrefetchName();
2718 if (!Name.empty())
2719 OS << "<prfop " << Name << ">";
2720 else
2721 OS << "<prfop invalid #" << getPrefetch() << ">";
2722 break;
2723 }
2724 case k_PSBHint:
2725 OS << getPSBHintName();
2726 break;
2727 case k_PHint:
2728 OS << getPHintName();
2729 break;
2730 case k_BTIHint:
2731 OS << getBTIHintName();
2732 break;
2733 case k_CMHPriorityHint:
2734 OS << getCMHPriorityHintName();
2735 break;
2736 case k_TIndexHint:
2737 OS << getTIndexHintName();
2738 break;
2739 case k_MatrixRegister:
2740 OS << "<matrix " << getMatrixReg().id() << ">";
2741 break;
2742 case k_MatrixTileList: {
2743 OS << "<matrixlist ";
2744 unsigned RegMask = getMatrixTileListRegMask();
2745 unsigned MaxBits = 8;
2746 for (unsigned I = MaxBits; I > 0; --I)
2747 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2748 OS << '>';
2749 break;
2750 }
2751 case k_SVCR: {
2752 OS << getSVCR();
2753 break;
2754 }
2755 case k_Register:
2756 OS << "<register " << getReg().id() << ">";
2757 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2758 break;
2759 [[fallthrough]];
2760 case k_ShiftExtend:
2761 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2762 << getShiftExtendAmount();
2763 if (!hasShiftExtendAmount())
2764 OS << "<imp>";
2765 OS << '>';
2766 break;
2767 }
2768}
2769
2770/// @name Auto-generated Match Functions
2771/// {
2772
2773 static MCRegister MatchRegisterName(StringRef Name);
2774
2775/// }
2776
2777static unsigned MatchNeonVectorRegName(StringRef Name) {
2778 return StringSwitch<unsigned>(Name.lower())
2779 .Case("v0", AArch64::Q0)
2780 .Case("v1", AArch64::Q1)
2781 .Case("v2", AArch64::Q2)
2782 .Case("v3", AArch64::Q3)
2783 .Case("v4", AArch64::Q4)
2784 .Case("v5", AArch64::Q5)
2785 .Case("v6", AArch64::Q6)
2786 .Case("v7", AArch64::Q7)
2787 .Case("v8", AArch64::Q8)
2788 .Case("v9", AArch64::Q9)
2789 .Case("v10", AArch64::Q10)
2790 .Case("v11", AArch64::Q11)
2791 .Case("v12", AArch64::Q12)
2792 .Case("v13", AArch64::Q13)
2793 .Case("v14", AArch64::Q14)
2794 .Case("v15", AArch64::Q15)
2795 .Case("v16", AArch64::Q16)
2796 .Case("v17", AArch64::Q17)
2797 .Case("v18", AArch64::Q18)
2798 .Case("v19", AArch64::Q19)
2799 .Case("v20", AArch64::Q20)
2800 .Case("v21", AArch64::Q21)
2801 .Case("v22", AArch64::Q22)
2802 .Case("v23", AArch64::Q23)
2803 .Case("v24", AArch64::Q24)
2804 .Case("v25", AArch64::Q25)
2805 .Case("v26", AArch64::Q26)
2806 .Case("v27", AArch64::Q27)
2807 .Case("v28", AArch64::Q28)
2808 .Case("v29", AArch64::Q29)
2809 .Case("v30", AArch64::Q30)
2810 .Case("v31", AArch64::Q31)
2811 .Default(0);
2812}
2813
2814/// Returns an optional pair of (#elements, element-width) if Suffix
2815/// is a valid vector kind. Where the number of elements in a vector
2816/// or the vector width is implicit or explicitly unknown (but still a
2817/// valid suffix kind), 0 is used.
2818static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2819 RegKind VectorKind) {
2820 std::pair<int, int> Res = {-1, -1};
2821
2822 switch (VectorKind) {
2823 case RegKind::NeonVector:
2824 Res = StringSwitch<std::pair<int, int>>(Suffix)
2825 .Case("", {0, 0})
2826 .Case(".1d", {1, 64})
2827 .Case(".1q", {1, 128})
2828 // '.2h' needed for fp16 scalar pairwise reductions
2829 .Case(".2h", {2, 16})
2830 .Case(".2b", {2, 8})
2831 .Case(".2s", {2, 32})
2832 .Case(".2d", {2, 64})
2833 // '.4b' is another special case for the ARMv8.2a dot product
2834 // operand
2835 .Case(".4b", {4, 8})
2836 .Case(".4h", {4, 16})
2837 .Case(".4s", {4, 32})
2838 .Case(".8b", {8, 8})
2839 .Case(".8h", {8, 16})
2840 .Case(".16b", {16, 8})
2841 // Accept the width neutral ones, too, for verbose syntax. If
2842 // those aren't used in the right places, the token operand won't
2843 // match so all will work out.
2844 .Case(".b", {0, 8})
2845 .Case(".h", {0, 16})
2846 .Case(".s", {0, 32})
2847 .Case(".d", {0, 64})
2848 .Default({-1, -1});
2849 break;
2850 case RegKind::SVEPredicateAsCounter:
2851 case RegKind::SVEPredicateVector:
2852 case RegKind::SVEDataVector:
2853 case RegKind::Matrix:
2854 Res = StringSwitch<std::pair<int, int>>(Suffix)
2855 .Case("", {0, 0})
2856 .Case(".b", {0, 8})
2857 .Case(".h", {0, 16})
2858 .Case(".s", {0, 32})
2859 .Case(".d", {0, 64})
2860 .Case(".q", {0, 128})
2861 .Default({-1, -1});
2862 break;
2863 default:
2864 llvm_unreachable("Unsupported RegKind");
2865 }
2866
2867 if (Res == std::make_pair(-1, -1))
2868 return std::nullopt;
2869
2870 return std::optional<std::pair<int, int>>(Res);
2871}
2872
2873static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2874 return parseVectorKind(Suffix, VectorKind).has_value();
2875}
2876
2877 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2878 return StringSwitch<unsigned>(Name.lower())
2879 .Case("z0", AArch64::Z0)
2880 .Case("z1", AArch64::Z1)
2881 .Case("z2", AArch64::Z2)
2882 .Case("z3", AArch64::Z3)
2883 .Case("z4", AArch64::Z4)
2884 .Case("z5", AArch64::Z5)
2885 .Case("z6", AArch64::Z6)
2886 .Case("z7", AArch64::Z7)
2887 .Case("z8", AArch64::Z8)
2888 .Case("z9", AArch64::Z9)
2889 .Case("z10", AArch64::Z10)
2890 .Case("z11", AArch64::Z11)
2891 .Case("z12", AArch64::Z12)
2892 .Case("z13", AArch64::Z13)
2893 .Case("z14", AArch64::Z14)
2894 .Case("z15", AArch64::Z15)
2895 .Case("z16", AArch64::Z16)
2896 .Case("z17", AArch64::Z17)
2897 .Case("z18", AArch64::Z18)
2898 .Case("z19", AArch64::Z19)
2899 .Case("z20", AArch64::Z20)
2900 .Case("z21", AArch64::Z21)
2901 .Case("z22", AArch64::Z22)
2902 .Case("z23", AArch64::Z23)
2903 .Case("z24", AArch64::Z24)
2904 .Case("z25", AArch64::Z25)
2905 .Case("z26", AArch64::Z26)
2906 .Case("z27", AArch64::Z27)
2907 .Case("z28", AArch64::Z28)
2908 .Case("z29", AArch64::Z29)
2909 .Case("z30", AArch64::Z30)
2910 .Case("z31", AArch64::Z31)
2911 .Default(0);
2912}
2913
2914 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2915 return StringSwitch<unsigned>(Name.lower())
2916 .Case("p0", AArch64::P0)
2917 .Case("p1", AArch64::P1)
2918 .Case("p2", AArch64::P2)
2919 .Case("p3", AArch64::P3)
2920 .Case("p4", AArch64::P4)
2921 .Case("p5", AArch64::P5)
2922 .Case("p6", AArch64::P6)
2923 .Case("p7", AArch64::P7)
2924 .Case("p8", AArch64::P8)
2925 .Case("p9", AArch64::P9)
2926 .Case("p10", AArch64::P10)
2927 .Case("p11", AArch64::P11)
2928 .Case("p12", AArch64::P12)
2929 .Case("p13", AArch64::P13)
2930 .Case("p14", AArch64::P14)
2931 .Case("p15", AArch64::P15)
2932 .Default(0);
2933}
2934
2935 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2936 return StringSwitch<unsigned>(Name.lower())
2937 .Case("pn0", AArch64::PN0)
2938 .Case("pn1", AArch64::PN1)
2939 .Case("pn2", AArch64::PN2)
2940 .Case("pn3", AArch64::PN3)
2941 .Case("pn4", AArch64::PN4)
2942 .Case("pn5", AArch64::PN5)
2943 .Case("pn6", AArch64::PN6)
2944 .Case("pn7", AArch64::PN7)
2945 .Case("pn8", AArch64::PN8)
2946 .Case("pn9", AArch64::PN9)
2947 .Case("pn10", AArch64::PN10)
2948 .Case("pn11", AArch64::PN11)
2949 .Case("pn12", AArch64::PN12)
2950 .Case("pn13", AArch64::PN13)
2951 .Case("pn14", AArch64::PN14)
2952 .Case("pn15", AArch64::PN15)
2953 .Default(0);
2954}
2955
2956 static unsigned matchMatrixTileListRegName(StringRef Name) {
2957 return StringSwitch<unsigned>(Name.lower())
2958 .Case("za0.d", AArch64::ZAD0)
2959 .Case("za1.d", AArch64::ZAD1)
2960 .Case("za2.d", AArch64::ZAD2)
2961 .Case("za3.d", AArch64::ZAD3)
2962 .Case("za4.d", AArch64::ZAD4)
2963 .Case("za5.d", AArch64::ZAD5)
2964 .Case("za6.d", AArch64::ZAD6)
2965 .Case("za7.d", AArch64::ZAD7)
2966 .Case("za0.s", AArch64::ZAS0)
2967 .Case("za1.s", AArch64::ZAS1)
2968 .Case("za2.s", AArch64::ZAS2)
2969 .Case("za3.s", AArch64::ZAS3)
2970 .Case("za0.h", AArch64::ZAH0)
2971 .Case("za1.h", AArch64::ZAH1)
2972 .Case("za0.b", AArch64::ZAB0)
2973 .Default(0);
2974}
2975
2976static unsigned matchMatrixRegName(StringRef Name) {
2977 return StringSwitch<unsigned>(Name.lower())
2978 .Case("za", AArch64::ZA)
2979 .Case("za0.q", AArch64::ZAQ0)
2980 .Case("za1.q", AArch64::ZAQ1)
2981 .Case("za2.q", AArch64::ZAQ2)
2982 .Case("za3.q", AArch64::ZAQ3)
2983 .Case("za4.q", AArch64::ZAQ4)
2984 .Case("za5.q", AArch64::ZAQ5)
2985 .Case("za6.q", AArch64::ZAQ6)
2986 .Case("za7.q", AArch64::ZAQ7)
2987 .Case("za8.q", AArch64::ZAQ8)
2988 .Case("za9.q", AArch64::ZAQ9)
2989 .Case("za10.q", AArch64::ZAQ10)
2990 .Case("za11.q", AArch64::ZAQ11)
2991 .Case("za12.q", AArch64::ZAQ12)
2992 .Case("za13.q", AArch64::ZAQ13)
2993 .Case("za14.q", AArch64::ZAQ14)
2994 .Case("za15.q", AArch64::ZAQ15)
2995 .Case("za0.d", AArch64::ZAD0)
2996 .Case("za1.d", AArch64::ZAD1)
2997 .Case("za2.d", AArch64::ZAD2)
2998 .Case("za3.d", AArch64::ZAD3)
2999 .Case("za4.d", AArch64::ZAD4)
3000 .Case("za5.d", AArch64::ZAD5)
3001 .Case("za6.d", AArch64::ZAD6)
3002 .Case("za7.d", AArch64::ZAD7)
3003 .Case("za0.s", AArch64::ZAS0)
3004 .Case("za1.s", AArch64::ZAS1)
3005 .Case("za2.s", AArch64::ZAS2)
3006 .Case("za3.s", AArch64::ZAS3)
3007 .Case("za0.h", AArch64::ZAH0)
3008 .Case("za1.h", AArch64::ZAH1)
3009 .Case("za0.b", AArch64::ZAB0)
3010 .Case("za0h.q", AArch64::ZAQ0)
3011 .Case("za1h.q", AArch64::ZAQ1)
3012 .Case("za2h.q", AArch64::ZAQ2)
3013 .Case("za3h.q", AArch64::ZAQ3)
3014 .Case("za4h.q", AArch64::ZAQ4)
3015 .Case("za5h.q", AArch64::ZAQ5)
3016 .Case("za6h.q", AArch64::ZAQ6)
3017 .Case("za7h.q", AArch64::ZAQ7)
3018 .Case("za8h.q", AArch64::ZAQ8)
3019 .Case("za9h.q", AArch64::ZAQ9)
3020 .Case("za10h.q", AArch64::ZAQ10)
3021 .Case("za11h.q", AArch64::ZAQ11)
3022 .Case("za12h.q", AArch64::ZAQ12)
3023 .Case("za13h.q", AArch64::ZAQ13)
3024 .Case("za14h.q", AArch64::ZAQ14)
3025 .Case("za15h.q", AArch64::ZAQ15)
3026 .Case("za0h.d", AArch64::ZAD0)
3027 .Case("za1h.d", AArch64::ZAD1)
3028 .Case("za2h.d", AArch64::ZAD2)
3029 .Case("za3h.d", AArch64::ZAD3)
3030 .Case("za4h.d", AArch64::ZAD4)
3031 .Case("za5h.d", AArch64::ZAD5)
3032 .Case("za6h.d", AArch64::ZAD6)
3033 .Case("za7h.d", AArch64::ZAD7)
3034 .Case("za0h.s", AArch64::ZAS0)
3035 .Case("za1h.s", AArch64::ZAS1)
3036 .Case("za2h.s", AArch64::ZAS2)
3037 .Case("za3h.s", AArch64::ZAS3)
3038 .Case("za0h.h", AArch64::ZAH0)
3039 .Case("za1h.h", AArch64::ZAH1)
3040 .Case("za0h.b", AArch64::ZAB0)
3041 .Case("za0v.q", AArch64::ZAQ0)
3042 .Case("za1v.q", AArch64::ZAQ1)
3043 .Case("za2v.q", AArch64::ZAQ2)
3044 .Case("za3v.q", AArch64::ZAQ3)
3045 .Case("za4v.q", AArch64::ZAQ4)
3046 .Case("za5v.q", AArch64::ZAQ5)
3047 .Case("za6v.q", AArch64::ZAQ6)
3048 .Case("za7v.q", AArch64::ZAQ7)
3049 .Case("za8v.q", AArch64::ZAQ8)
3050 .Case("za9v.q", AArch64::ZAQ9)
3051 .Case("za10v.q", AArch64::ZAQ10)
3052 .Case("za11v.q", AArch64::ZAQ11)
3053 .Case("za12v.q", AArch64::ZAQ12)
3054 .Case("za13v.q", AArch64::ZAQ13)
3055 .Case("za14v.q", AArch64::ZAQ14)
3056 .Case("za15v.q", AArch64::ZAQ15)
3057 .Case("za0v.d", AArch64::ZAD0)
3058 .Case("za1v.d", AArch64::ZAD1)
3059 .Case("za2v.d", AArch64::ZAD2)
3060 .Case("za3v.d", AArch64::ZAD3)
3061 .Case("za4v.d", AArch64::ZAD4)
3062 .Case("za5v.d", AArch64::ZAD5)
3063 .Case("za6v.d", AArch64::ZAD6)
3064 .Case("za7v.d", AArch64::ZAD7)
3065 .Case("za0v.s", AArch64::ZAS0)
3066 .Case("za1v.s", AArch64::ZAS1)
3067 .Case("za2v.s", AArch64::ZAS2)
3068 .Case("za3v.s", AArch64::ZAS3)
3069 .Case("za0v.h", AArch64::ZAH0)
3070 .Case("za1v.h", AArch64::ZAH1)
3071 .Case("za0v.b", AArch64::ZAB0)
3072 .Default(0);
3073}
3074
3075bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3076 SMLoc &EndLoc) {
3077 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3078}
3079
3080ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3081 SMLoc &EndLoc) {
3082 StartLoc = getLoc();
3083 ParseStatus Res = tryParseScalarRegister(Reg);
3084 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3085 return Res;
3086}
3087
3088// Matches a register name or register alias previously defined by '.req'
3089MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3090 RegKind Kind) {
3091 MCRegister Reg = MCRegister();
3092 if ((Reg = matchSVEDataVectorRegName(Name)))
3093 return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
3094
3095 if ((Reg = matchSVEPredicateVectorRegName(Name)))
3096 return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
3097
3098 if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
3099 return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
3100
3101 if ((Reg = MatchNeonVectorRegName(Name)))
3102 return Kind == RegKind::NeonVector ? Reg : MCRegister();
3103
3104 if ((Reg = matchMatrixRegName(Name)))
3105 return Kind == RegKind::Matrix ? Reg : MCRegister();
3106
3107 if (Name.equals_insensitive("zt0"))
3108 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3109
3110 // The parsed register must be of RegKind Scalar
3111 if ((Reg = MatchRegisterName(Name)))
3112 return (Kind == RegKind::Scalar) ? Reg : MCRegister();
3113
3114 if (!Reg) {
3115 // Handle a few common aliases of registers.
3116 if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
3117 .Case("fp", AArch64::FP)
3118 .Case("lr", AArch64::LR)
3119 .Case("x31", AArch64::XZR)
3120 .Case("w31", AArch64::WZR)
3121 .Default(0))
3122 return Kind == RegKind::Scalar ? Reg : MCRegister();
3123
3124 // Check for aliases registered via .req. Canonicalize to lower case.
3125 // That's more consistent since register names are case insensitive, and
3126 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3127 auto Entry = RegisterReqs.find(Name.lower());
3128 if (Entry == RegisterReqs.end())
3129 return MCRegister();
3130
3131 // set Reg if the match is the right kind of register
3132 if (Kind == Entry->getValue().first)
3133 Reg = Entry->getValue().second;
3134 }
3135 return Reg;
3136}
3137
3138unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3139 switch (K) {
3140 case RegKind::Scalar:
3141 case RegKind::NeonVector:
3142 case RegKind::SVEDataVector:
3143 return 32;
3144 case RegKind::Matrix:
3145 case RegKind::SVEPredicateVector:
3146 case RegKind::SVEPredicateAsCounter:
3147 return 16;
3148 case RegKind::LookupTable:
3149 return 1;
3150 }
3151 llvm_unreachable("Unsupported RegKind");
3152}
3153
3154/// tryParseScalarRegister - Try to parse a register name. The token must be an
3155/// Identifier when called, and if it is a register name the token is eaten and
3156/// the register is added to the operand list.
3157ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3158 const AsmToken &Tok = getTok();
3159 if (Tok.isNot(AsmToken::Identifier))
3160 return ParseStatus::NoMatch;
3161
3162 std::string lowerCase = Tok.getString().lower();
3163 MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3164 if (!Reg)
3165 return ParseStatus::NoMatch;
3166
3167 RegNum = Reg;
3168 Lex(); // Eat identifier token.
3169 return ParseStatus::Success;
3170}
3171
3172/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3173ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3174 SMLoc S = getLoc();
3175
3176 if (getTok().isNot(AsmToken::Identifier))
3177 return Error(S, "Expected cN operand where 0 <= N <= 15");
3178
3179 StringRef Tok = getTok().getIdentifier();
3180 if (Tok[0] != 'c' && Tok[0] != 'C')
3181 return Error(S, "Expected cN operand where 0 <= N <= 15");
3182
3183 uint32_t CRNum;
3184 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3185 if (BadNum || CRNum > 15)
3186 return Error(S, "Expected cN operand where 0 <= N <= 15");
3187
3188 Lex(); // Eat identifier token.
3189 Operands.push_back(
3190 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3191 return ParseStatus::Success;
3192}
3193
3194// Either an identifier for named values or a 6-bit immediate.
3195ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3196 SMLoc S = getLoc();
3197 const AsmToken &Tok = getTok();
3198
3199 unsigned MaxVal = 63;
3200
3201 // Immediate case, with optional leading hash:
3202 if (parseOptionalToken(AsmToken::Hash) ||
3203 Tok.is(AsmToken::Integer)) {
3204 const MCExpr *ImmVal;
3205 if (getParser().parseExpression(ImmVal))
3206 return ParseStatus::Failure;
3207
3208 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3209 if (!MCE)
3210 return TokError("immediate value expected for prefetch operand");
3211 unsigned prfop = MCE->getValue();
3212 if (prfop > MaxVal)
3213 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3214 "] expected");
3215
3216 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3217 Operands.push_back(AArch64Operand::CreatePrefetch(
3218 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3219 return ParseStatus::Success;
3220 }
3221
3222 if (Tok.isNot(AsmToken::Identifier))
3223 return TokError("prefetch hint expected");
3224
3225 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3226 if (!RPRFM)
3227 return TokError("prefetch hint expected");
3228
3229 Operands.push_back(AArch64Operand::CreatePrefetch(
3230 RPRFM->Encoding, Tok.getString(), S, getContext()));
3231 Lex(); // Eat identifier token.
3232 return ParseStatus::Success;
3233}
3234
3235/// tryParsePrefetch - Try to parse a prefetch operand.
3236template <bool IsSVEPrefetch>
3237ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3238 SMLoc S = getLoc();
3239 const AsmToken &Tok = getTok();
3240
3241 auto LookupByName = [](StringRef N) {
3242 if (IsSVEPrefetch) {
3243 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3244 return std::optional<unsigned>(Res->Encoding);
3245 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3246 return std::optional<unsigned>(Res->Encoding);
3247 return std::optional<unsigned>();
3248 };
3249
3250 auto LookupByEncoding = [](unsigned E) {
3251 if (IsSVEPrefetch) {
3252 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3253 return std::optional<StringRef>(Res->Name);
3254 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3255 return std::optional<StringRef>(Res->Name);
3256 return std::optional<StringRef>();
3257 };
3258 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3259
3260 // Either an identifier for named values or a 5-bit immediate.
3261 // Eat optional hash.
3262 if (parseOptionalToken(AsmToken::Hash) ||
3263 Tok.is(AsmToken::Integer)) {
3264 const MCExpr *ImmVal;
3265 if (getParser().parseExpression(ImmVal))
3266 return ParseStatus::Failure;
3267
3268 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3269 if (!MCE)
3270 return TokError("immediate value expected for prefetch operand");
3271 unsigned prfop = MCE->getValue();
3272 if (prfop > MaxVal)
3273 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3274 "] expected");
3275
3276 auto PRFM = LookupByEncoding(MCE->getValue());
3277 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3278 S, getContext()));
3279 return ParseStatus::Success;
3280 }
3281
3282 if (Tok.isNot(AsmToken::Identifier))
3283 return TokError("prefetch hint expected");
3284
3285 auto PRFM = LookupByName(Tok.getString());
3286 if (!PRFM)
3287 return TokError("prefetch hint expected");
3288
3289 Operands.push_back(AArch64Operand::CreatePrefetch(
3290 *PRFM, Tok.getString(), S, getContext()));
3291 Lex(); // Eat identifier token.
3292 return ParseStatus::Success;
3293}
3294
3295/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3296ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3297 SMLoc S = getLoc();
3298 const AsmToken &Tok = getTok();
3299 if (Tok.isNot(AsmToken::Identifier))
3300 return TokError("invalid operand for instruction");
3301
3302 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3303 if (!PSB)
3304 return TokError("invalid operand for instruction");
3305
3306 Operands.push_back(AArch64Operand::CreatePSBHint(
3307 PSB->Encoding, Tok.getString(), S, getContext()));
3308 Lex(); // Eat identifier token.
3309 return ParseStatus::Success;
3310}
3311
3312ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3313 SMLoc StartLoc = getLoc();
3314
3315 MCRegister RegNum;
3316
3317 // The case where xzr, xzr is not present is handled by an InstAlias.
3318
3319 auto RegTok = getTok(); // in case we need to backtrack
3320 if (!tryParseScalarRegister(RegNum).isSuccess())
3321 return ParseStatus::NoMatch;
3322
3323 if (RegNum != AArch64::XZR) {
3324 getLexer().UnLex(RegTok);
3325 return ParseStatus::NoMatch;
3326 }
3327
3328 if (parseComma())
3329 return ParseStatus::Failure;
3330
3331 if (!tryParseScalarRegister(RegNum).isSuccess())
3332 return TokError("expected register operand");
3333
3334 if (RegNum != AArch64::XZR)
3335 return TokError("xzr must be followed by xzr");
3336
3337 // We need to push something, since we claim this is an operand in .td.
3338 // See also AArch64AsmParser::parseKeywordOperand.
3339 Operands.push_back(AArch64Operand::CreateReg(
3340 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3341
3342 return ParseStatus::Success;
3343}
3344
3345/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3346ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3347 SMLoc S = getLoc();
3348 const AsmToken &Tok = getTok();
3349 if (Tok.isNot(AsmToken::Identifier))
3350 return TokError("invalid operand for instruction");
3351
3352 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3353 if (!BTI)
3354 return TokError("invalid operand for instruction");
3355
3356 Operands.push_back(AArch64Operand::CreateBTIHint(
3357 BTI->Encoding, Tok.getString(), S, getContext()));
3358 Lex(); // Eat identifier token.
3359 return ParseStatus::Success;
3360}
3361
3362/// tryParseCMHPriorityHint - Try to parse a CMHPriority operand
3363ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
3364 SMLoc S = getLoc();
3365 const AsmToken &Tok = getTok();
3366 if (Tok.isNot(AsmToken::Identifier))
3367 return TokError("invalid operand for instruction");
3368
3369 auto CMHPriority =
3370 AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
3371 if (!CMHPriority)
3372 return TokError("invalid operand for instruction");
3373
3374 Operands.push_back(AArch64Operand::CreateCMHPriorityHint(
3375 CMHPriority->Encoding, Tok.getString(), S, getContext()));
3376 Lex(); // Eat identifier token.
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseTIndexHint - Try to parse a TIndex operand
3381ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
3382 SMLoc S = getLoc();
3383 const AsmToken &Tok = getTok();
3384 if (Tok.isNot(AsmToken::Identifier))
3385 return TokError("invalid operand for instruction");
3386
3387 auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
3388 if (!TIndex)
3389 return TokError("invalid operand for instruction");
3390
3391 Operands.push_back(AArch64Operand::CreateTIndexHint(
3392 TIndex->Encoding, Tok.getString(), S, getContext()));
3393 Lex(); // Eat identifier token.
3394 return ParseStatus::Success;
3395}
3396
3397/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3398/// instruction.
3399ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3400 SMLoc S = getLoc();
3401 const MCExpr *Expr = nullptr;
3402
3403 if (getTok().is(AsmToken::Hash)) {
3404 Lex(); // Eat hash token.
3405 }
3406
3407 if (parseSymbolicImmVal(Expr))
3408 return ParseStatus::Failure;
3409
3410 AArch64::Specifier ELFSpec;
3411 AArch64::Specifier DarwinSpec;
3412 int64_t Addend;
3413 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3414 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3415 // No modifier was specified at all; this is the syntax for an ELF basic
3416 // ADRP relocation (unfortunately).
3417 Expr =
3418 MCSpecifierExpr::create(Expr, AArch64::S_ABS_PAGE, getContext());
3419 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3420 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3421 Addend != 0) {
3422 return Error(S, "gotpage label reference not allowed an addend");
3423 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3424 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3425 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3426 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3427 ELFSpec != AArch64::S_GOT_PAGE &&
3428 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3429 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3430 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3431 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3432 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3433 // The operand must be an @page or @gotpage qualified symbolref.
3434 return Error(S, "page or gotpage label reference expected");
3435 }
3436 }
3437
3438 // We have either a label reference possibly with addend or an immediate. The
3439 // addend is a raw value here. The linker will adjust it to only reference the
3440 // page.
3441 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3442 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3443
3444 return ParseStatus::Success;
3445}
3446
3447/// tryParseAdrLabel - Parse and validate a source label for the ADR
3448/// instruction.
3449ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3450 SMLoc S = getLoc();
3451 const MCExpr *Expr = nullptr;
3452
3453 // Leave anything with a bracket to the default for SVE
3454 if (getTok().is(AsmToken::LBrac))
3455 return ParseStatus::NoMatch;
3456
3457 if (getTok().is(AsmToken::Hash))
3458 Lex(); // Eat hash token.
3459
3460 if (parseSymbolicImmVal(Expr))
3461 return ParseStatus::Failure;
3462
3463 AArch64::Specifier ELFSpec;
3464 AArch64::Specifier DarwinSpec;
3465 int64_t Addend;
3466 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3467 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3468 // No modifier was specified at all; this is the syntax for an ELF basic
3469 // ADR relocation (unfortunately).
3470 Expr = MCSpecifierExpr::create(Expr, AArch64::S_ABS, getContext());
3471 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3472 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3473 // adr. It's not actually GOT entry page address but the GOT address
3474 // itself - we just share the same variant kind with :got_auth: operator
3475 // applied for adrp.
3476 // TODO: can we somehow get current TargetMachine object to call
3477 // getCodeModel() on it to ensure we are using tiny code model?
3478 return Error(S, "unexpected adr label");
3479 }
3480 }
3481
3482 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3483 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3484 return ParseStatus::Success;
3485}
3486
3487/// tryParseFPImm - A floating point immediate expression operand.
3488template <bool AddFPZeroAsLiteral>
3489ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3490 SMLoc S = getLoc();
3491
3492 bool Hash = parseOptionalToken(AsmToken::Hash);
3493
3494 // Handle negation, as that still comes through as a separate token.
3495 bool isNegative = parseOptionalToken(AsmToken::Minus);
3496
3497 const AsmToken &Tok = getTok();
3498 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3499 if (!Hash)
3500 return ParseStatus::NoMatch;
3501 return TokError("invalid floating point immediate");
3502 }
3503
3504 // Parse hexadecimal representation.
3505 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3506 if (Tok.getIntVal() > 255 || isNegative)
3507 return TokError("encoded floating point value out of range");
3508
3509 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3510 Operands.push_back(
3511 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3512 } else {
3513 // Parse FP representation.
3514 APFloat RealVal(APFloat::IEEEdouble());
3515 auto StatusOrErr =
3516 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3517 if (errorToBool(StatusOrErr.takeError()))
3518 return TokError("invalid floating point representation");
3519
3520 if (isNegative)
3521 RealVal.changeSign();
3522
3523 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3524 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3525 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3526 } else
3527 Operands.push_back(AArch64Operand::CreateFPImm(
3528 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3529 }
3530
3531 Lex(); // Eat the token.
3532
3533 return ParseStatus::Success;
3534}
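// Illustrative examples of the FP immediate forms parsed above (assumed
// syntax, not from the original source):
//   fmov d0, #1.0      // real literal, converted via APFloat
//   fmov d0, #0x70     // hex form: the raw 8-bit encoded value (0-255)
//   fcmp d0, #0.0      // the AddFPZeroAsLiteral path emits "#0" and ".0"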
3535
3536/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3537/// a shift suffix, for example '#1, lsl #12'.
3538ParseStatus
3539AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3540 SMLoc S = getLoc();
3541
3542 if (getTok().is(AsmToken::Hash))
3543 Lex(); // Eat '#'
3544 else if (getTok().isNot(AsmToken::Integer))
3545 // The operand must start with '#' or be an integer; otherwise report no match.
3546 return ParseStatus::NoMatch;
3547
3548 if (getTok().is(AsmToken::Integer) &&
3549 getLexer().peekTok().is(AsmToken::Colon))
3550 return tryParseImmRange(Operands);
3551
3552 const MCExpr *Imm = nullptr;
3553 if (parseSymbolicImmVal(Imm))
3554 return ParseStatus::Failure;
3555 else if (getTok().isNot(AsmToken::Comma)) {
3556 Operands.push_back(
3557 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3558 return ParseStatus::Success;
3559 }
3560
3561 // Eat ','
3562 Lex();
3563 StringRef VecGroup;
3564 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3565 Operands.push_back(
3566 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3567 Operands.push_back(
3568 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3569 return ParseStatus::Success;
3570 }
3571
3572 // The optional operand must be "lsl #N" where N is non-negative.
3573 if (!getTok().is(AsmToken::Identifier) ||
3574 !getTok().getIdentifier().equals_insensitive("lsl"))
3575 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3576
3577 // Eat 'lsl'
3578 Lex();
3579
3580 parseOptionalToken(AsmToken::Hash);
3581
3582 if (getTok().isNot(AsmToken::Integer))
3583 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3584
3585 int64_t ShiftAmount = getTok().getIntVal();
3586
3587 if (ShiftAmount < 0)
3588 return Error(getLoc(), "positive shift amount required");
3589 Lex(); // Eat the number
3590
3591 // Just in case the optional lsl #0 is used for immediates other than zero.
3592 if (ShiftAmount == 0 && Imm != nullptr) {
3593 Operands.push_back(
3594 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3595 return ParseStatus::Success;
3596 }
3597
3598 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3599 getLoc(), getContext()));
3600 return ParseStatus::Success;
3601}
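// Illustrative examples of operands handled above (not from the original
// source):
//   add x0, x1, #1, lsl #12    // shifted 12-bit immediate
//   add x0, x1, #4095          // plain immediate, no shift
// An integer followed by ':' (e.g. the offset ranges used by some SME ZA
// operands) is forwarded to tryParseImmRange.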
3602
3603/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3604 /// suggestion to help with common typos.
3605 AArch64CC::CondCode
3606 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3607 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3608 .Case("eq", AArch64CC::EQ)
3609 .Case("ne", AArch64CC::NE)
3610 .Case("cs", AArch64CC::HS)
3611 .Case("hs", AArch64CC::HS)
3612 .Case("cc", AArch64CC::LO)
3613 .Case("lo", AArch64CC::LO)
3614 .Case("mi", AArch64CC::MI)
3615 .Case("pl", AArch64CC::PL)
3616 .Case("vs", AArch64CC::VS)
3617 .Case("vc", AArch64CC::VC)
3618 .Case("hi", AArch64CC::HI)
3619 .Case("ls", AArch64CC::LS)
3620 .Case("ge", AArch64CC::GE)
3621 .Case("lt", AArch64CC::LT)
3622 .Case("gt", AArch64CC::GT)
3623 .Case("le", AArch64CC::LE)
3624 .Case("al", AArch64CC::AL)
3625 .Case("nv", AArch64CC::NV)
3626 // SVE condition code aliases:
3627 .Case("none", AArch64CC::EQ)
3628 .Case("any", AArch64CC::NE)
3629 .Case("nlast", AArch64CC::HS)
3630 .Case("last", AArch64CC::LO)
3631 .Case("first", AArch64CC::MI)
3632 .Case("nfrst", AArch64CC::PL)
3633 .Case("pmore", AArch64CC::HI)
3634 .Case("plast", AArch64CC::LS)
3635 .Case("tcont", AArch64CC::GE)
3636 .Case("tstop", AArch64CC::LT)
3637 .Default(AArch64CC::Invalid);
3638
3639 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3640 Suggestion = "nfrst";
3641
3642 return CC;
3643}
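// Example spellings accepted above (illustrative):
//   b.eq   label     // integer condition code
//   b.none label     // SVE alias of "eq"
// "nfirst" is a common typo; the parser suggests the architectural
// spelling "nfrst".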
3644
3645/// parseCondCode - Parse a Condition Code operand.
3646bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3647 bool invertCondCode) {
3648 SMLoc S = getLoc();
3649 const AsmToken &Tok = getTok();
3650 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3651
3652 StringRef Cond = Tok.getString();
3653 std::string Suggestion;
3654 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3655 if (CC == AArch64CC::Invalid) {
3656 std::string Msg = "invalid condition code";
3657 if (!Suggestion.empty())
3658 Msg += ", did you mean " + Suggestion + "?";
3659 return TokError(Msg);
3660 }
3661 Lex(); // Eat identifier token.
3662
3663 if (invertCondCode) {
3664 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3665 return TokError("condition codes AL and NV are invalid for this instruction");
3666 CC = AArch64CC::getInvertedCondCode(CC);
3667 }
3668
3669 Operands.push_back(
3670 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3671 return false;
3672}
3673
3674ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3675 const AsmToken &Tok = getTok();
3676 SMLoc S = getLoc();
3677
3678 if (Tok.isNot(AsmToken::Identifier))
3679 return TokError("invalid operand for instruction");
3680
3681 unsigned PStateImm = -1;
3682 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3683 if (!SVCR)
3684 return ParseStatus::NoMatch;
3685 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3686 PStateImm = SVCR->Encoding;
3687
3688 Operands.push_back(
3689 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3690 Lex(); // Eat identifier token.
3691 return ParseStatus::Success;
3692}
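// SVCR operands are used by the SME mode-switching instructions, e.g.
// (illustrative): "smstart sm" and "smstart za".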
3693
3694ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3695 const AsmToken &Tok = getTok();
3696 SMLoc S = getLoc();
3697
3698 StringRef Name = Tok.getString();
3699
3700 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3701 Lex(); // eat "za[.(b|h|s|d)]"
3702 unsigned ElementWidth = 0;
3703 auto DotPosition = Name.find('.');
3704 if (DotPosition != StringRef::npos) {
3705 const auto &KindRes =
3706 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3707 if (!KindRes)
3708 return TokError(
3709 "Expected the register to be followed by element width suffix");
3710 ElementWidth = KindRes->second;
3711 }
3712 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3713 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3714 getContext()));
3715 if (getLexer().is(AsmToken::LBrac)) {
3716 // There's no comma after matrix operand, so we can parse the next operand
3717 // immediately.
3718 if (parseOperand(Operands, false, false))
3719 return ParseStatus::NoMatch;
3720 }
3721 return ParseStatus::Success;
3722 }
3723
3724 // Try to parse matrix register.
3725 MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3726 if (!Reg)
3727 return ParseStatus::NoMatch;
3728
3729 size_t DotPosition = Name.find('.');
3730 assert(DotPosition != StringRef::npos && "Unexpected register");
3731
3732 StringRef Head = Name.take_front(DotPosition);
3733 StringRef Tail = Name.drop_front(DotPosition);
3734 StringRef RowOrColumn = Head.take_back();
3735
3736 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3737 .Case("h", MatrixKind::Row)
3738 .Case("v", MatrixKind::Col)
3739 .Default(MatrixKind::Tile);
3740
3741 // Next, parse the element width suffix.
3742 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3743 if (!KindRes)
3744 return TokError(
3745 "Expected the register to be followed by element width suffix");
3746 unsigned ElementWidth = KindRes->second;
3747
3748 Lex();
3749
3750 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3751 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3752
3753 if (getLexer().is(AsmToken::LBrac)) {
3754 // There's no comma after matrix operand, so we can parse the next operand
3755 // immediately.
3756 if (parseOperand(Operands, false, false))
3757 return ParseStatus::NoMatch;
3758 }
3759 return ParseStatus::Success;
3760}
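// Illustrative examples of the matrix register syntax handled above:
//   za       or  za.b .. za.d    // whole array, optional element size
//   za0h.s                       // tile 0, horizontal (row) slices
//   za1v.d                       // tile 1, vertical (column) slices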
3761
3762 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
3763 /// argument. Parse it if present.
3764ParseStatus
3765AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3766 const AsmToken &Tok = getTok();
3767 std::string LowerID = Tok.getString().lower();
3768 AArch64_AM::ShiftExtendType ShOp =
3769 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3770 .Case("lsl", AArch64_AM::LSL)
3771 .Case("lsr", AArch64_AM::LSR)
3772 .Case("asr", AArch64_AM::ASR)
3773 .Case("ror", AArch64_AM::ROR)
3774 .Case("msl", AArch64_AM::MSL)
3775 .Case("uxtb", AArch64_AM::UXTB)
3776 .Case("uxth", AArch64_AM::UXTH)
3777 .Case("uxtw", AArch64_AM::UXTW)
3778 .Case("uxtx", AArch64_AM::UXTX)
3779 .Case("sxtb", AArch64_AM::SXTB)
3780 .Case("sxth", AArch64_AM::SXTH)
3781 .Case("sxtw", AArch64_AM::SXTW)
3782 .Case("sxtx", AArch64_AM::SXTX)
3784
3786 return ParseStatus::NoMatch;
3787
3788 SMLoc S = Tok.getLoc();
3789 Lex();
3790
3791 bool Hash = parseOptionalToken(AsmToken::Hash);
3792
3793 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3794 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3795 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3796 ShOp == AArch64_AM::MSL) {
3797 // We expect a number here.
3798 return TokError("expected #imm after shift specifier");
3799 }
3800
3801 // "extend" type operations don't need an immediate, #0 is implicit.
3802 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3803 Operands.push_back(
3804 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3805 return ParseStatus::Success;
3806 }
3807
3808 // Make sure we do actually have a number, identifier or a parenthesized
3809 // expression.
3810 SMLoc E = getLoc();
3811 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3812 !getTok().is(AsmToken::Identifier))
3813 return Error(E, "expected integer shift amount");
3814
3815 const MCExpr *ImmVal;
3816 if (getParser().parseExpression(ImmVal))
3817 return ParseStatus::Failure;
3818
3819 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3820 if (!MCE)
3821 return Error(E, "expected constant '#imm' after shift specifier");
3822
3823 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3824 Operands.push_back(AArch64Operand::CreateShiftExtend(
3825 ShOp, MCE->getValue(), true, S, E, getContext()));
3826 return ParseStatus::Success;
3827}
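// Illustrative examples of operands accepted above:
//   add x0, x1, x2, lsl #3     // shift with explicit amount
//   add x0, x1, w2, sxtw       // extend, implicit #0 amount
//   add x0, x1, w2, uxtb #2    // extend with explicit amount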
3828
3829static const struct Extension {
3830 const char *Name;
3831 const FeatureBitset Features;
3832 } ExtensionMap[] = {
3833 {"crc", {AArch64::FeatureCRC}},
3834 {"sm4", {AArch64::FeatureSM4}},
3835 {"sha3", {AArch64::FeatureSHA3}},
3836 {"sha2", {AArch64::FeatureSHA2}},
3837 {"aes", {AArch64::FeatureAES}},
3838 {"crypto", {AArch64::FeatureCrypto}},
3839 {"fp", {AArch64::FeatureFPARMv8}},
3840 {"simd", {AArch64::FeatureNEON}},
3841 {"ras", {AArch64::FeatureRAS}},
3842 {"rasv2", {AArch64::FeatureRASv2}},
3843 {"lse", {AArch64::FeatureLSE}},
3844 {"predres", {AArch64::FeaturePredRes}},
3845 {"predres2", {AArch64::FeatureSPECRES2}},
3846 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3847 {"mte", {AArch64::FeatureMTE}},
3848 {"memtag", {AArch64::FeatureMTE}},
3849 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3850 {"pan", {AArch64::FeaturePAN}},
3851 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3852 {"ccpp", {AArch64::FeatureCCPP}},
3853 {"rcpc", {AArch64::FeatureRCPC}},
3854 {"rng", {AArch64::FeatureRandGen}},
3855 {"sve", {AArch64::FeatureSVE}},
3856 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3857 {"sve2", {AArch64::FeatureSVE2}},
3858 {"sve-aes", {AArch64::FeatureSVEAES}},
3859 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3860 {"sve-sm4", {AArch64::FeatureSVESM4}},
3861 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3862 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3863 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3864 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3865 {"sve2-bitperm",
3866 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3867 AArch64::FeatureSVE2}},
3868 {"sve2p1", {AArch64::FeatureSVE2p1}},
3869 {"ls64", {AArch64::FeatureLS64}},
3870 {"xs", {AArch64::FeatureXS}},
3871 {"pauth", {AArch64::FeaturePAuth}},
3872 {"flagm", {AArch64::FeatureFlagM}},
3873 {"rme", {AArch64::FeatureRME}},
3874 {"sme", {AArch64::FeatureSME}},
3875 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3876 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3877 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3878 {"sme2", {AArch64::FeatureSME2}},
3879 {"sme2p1", {AArch64::FeatureSME2p1}},
3880 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3881 {"hbc", {AArch64::FeatureHBC}},
3882 {"mops", {AArch64::FeatureMOPS}},
3883 {"mec", {AArch64::FeatureMEC}},
3884 {"the", {AArch64::FeatureTHE}},
3885 {"d128", {AArch64::FeatureD128}},
3886 {"lse128", {AArch64::FeatureLSE128}},
3887 {"ite", {AArch64::FeatureITE}},
3888 {"cssc", {AArch64::FeatureCSSC}},
3889 {"rcpc3", {AArch64::FeatureRCPC3}},
3890 {"gcs", {AArch64::FeatureGCS}},
3891 {"bf16", {AArch64::FeatureBF16}},
3892 {"compnum", {AArch64::FeatureComplxNum}},
3893 {"dotprod", {AArch64::FeatureDotProd}},
3894 {"f32mm", {AArch64::FeatureMatMulFP32}},
3895 {"f64mm", {AArch64::FeatureMatMulFP64}},
3896 {"fp16", {AArch64::FeatureFullFP16}},
3897 {"fp16fml", {AArch64::FeatureFP16FML}},
3898 {"i8mm", {AArch64::FeatureMatMulInt8}},
3899 {"lor", {AArch64::FeatureLOR}},
3900 {"profile", {AArch64::FeatureSPE}},
3901 // "rdma" is the name documented by binutils for the feature, but
3902 // binutils also accepts incomplete prefixes of features, so "rdm"
3903 // works too. Support both spellings here.
3904 {"rdm", {AArch64::FeatureRDM}},
3905 {"rdma", {AArch64::FeatureRDM}},
3906 {"sb", {AArch64::FeatureSB}},
3907 {"ssbs", {AArch64::FeatureSSBS}},
3908 {"fp8", {AArch64::FeatureFP8}},
3909 {"faminmax", {AArch64::FeatureFAMINMAX}},
3910 {"fp8fma", {AArch64::FeatureFP8FMA}},
3911 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3912 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3913 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3914 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3915 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3916 {"lut", {AArch64::FeatureLUT}},
3917 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3918 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3919 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3920 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3921 {"cpa", {AArch64::FeatureCPA}},
3922 {"tlbiw", {AArch64::FeatureTLBIW}},
3923 {"pops", {AArch64::FeaturePoPS}},
3924 {"cmpbr", {AArch64::FeatureCMPBR}},
3925 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3926 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3927 {"fprcvt", {AArch64::FeatureFPRCVT}},
3928 {"lsfe", {AArch64::FeatureLSFE}},
3929 {"sme2p2", {AArch64::FeatureSME2p2}},
3930 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3931 {"sve2p2", {AArch64::FeatureSVE2p2}},
3932 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3933 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3934 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3935 {"lsui", {AArch64::FeatureLSUI}},
3936 {"occmo", {AArch64::FeatureOCCMO}},
3937 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3938 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3939 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3940 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3941 {"cmh", {AArch64::FeatureCMH}},
3942 {"lscp", {AArch64::FeatureLSCP}},
3943 {"tlbid", {AArch64::FeatureTLBID}},
3944 {"mpamv2", {AArch64::FeatureMPAMv2}},
3945 {"mtetc", {AArch64::FeatureMTETC}},
3946 {"gcie", {AArch64::FeatureGCIE}},
3947 {"sme2p3", {AArch64::FeatureSME2p3}},
3948 {"sve2p3", {AArch64::FeatureSVE2p3}},
3949 {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
3950 {"f16mm", {AArch64::FeatureF16MM}},
3951 {"f16f32dot", {AArch64::FeatureF16F32DOT}},
3952 {"f16f32mm", {AArch64::FeatureF16F32MM}},
3953 {"mops-go", {AArch64::FeatureMOPS_GO}},
3954 {"poe2", {AArch64::FeatureS1POE2}},
3955 {"tev", {AArch64::FeatureTEV}},
3956 {"btie", {AArch64::FeatureBTIE}},
3958
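// These extension names are the spellings accepted by the .arch, .cpu and
// .arch_extension directives, e.g. (illustrative):
//   .arch_extension sve2
//   .arch armv8.5-a+memtag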
3959static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3960 if (FBS[AArch64::HasV8_0aOps])
3961 Str += "ARMv8a";
3962 if (FBS[AArch64::HasV8_1aOps])
3963 Str += "ARMv8.1a";
3964 else if (FBS[AArch64::HasV8_2aOps])
3965 Str += "ARMv8.2a";
3966 else if (FBS[AArch64::HasV8_3aOps])
3967 Str += "ARMv8.3a";
3968 else if (FBS[AArch64::HasV8_4aOps])
3969 Str += "ARMv8.4a";
3970 else if (FBS[AArch64::HasV8_5aOps])
3971 Str += "ARMv8.5a";
3972 else if (FBS[AArch64::HasV8_6aOps])
3973 Str += "ARMv8.6a";
3974 else if (FBS[AArch64::HasV8_7aOps])
3975 Str += "ARMv8.7a";
3976 else if (FBS[AArch64::HasV8_8aOps])
3977 Str += "ARMv8.8a";
3978 else if (FBS[AArch64::HasV8_9aOps])
3979 Str += "ARMv8.9a";
3980 else if (FBS[AArch64::HasV9_0aOps])
3981 Str += "ARMv9-a";
3982 else if (FBS[AArch64::HasV9_1aOps])
3983 Str += "ARMv9.1a";
3984 else if (FBS[AArch64::HasV9_2aOps])
3985 Str += "ARMv9.2a";
3986 else if (FBS[AArch64::HasV9_3aOps])
3987 Str += "ARMv9.3a";
3988 else if (FBS[AArch64::HasV9_4aOps])
3989 Str += "ARMv9.4a";
3990 else if (FBS[AArch64::HasV9_5aOps])
3991 Str += "ARMv9.5a";
3992 else if (FBS[AArch64::HasV9_6aOps])
3993 Str += "ARMv9.6a";
3994 else if (FBS[AArch64::HasV9_7aOps])
3995 Str += "ARMv9.7a";
3996 else if (FBS[AArch64::HasV8_0rOps])
3997 Str += "ARMv8r";
3998 else {
3999 SmallVector<std::string, 2> ExtMatches;
4000 for (const auto& Ext : ExtensionMap) {
4001 // Use & in case multiple features are enabled
4002 if ((FBS & Ext.Features) != FeatureBitset())
4003 ExtMatches.push_back(Ext.Name);
4004 }
4005 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
4006 }
4007}
4008
4009void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
4010 SMLoc S) {
4011 const uint16_t Op2 = Encoding & 7;
4012 const uint16_t Cm = (Encoding & 0x78) >> 3;
4013 const uint16_t Cn = (Encoding & 0x780) >> 7;
4014 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
4015
4016 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
4017
4018 Operands.push_back(
4019 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4020 Operands.push_back(
4021 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
4022 Operands.push_back(
4023 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
4024 Expr = MCConstantExpr::create(Op2, getContext());
4025 Operands.push_back(
4026 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
4027}
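// The encoding packs op1 into bits [13:11], CRn into [10:7], CRm into [6:3]
// and op2 into [2:0]. For example (illustrative), "ic ialluis" expands to
// "sys #0, c7, c1, #0".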
4028
4029 /// parseSysAlias - The IC, DC, AT, TLBI, MLBI, PLBI, GIC and GSB instructions
4030 /// are simple aliases for the SYS instruction. Parse them specially so that
4031 /// we create a SYS MCInst.
4032bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
4033 OperandVector &Operands) {
4034 if (Name.contains('.'))
4035 return TokError("invalid operand");
4036
4037 Mnemonic = Name;
4038 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
4039
4040 const AsmToken &Tok = getTok();
4041 StringRef Op = Tok.getString();
4042 SMLoc S = Tok.getLoc();
4043 bool ExpectRegister = true;
4044 bool OptionalRegister = false;
4045 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
4046 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4047
4048 if (Mnemonic == "ic") {
4049 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
4050 if (!IC)
4051 return TokError("invalid operand for IC instruction");
4052 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
4053 std::string Str("IC " + std::string(IC->Name) + " requires: ");
4055 return TokError(Str);
4056 }
4057 ExpectRegister = IC->NeedsReg;
4058 createSysAlias(IC->Encoding, Operands, S);
4059 } else if (Mnemonic == "dc") {
4060 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
4061 if (!DC)
4062 return TokError("invalid operand for DC instruction");
4063 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
4064 std::string Str("DC " + std::string(DC->Name) + " requires: ");
4066 return TokError(Str);
4067 }
4068 createSysAlias(DC->Encoding, Operands, S);
4069 } else if (Mnemonic == "at") {
4070 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
4071 if (!AT)
4072 return TokError("invalid operand for AT instruction");
4073 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
4074 std::string Str("AT " + std::string(AT->Name) + " requires: ");
4076 return TokError(Str);
4077 }
4078 createSysAlias(AT->Encoding, Operands, S);
4079 } else if (Mnemonic == "tlbi") {
4080 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
4081 if (!TLBI)
4082 return TokError("invalid operand for TLBI instruction");
4083 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
4084 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
4086 return TokError(Str);
4087 }
4088 ExpectRegister = TLBI->NeedsReg;
4089 bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
4090 if (hasAll || hasTLBID) {
4091 OptionalRegister = TLBI->OptionalReg;
4092 }
4093 createSysAlias(TLBI->Encoding, Operands, S);
4094 } else if (Mnemonic == "mlbi") {
4095 const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
4096 if (!MLBI)
4097 return TokError("invalid operand for MLBI instruction");
4098 else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
4099 std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
4101 return TokError(Str);
4102 }
4103 ExpectRegister = MLBI->NeedsReg;
4104 createSysAlias(MLBI->Encoding, Operands, S);
4105 } else if (Mnemonic == "gic") {
4106 const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
4107 if (!GIC)
4108 return TokError("invalid operand for GIC instruction");
4109 else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
4110 std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
4112 return TokError(Str);
4113 }
4114 ExpectRegister = GIC->NeedsReg;
4115 createSysAlias(GIC->Encoding, Operands, S);
4116 } else if (Mnemonic == "gsb") {
4117 const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
4118 if (!GSB)
4119 return TokError("invalid operand for GSB instruction");
4120 else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
4121 std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
4123 return TokError(Str);
4124 }
4125 ExpectRegister = false;
4126 createSysAlias(GSB->Encoding, Operands, S);
4127 } else if (Mnemonic == "plbi") {
4128 const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
4129 if (!PLBI)
4130 return TokError("invalid operand for PLBI instruction");
4131 else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
4132 std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
4134 return TokError(Str);
4135 }
4136 ExpectRegister = PLBI->NeedsReg;
4137 if (hasAll || hasTLBID) {
4138 OptionalRegister = PLBI->OptionalReg;
4139 }
4140 createSysAlias(PLBI->Encoding, Operands, S);
4141 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
4142 Mnemonic == "cosp") {
4143
4144 if (Op.lower() != "rctx")
4145 return TokError("invalid operand for prediction restriction instruction");
4146
4147 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
4148 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
4149
4150 if (Mnemonic == "cosp" && !hasSpecres2)
4151 return TokError("COSP requires: predres2");
4152 if (!hasPredres)
4153 return TokError(Mnemonic.upper() + "RCTX requires: predres");
4154
4155 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
4156 : Mnemonic == "dvp" ? 0b101
4157 : Mnemonic == "cosp" ? 0b110
4158 : Mnemonic == "cpp" ? 0b111
4159 : 0;
4160 assert(PRCTX_Op2 &&
4161 "Invalid mnemonic for prediction restriction instruction");
4162 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
4163 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
4164
4165 createSysAlias(Encoding, Operands, S);
4166 }
4167
4168 Lex(); // Eat operand.
4169
4170 bool HasRegister = false;
4171
4172 // Check for the optional register operand.
4173 if (parseOptionalToken(AsmToken::Comma)) {
4174 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
4175 return TokError("expected register operand");
4176 HasRegister = true;
4177 }
4178
4179 if (!OptionalRegister) {
4180 if (ExpectRegister && !HasRegister)
4181 return TokError("specified " + Mnemonic + " op requires a register");
4182 else if (!ExpectRegister && HasRegister)
4183 return TokError("specified " + Mnemonic + " op does not use a register");
4184 }
4185
4186 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4187 return true;
4188
4189 return false;
4190}
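// Illustrative examples of the aliases handled above:
//   tlbi vmalle1is      // no register operand
//   tlbi vae1, x0       // register operand required
//   dc civac, x1        // data cache clean and invalidate by VA
//   cfp rctx, x2        // prediction restriction alias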
4191
4192 /// parseSyslAlias - The GICR instructions are simple aliases for
4193 /// the SYSL instruction. Parse them specially so that we create a
4194 /// SYSL MCInst.
4195bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
4196 OperandVector &Operands) {
4197
4198 Mnemonic = Name;
4199 Operands.push_back(
4200 AArch64Operand::CreateToken("sysl", NameLoc, getContext()));
4201
4202 // Now expect two operands: a register, then an identifier.
4203 SMLoc startLoc = getLoc();
4204 const AsmToken &regTok = getTok();
4205 StringRef reg = regTok.getString();
4206 MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
4207 if (!Reg)
4208 return TokError("expected register operand");
4209
4210 Operands.push_back(AArch64Operand::CreateReg(
4211 Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
4212
4213 Lex(); // Eat token
4214 if (parseToken(AsmToken::Comma))
4215 return true;
4216
4217 // Check for identifier
4218 const AsmToken &operandTok = getTok();
4219 StringRef Op = operandTok.getString();
4220 SMLoc S2 = operandTok.getLoc();
4221 Lex(); // Eat token
4222
4223 if (Mnemonic == "gicr") {
4224 const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
4225 if (!GICR)
4226 return Error(S2, "invalid operand for GICR instruction");
4227 else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
4228 std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
4230 return Error(S2, Str);
4231 }
4232 createSysAlias(GICR->Encoding, Operands, S2);
4233 }
4234
4235 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4236 return true;
4237
4238 return false;
4239}
4240
4241/// parseSyspAlias - The TLBIP instructions are simple aliases for
4242/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4243bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4244 OperandVector &Operands) {
4245 if (Name.contains('.'))
4246 return TokError("invalid operand");
4247
4248 Mnemonic = Name;
4249 Operands.push_back(
4250 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4251
4252 const AsmToken &Tok = getTok();
4253 StringRef Op = Tok.getString();
4254 SMLoc S = Tok.getLoc();
4255
4256 if (Mnemonic == "tlbip") {
4257 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4258 if (HasnXSQualifier) {
4259 Op = Op.drop_back(3);
4260 }
4261 const AArch64TLBIP::TLBIP *TLBIPorig = AArch64TLBIP::lookupTLBIPByName(Op);
4262 if (!TLBIPorig)
4263 return TokError("invalid operand for TLBIP instruction");
4264 const AArch64TLBIP::TLBIP TLBIP(
4265 TLBIPorig->Name, TLBIPorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4266 TLBIPorig->NeedsReg, TLBIPorig->OptionalReg,
4267 HasnXSQualifier
4268 ? TLBIPorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4269 : TLBIPorig->FeaturesRequired);
4270 if (!TLBIP.haveFeatures(getSTI().getFeatureBits())) {
4271 std::string Name =
4272 std::string(TLBIP.Name) + (HasnXSQualifier ? "nXS" : "");
4273 std::string Str("TLBIP " + Name + " requires: ");
4274 setRequiredFeatureString(TLBIP.getRequiredFeatures(), Str);
4275 return TokError(Str);
4276 }
4277 createSysAlias(TLBIP.Encoding, Operands, S);
4278 }
4279
4280 Lex(); // Eat operand.
4281
4282 if (parseComma())
4283 return true;
4284
4285 if (Tok.isNot(AsmToken::Identifier))
4286 return TokError("expected register identifier");
4287 auto Result = tryParseSyspXzrPair(Operands);
4288 if (Result.isNoMatch())
4289 Result = tryParseGPRSeqPair(Operands);
4290 if (!Result.isSuccess())
4291 return TokError("specified " + Mnemonic +
4292 " op requires a pair of registers");
4293
4294 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4295 return true;
4296
4297 return false;
4298}
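// TLBIP aliases take a pair of consecutive registers, e.g. (illustrative):
//   tlbip vae1, x0, x1
//   tlbip vae1nxs, x2, x3    // nXS-qualified form (requires FEAT_XS)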
4299
4300ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4301 MCAsmParser &Parser = getParser();
4302 const AsmToken &Tok = getTok();
4303
4304 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4305 return TokError("'csync' operand expected");
4306 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4307 // Immediate operand.
4308 const MCExpr *ImmVal;
4309 SMLoc ExprLoc = getLoc();
4310 AsmToken IntTok = Tok;
4311 if (getParser().parseExpression(ImmVal))
4312 return ParseStatus::Failure;
4313 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4314 if (!MCE)
4315 return Error(ExprLoc, "immediate value expected for barrier operand");
4316 int64_t Value = MCE->getValue();
4317 if (Mnemonic == "dsb" && Value > 15) {
4318 // This case is no match here, but it might be matched by the nXS
4319 // variant. We deliberately do not unlex the optional '#', as it is not
4320 // needed to characterize an integer immediate.
4321 Parser.getLexer().UnLex(IntTok);
4322 return ParseStatus::NoMatch;
4323 }
4324 if (Value < 0 || Value > 15)
4325 return Error(ExprLoc, "barrier operand out of range");
4326 auto DB = AArch64DB::lookupDBByEncoding(Value);
4327 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4328 ExprLoc, getContext(),
4329 false /*hasnXSModifier*/));
4330 return ParseStatus::Success;
4331 }
4332
4333 if (Tok.isNot(AsmToken::Identifier))
4334 return TokError("invalid operand for instruction");
4335
4336 StringRef Operand = Tok.getString();
4337 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4338 auto DB = AArch64DB::lookupDBByName(Operand);
4339 // The only valid named option for ISB is 'sy'
4340 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4341 return TokError("'sy' or #imm operand expected");
4342 // The only valid named option for TSB is 'csync'
4343 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4344 return TokError("'csync' operand expected");
4345 if (!DB && !TSB) {
4346 if (Mnemonic == "dsb") {
4347 // This case is a no match here, but it might be matched by the nXS
4348 // variant.
4349 return ParseStatus::NoMatch;
4350 }
4351 return TokError("invalid barrier option name");
4352 }
4353
4354 Operands.push_back(AArch64Operand::CreateBarrier(
4355 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4356 getContext(), false /*hasnXSModifier*/));
4357 Lex(); // Consume the option
4358
4359 return ParseStatus::Success;
4360}
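// Illustrative barrier operands accepted above:
//   dsb sy       // named option
//   dmb ishld    // named option
//   dsb #0       // raw 4-bit immediate
//   tsb csync    // the only named option valid for TSB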
4361
4362ParseStatus
4363AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4364 const AsmToken &Tok = getTok();
4365
4366 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4367 if (Mnemonic != "dsb")
4368 return ParseStatus::Failure;
4369
4370 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4371 // Immediate operand.
4372 const MCExpr *ImmVal;
4373 SMLoc ExprLoc = getLoc();
4374 if (getParser().parseExpression(ImmVal))
4375 return ParseStatus::Failure;
4376 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4377 if (!MCE)
4378 return Error(ExprLoc, "immediate value expected for barrier operand");
4379 int64_t Value = MCE->getValue();
4380 // v8.7-A DSB in the nXS variant accepts only the following immediate
4381 // values: 16, 20, 24, 28.
4382 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4383 return Error(ExprLoc, "barrier operand out of range");
4384 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4385 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4386 ExprLoc, getContext(),
4387 true /*hasnXSModifier*/));
4388 return ParseStatus::Success;
4389 }
4390
4391 if (Tok.isNot(AsmToken::Identifier))
4392 return TokError("invalid operand for instruction");
4393
4394 StringRef Operand = Tok.getString();
4395 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4396
4397 if (!DB)
4398 return TokError("invalid barrier option name");
4399
4400 Operands.push_back(
4401 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4402 getContext(), true /*hasnXSModifier*/));
4403 Lex(); // Consume the option
4404
4405 return ParseStatus::Success;
4406}
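// Illustrative nXS barrier operands (FEAT_XS):
//   dsb synxs    // named form
//   dsb #28      // immediate form; only 16, 20, 24 and 28 are accepted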
4407
4408ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4409 const AsmToken &Tok = getTok();
4410
4411 if (Tok.isNot(AsmToken::Identifier))
4412 return ParseStatus::NoMatch;
4413
4414 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4415 return ParseStatus::NoMatch;
4416
4417 int MRSReg, MSRReg;
4418 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4419 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4420 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4421 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4422 } else
4423 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4424
4425 unsigned PStateImm = -1;
4426 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4427 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4428 PStateImm = PState15->Encoding;
4429 if (!PState15) {
4430 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4431 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4432 PStateImm = PState1->Encoding;
4433 }
4434
4435 Operands.push_back(
4436 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4437 PStateImm, getContext()));
4438 Lex(); // Eat identifier
4439
4440 return ParseStatus::Success;
4441}
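// Illustrative system register operands:
//   mrs x0, ttbr0_el1        // named register
//   msr spsel, #1            // PSTATE field with immediate
//   mrs x0, s3_0_c2_c0_0     // generic op0_op1_Cn_Cm_op2 form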
4442
4443ParseStatus
4444AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4445 SMLoc S = getLoc();
4446 const AsmToken &Tok = getTok();
4447 if (Tok.isNot(AsmToken::Identifier))
4448 return TokError("invalid operand for instruction");
4449
4450 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4451 if (!PH)
4452 return TokError("invalid operand for instruction");
4453
4454 Operands.push_back(AArch64Operand::CreatePHintInst(
4455 PH->Encoding, Tok.getString(), S, getContext()));
4456 Lex(); // Eat identifier token.
4457 return ParseStatus::Success;
4458}
4459
4460/// tryParseNeonVectorRegister - Parse a vector register operand.
4461bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4462 if (getTok().isNot(AsmToken::Identifier))
4463 return true;
4464
4465 SMLoc S = getLoc();
4466 // Check for a vector register specifier first.
4467 StringRef Kind;
4468 MCRegister Reg;
4469 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4470 if (!Res.isSuccess())
4471 return true;
4472
4473 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4474 if (!KindRes)
4475 return true;
4476
4477 unsigned ElementWidth = KindRes->second;
4478 Operands.push_back(
4479 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4480 S, getLoc(), getContext()));
4481
4482 // If there was an explicit qualifier, that goes on as a literal text
4483 // operand.
4484 if (!Kind.empty())
4485 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4486
4487 return tryParseVectorIndex(Operands).isFailure();
4488}
4489
4490ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4491 SMLoc SIdx = getLoc();
4492 if (parseOptionalToken(AsmToken::LBrac)) {
4493 const MCExpr *ImmVal;
4494 if (getParser().parseExpression(ImmVal))
4495 return ParseStatus::NoMatch;
4496 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4497 if (!MCE)
4498 return TokError("immediate value expected for vector index");
4499
4500 SMLoc E = getLoc();
4501
4502 if (parseToken(AsmToken::RBrac, "']' expected"))
4503 return ParseStatus::Failure;
4504
4505 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4506 E, getContext()));
4507 return ParseStatus::Success;
4508 }
4509
4510 return ParseStatus::NoMatch;
4511}
4512
4513// tryParseVectorRegister - Try to parse a vector register name with
4514// optional kind specifier. If it is a register specifier, eat the token
4515// and return it.
4516ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4517 StringRef &Kind,
4518 RegKind MatchKind) {
4519 const AsmToken &Tok = getTok();
4520
4521 if (Tok.isNot(AsmToken::Identifier))
4522 return ParseStatus::NoMatch;
4523
4524 StringRef Name = Tok.getString();
4525 // If there is a kind specifier, it's separated from the register name by
4526 // a '.'.
4527 size_t Start = 0, Next = Name.find('.');
4528 StringRef Head = Name.slice(Start, Next);
4529 MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
4530
4531 if (RegNum) {
4532 if (Next != StringRef::npos) {
4533 Kind = Name.substr(Next);
4534 if (!isValidVectorKind(Kind, MatchKind))
4535 return TokError("invalid vector kind qualifier");
4536 }
4537 Lex(); // Eat the register token.
4538
4539 Reg = RegNum;
4540 return ParseStatus::Success;
4541 }
4542
4543 return ParseStatus::NoMatch;
4544}
4545
4546ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4547 OperandVector &Operands) {
4548 ParseStatus Status =
4549 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4550 if (!Status.isSuccess())
4551 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4552 return Status;
4553}
4554
4555/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4556template <RegKind RK>
4557ParseStatus
4558AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4559 // Check for a SVE predicate register specifier first.
4560 const SMLoc S = getLoc();
4561 StringRef Kind;
4562 MCRegister RegNum;
4563 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4564 if (!Res.isSuccess())
4565 return Res;
4566
4567 const auto &KindRes = parseVectorKind(Kind, RK);
4568 if (!KindRes)
4569 return ParseStatus::NoMatch;
4570
4571 unsigned ElementWidth = KindRes->second;
4572 Operands.push_back(AArch64Operand::CreateVectorReg(
4573 RegNum, RK, ElementWidth, S,
4574 getLoc(), getContext()));
4575
4576 if (getLexer().is(AsmToken::LBrac)) {
4577 if (RK == RegKind::SVEPredicateAsCounter) {
4578 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4579 if (ResIndex.isSuccess())
4580 return ParseStatus::Success;
4581 } else {
4582 // Indexed predicate; there's no comma, so try to parse the next operand
4583 // immediately.
4584 if (parseOperand(Operands, false, false))
4585 return ParseStatus::NoMatch;
4586 }
4587 }
4588
4589 // Not all predicates are followed by a '/m' or '/z'.
4590 if (getTok().isNot(AsmToken::Slash))
4591 return ParseStatus::Success;
4592
4593 // But when they are, they shouldn't have an element type suffix.
4594 if (!Kind.empty())
4595 return Error(S, "not expecting size suffix");
4596
4597 // Add a literal slash as operand
4598 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4599
4600 Lex(); // Eat the slash.
4601
4602 // Zeroing or merging?
4603 auto Pred = getTok().getString().lower();
4604 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4605 return Error(getLoc(), "expecting 'z' predication");
4606
4607 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4608 return Error(getLoc(), "expecting 'm' or 'z' predication");
4609
4610 // Add zero/merge token.
4611 const char *ZM = Pred == "z" ? "z" : "m";
4612 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4613
4614 Lex(); // Eat zero/merge token.
4615 return ParseStatus::Success;
4616}
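// Illustrative SVE predicate operands:
//   add z0.s, p0/m, z0.s, z1.s   // merging predication
//   cpy z0.b, p0/z, #0           // zeroing predication
//   pext p0.h, pn8[0]            // predicate-as-counter with index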
4617
4618/// parseRegister - Parse a register operand.
4619bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4620 // Try for a Neon vector register.
4621 if (!tryParseNeonVectorRegister(Operands))
4622 return false;
4623
4624 if (tryParseZTOperand(Operands).isSuccess())
4625 return false;
4626
4627 // Otherwise try for a scalar register.
4628 if (tryParseGPROperand<false>(Operands).isSuccess())
4629 return false;
4630
4631 return true;
4632}
4633
4634bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4635 bool HasELFModifier = false;
4636 AArch64::Specifier RefKind;
4637 SMLoc Loc = getLexer().getLoc();
4638 if (parseOptionalToken(AsmToken::Colon)) {
4639 HasELFModifier = true;
4640
4641 if (getTok().isNot(AsmToken::Identifier))
4642 return TokError("expect relocation specifier in operand after ':'");
4643
4644 std::string LowerCase = getTok().getIdentifier().lower();
4645 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4646 .Case("lo12", AArch64::S_LO12)
4647 .Case("abs_g3", AArch64::S_ABS_G3)
4648 .Case("abs_g2", AArch64::S_ABS_G2)
4649 .Case("abs_g2_s", AArch64::S_ABS_G2_S)
4650 .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
4651 .Case("abs_g1", AArch64::S_ABS_G1)
4652 .Case("abs_g1_s", AArch64::S_ABS_G1_S)
4653 .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
4654 .Case("abs_g0", AArch64::S_ABS_G0)
4655 .Case("abs_g0_s", AArch64::S_ABS_G0_S)
4656 .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
4657 .Case("prel_g3", AArch64::S_PREL_G3)
4658 .Case("prel_g2", AArch64::S_PREL_G2)
4659 .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
4660 .Case("prel_g1", AArch64::S_PREL_G1)
4661 .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
4662 .Case("prel_g0", AArch64::S_PREL_G0)
4663 .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
4664 .Case("dtprel_g2", AArch64::S_DTPREL_G2)
4665 .Case("dtprel_g1", AArch64::S_DTPREL_G1)
4666 .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
4667 .Case("dtprel_g0", AArch64::S_DTPREL_G0)
4668 .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
4669 .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
4670 .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
4671 .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
4672 .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
4673 .Case("tprel_g2", AArch64::S_TPREL_G2)
4674 .Case("tprel_g1", AArch64::S_TPREL_G1)
4675 .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
4676 .Case("tprel_g0", AArch64::S_TPREL_G0)
4677 .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
4678 .Case("tprel_hi12", AArch64::S_TPREL_HI12)
4679 .Case("tprel_lo12", AArch64::S_TPREL_LO12)
4680 .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
4681 .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
4682 .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
4683 .Case("got", AArch64::S_GOT_PAGE)
4684 .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
4685 .Case("got_lo12", AArch64::S_GOT_LO12)
4686 .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
4687 .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
4688 .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
4689 .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
4690 .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
4691 .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
4692 .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
4693 .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
4694 .Case("secrel_lo12", AArch64::S_SECREL_LO12)
4695 .Case("secrel_hi12", AArch64::S_SECREL_HI12)
4696 .Default(AArch64::S_INVALID);
4697
4698 if (RefKind == AArch64::S_INVALID)
4699 return TokError("expect relocation specifier in operand after ':'");
4700
4701 Lex(); // Eat identifier
4702
4703 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4704 return true;
4705 }
4706
4707 if (getParser().parseExpression(ImmVal))
4708 return true;
4709
4710 if (HasELFModifier)
4711 ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);
4712
4713 SMLoc EndLoc;
4714 if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4715 if (getParser().parseAtSpecifier(ImmVal, EndLoc))
4716 return true;
4717 const MCExpr *Term;
4718 MCBinaryExpr::Opcode Opcode;
4719 if (parseOptionalToken(AsmToken::Plus))
4720 Opcode = MCBinaryExpr::Add;
4721 else if (parseOptionalToken(AsmToken::Minus))
4722 Opcode = MCBinaryExpr::Sub;
4723 else
4724 return false;
4725 if (getParser().parsePrimaryExpr(Term, EndLoc))
4726 return true;
4727 ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
4728 }
4729
4730 return false;
4731}
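// Illustrative relocation-specifier immediates parsed above:
//   add  x0, x0, :lo12:sym
//   movz x0, #:abs_g1:sym
//   ldr  x0, [x0, :got_lo12:sym]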
4732
4733ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4734 if (getTok().isNot(AsmToken::LCurly))
4735 return ParseStatus::NoMatch;
4736
4737 auto ParseMatrixTile = [this](unsigned &Reg,
4738 unsigned &ElementWidth) -> ParseStatus {
4739 StringRef Name = getTok().getString();
4740 size_t DotPosition = Name.find('.');
4741 if (DotPosition == StringRef::npos)
4742 return ParseStatus::NoMatch;
4743
4744 unsigned RegNum = matchMatrixTileListRegName(Name);
4745 if (!RegNum)
4746 return ParseStatus::NoMatch;
4747
4748 StringRef Tail = Name.drop_front(DotPosition);
4749 const std::optional<std::pair<int, int>> &KindRes =
4750 parseVectorKind(Tail, RegKind::Matrix);
4751 if (!KindRes)
4752 return TokError(
4753 "Expected the register to be followed by element width suffix");
4754 ElementWidth = KindRes->second;
4755 Reg = RegNum;
4756 Lex(); // Eat the register.
4757 return ParseStatus::Success;
4758 };
4759
4760 SMLoc S = getLoc();
4761 auto LCurly = getTok();
4762 Lex(); // Eat left bracket token.
4763
4764 // Empty matrix list
4765 if (parseOptionalToken(AsmToken::RCurly)) {
4766 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4767 /*RegMask=*/0, S, getLoc(), getContext()));
4768 return ParseStatus::Success;
4769 }
4770
4771 // Try to parse the {za} alias early.
4772 if (getTok().getString().equals_insensitive("za")) {
4773 Lex(); // Eat 'za'
4774
4775 if (parseToken(AsmToken::RCurly, "'}' expected"))
4776 return ParseStatus::Failure;
4777
4778 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4779 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4780 return ParseStatus::Success;
4781 }
4782
4783 SMLoc TileLoc = getLoc();
4784
4785 unsigned FirstReg, ElementWidth;
4786 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4787 if (!ParseRes.isSuccess()) {
4788 getLexer().UnLex(LCurly);
4789 return ParseRes;
4790 }
4791
4792 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4793
4794 unsigned PrevReg = FirstReg;
4795
4796 SmallSet<unsigned, 8> DRegs;
4797 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4798
4799 SmallSet<unsigned, 8> SeenRegs;
4800 SeenRegs.insert(FirstReg);
4801
4802 while (parseOptionalToken(AsmToken::Comma)) {
4803 TileLoc = getLoc();
4804 unsigned Reg, NextElementWidth;
4805 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4806 if (!ParseRes.isSuccess())
4807 return ParseRes;
4808
4809 // Element size must match on all regs in the list.
4810 if (ElementWidth != NextElementWidth)
4811 return Error(TileLoc, "mismatched register size suffix");
4812
4813 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4814 Warning(TileLoc, "tile list not in ascending order");
4815
4816 if (SeenRegs.contains(Reg))
4817 Warning(TileLoc, "duplicate tile in list");
4818 else {
4819 SeenRegs.insert(Reg);
4820 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4821 }
4822
4823 PrevReg = Reg;
4824 }
4825
4826 if (parseToken(AsmToken::RCurly, "'}' expected"))
4827 return ParseStatus::Failure;
4828
4829 unsigned RegMask = 0;
4830 for (auto Reg : DRegs)
4831 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4832 RI->getEncodingValue(AArch64::ZAD0));
4833 Operands.push_back(
4834 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4835
4836 return ParseStatus::Success;
4837}
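// Matrix tile lists are used by the SME ZERO instruction, e.g.
// (illustrative): "zero {za}" or "zero {za0.d, za2.d}".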
4838
4839template <RegKind VectorKind>
4840ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4841 bool ExpectMatch) {
4842 MCAsmParser &Parser = getParser();
4843 if (!getTok().is(AsmToken::LCurly))
4844 return ParseStatus::NoMatch;
4845
4846 // Wrapper around parse function
4847 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4848 bool NoMatchIsError) -> ParseStatus {
4849 auto RegTok = getTok();
4850 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4851 if (ParseRes.isSuccess()) {
4852 if (parseVectorKind(Kind, VectorKind))
4853 return ParseRes;
4854 llvm_unreachable("Expected a valid vector kind");
4855 }
4856
4857 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4858 RegTok.getString().equals_insensitive("zt0"))
4859 return ParseStatus::NoMatch;
4860
4861 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4862 (ParseRes.isNoMatch() && NoMatchIsError &&
4863 !RegTok.getString().starts_with_insensitive("za")))
4864 return Error(Loc, "vector register expected");
4865
4866 return ParseStatus::NoMatch;
4867 };
4868
4869 unsigned NumRegs = getNumRegsForRegKind(VectorKind);
4870 SMLoc S = getLoc();
4871 auto LCurly = getTok();
4872 Lex(); // Eat left bracket token.
4873
4874 StringRef Kind;
4875 MCRegister FirstReg;
4876 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4877
4878 // Put back the original left bracket if there was no match, so that
4879 // different types of list-operands can be matched (e.g. SVE, Neon).
4880 if (ParseRes.isNoMatch())
4881 Parser.getLexer().UnLex(LCurly);
4882
4883 if (!ParseRes.isSuccess())
4884 return ParseRes;
4885
4886 MCRegister PrevReg = FirstReg;
4887 unsigned Count = 1;
4888
4889 unsigned Stride = 1;
4890 if (parseOptionalToken(AsmToken::Minus)) {
4891 SMLoc Loc = getLoc();
4892 StringRef NextKind;
4893
4894 MCRegister Reg;
4895 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4896 if (!ParseRes.isSuccess())
4897 return ParseRes;
4898
4899 // Any kind suffixes must match on all regs in the list.
4900 if (Kind != NextKind)
4901 return Error(Loc, "mismatched register size suffix");
4902
4903 unsigned Space =
4904 (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
4905
4906 if (Space == 0 || Space > 3)
4907 return Error(Loc, "invalid number of vectors");
4908
4909 Count += Space;
4910 }
4911 else {
4912 bool HasCalculatedStride = false;
4913 while (parseOptionalToken(AsmToken::Comma)) {
4914 SMLoc Loc = getLoc();
4915 StringRef NextKind;
4916 MCRegister Reg;
4917 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4918 if (!ParseRes.isSuccess())
4919 return ParseRes;
4920
4921 // Any kind suffixes must match on all regs in the list.
4922 if (Kind != NextKind)
4923 return Error(Loc, "mismatched register size suffix");
4924
4925 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4926 unsigned PrevRegVal =
4927 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4928 if (!HasCalculatedStride) {
4929 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4930 : (NumRegs - (PrevRegVal - RegVal));
4931 HasCalculatedStride = true;
4932 }
4933
4934 // Registers must be incremental (with wraparound at the last register).
4935 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4936 return Error(Loc, "registers must have the same sequential stride");
4937
4938 PrevReg = Reg;
4939 ++Count;
4940 }
4941 }
4942
4943 if (parseToken(AsmToken::RCurly, "'}' expected"))
4944 return ParseStatus::Failure;
4945
4946 if (Count > 4)
4947 return Error(S, "invalid number of vectors");
4948
4949 unsigned NumElements = 0;
4950 unsigned ElementWidth = 0;
4951 if (!Kind.empty()) {
4952 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4953 std::tie(NumElements, ElementWidth) = *VK;
4954 }
4955
4956 Operands.push_back(AArch64Operand::CreateVectorList(
4957 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4958 getLoc(), getContext()));
4959
4960 if (getTok().is(AsmToken::LBrac)) {
4961 ParseStatus Res = tryParseVectorIndex(Operands);
4962 if (Res.isFailure())
4963 return ParseStatus::Failure;
4964 return ParseStatus::Success;
4965 }
4966
4967 return ParseStatus::Success;
4968}
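// Illustrative vector lists:
//   ld1 {v0.4s, v1.4s}, [x0]              // Neon, consecutive registers
//   ld1b {z0.b}, p0/z, [x0]               // SVE
//   ld1w {z0.s, z4.s, z8.s, z12.s}, ...   // SME2 strided (stride 4) list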
4969
4970/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4971bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4972 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4973 if (!ParseRes.isSuccess())
4974 return true;
4975
4976 return tryParseVectorIndex(Operands).isFailure();
4977}
4978
4979ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4980 SMLoc StartLoc = getLoc();
4981
4982 MCRegister RegNum;
4983 ParseStatus Res = tryParseScalarRegister(RegNum);
4984 if (!Res.isSuccess())
4985 return Res;
4986
4987 if (!parseOptionalToken(AsmToken::Comma)) {
4988 Operands.push_back(AArch64Operand::CreateReg(
4989 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4990 return ParseStatus::Success;
4991 }
4992
4993 parseOptionalToken(AsmToken::Hash);
4994
4995 if (getTok().isNot(AsmToken::Integer))
4996 return Error(getLoc(), "index must be absent or #0");
4997
4998 const MCExpr *ImmVal;
4999 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
5000 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
5001 return Error(getLoc(), "index must be absent or #0");
5002
5003 Operands.push_back(AArch64Operand::CreateReg(
5004 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
5005 return ParseStatus::Success;
5006}
5007
5008ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
5009 SMLoc StartLoc = getLoc();
5010 const AsmToken &Tok = getTok();
5011 std::string Name = Tok.getString().lower();
5012
5013 MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);
5014
5015 if (!Reg)
5016 return ParseStatus::NoMatch;
5017
5018 Operands.push_back(AArch64Operand::CreateReg(
5019 Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
5020 Lex(); // Eat register.
5021
5022 // Check if register is followed by an index
5023 if (parseOptionalToken(AsmToken::LBrac)) {
5024 Operands.push_back(
5025 AArch64Operand::CreateToken("[", getLoc(), getContext()));
5026 const MCExpr *ImmVal;
5027 if (getParser().parseExpression(ImmVal))
5028 return ParseStatus::NoMatch;
5029 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5030 if (!MCE)
5031 return TokError("immediate value expected for vector index");
5032 Operands.push_back(AArch64Operand::CreateImm(
5033 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
5034 getLoc(), getContext()));
5035 if (parseOptionalToken(AsmToken::Comma))
5036 if (parseOptionalMulOperand(Operands))
5037 return ParseStatus::Failure;
5038 if (parseToken(AsmToken::RBrac, "']' expected"))
5039 return ParseStatus::Failure;
5040 Operands.push_back(
5041 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5042 }
5043 return ParseStatus::Success;
5044}
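// Illustrative ZT0 operands:
//   luti2 z0.b, zt0, z1[0]    // lookup-table read
//   ldr zt0, [x0]             // ZT0 load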
5045
5046template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
5047ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
5048 SMLoc StartLoc = getLoc();
5049
5050 MCRegister RegNum;
5051 ParseStatus Res = tryParseScalarRegister(RegNum);
5052 if (!Res.isSuccess())
5053 return Res;
5054
5055 // No shift/extend is the default.
5056 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
5057 Operands.push_back(AArch64Operand::CreateReg(
5058 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
5059 return ParseStatus::Success;
5060 }
5061
5062 // Eat the comma
5063 Lex();
5064
5065 // Match the shift
5066 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5067 Res = tryParseOptionalShiftExtend(ExtOpnd);
5068 if (!Res.isSuccess())
5069 return Res;
5070
5071 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
5072 Operands.push_back(AArch64Operand::CreateReg(
5073 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
5074 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5075 Ext->hasShiftExtendAmount()));
5076
5077 return ParseStatus::Success;
5078}
5079
5080bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
5081 MCAsmParser &Parser = getParser();
5082
5083 // Some SVE instructions have a decoration after the immediate, i.e.
5084 // "mul vl". We parse them here and add tokens, which must be present in the
5085 // asm string in the tablegen instruction.
5086 bool NextIsVL =
5087 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
5088 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
5089 if (!getTok().getString().equals_insensitive("mul") ||
5090 !(NextIsVL || NextIsHash))
5091 return true;
5092
5093 Operands.push_back(
5094 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
5095 Lex(); // Eat the "mul"
5096
5097 if (NextIsVL) {
5098 Operands.push_back(
5099 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
5100 Lex(); // Eat the "vl"
5101 return false;
5102 }
5103
5104 if (NextIsHash) {
5105 Lex(); // Eat the #
5106 SMLoc S = getLoc();
5107
5108 // Parse immediate operand.
5109 const MCExpr *ImmVal;
5110 if (!Parser.parseExpression(ImmVal))
5111 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
5112 Operands.push_back(AArch64Operand::CreateImm(
5113 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
5114 getContext()));
5115 return false;
5116 }
5117 }
5118
5119 return Error(getLoc(), "expected 'vl' or '#<imm>'");
5120}
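// The "mul" decoration appears in SVE addressing and element-count
// instructions, e.g. (illustrative):
//   ld1b {z0.b}, p0/z, [x0, #1, mul vl]
//   cntb x0, all, mul #4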
5121
5122bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
5123 StringRef &VecGroup) {
5124 MCAsmParser &Parser = getParser();
5125 auto Tok = Parser.getTok();
5126 if (Tok.isNot(AsmToken::Identifier))
5127 return true;
5128
5129 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
5130 .Case("vgx2", "vgx2")
5131 .Case("vgx4", "vgx4")
5132 .Default("");
5133
5134 if (VG.empty())
5135 return true;
5136
5137 VecGroup = VG;
5138 Parser.Lex(); // Eat vgx[2|4]
5139 return false;
5140}
5141
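// Parses the bare keyword operand of two-word mnemonics, e.g. the "sm"/"za"
// in "smstart sm" or "smstop za"; other identifiers are passed through as-is.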
5142bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
5143 auto Tok = getTok();
5144 if (Tok.isNot(AsmToken::Identifier))
5145 return true;
5146
5147 auto Keyword = Tok.getString();
5148 Keyword = StringSwitch<StringRef>(Keyword.lower())
5149 .Case("sm", "sm")
5150 .Case("za", "za")
5151 .Default(Keyword);
5152 Operands.push_back(
5153 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
5154
5155 Lex();
5156 return false;
5157}
5158
5159/// parseOperand - Parse an AArch64 instruction operand. For now this parses the
5160/// operand regardless of the mnemonic.
5161bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
5162 bool invertCondCode) {
5163 MCAsmParser &Parser = getParser();
5164
5165 ParseStatus ResTy =
5166 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
5167
5168 // Check if the current operand has a custom associated parser, if so, try to
5169 // custom parse the operand, or fallback to the general approach.
5170 if (ResTy.isSuccess())
5171 return false;
5172 // If there wasn't a custom match, try the generic matcher below. Otherwise,
5173 // there was a match, but an error occurred, in which case, just return that
5174 // the operand parsing failed.
5175 if (ResTy.isFailure())
5176 return true;
5177
5178 // Nothing custom, so do general case parsing.
5179 SMLoc S, E;
5180 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
5181 if (parseOptionalToken(AsmToken::Comma)) {
5182 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
5183 if (!Res.isNoMatch())
5184 return Res.isFailure();
5185 getLexer().UnLex(SavedTok);
5186 }
5187 return false;
5188 };
5189 switch (getLexer().getKind()) {
5190 default: {
5191 SMLoc S = getLoc();
5192 const MCExpr *Expr;
5193 if (parseSymbolicImmVal(Expr))
5194 return Error(S, "invalid operand");
5195
5196 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5197 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
5198 return parseOptionalShiftExtend(getTok());
5199 }
5200 case AsmToken::LBrac: {
5201 Operands.push_back(
5202 AArch64Operand::CreateToken("[", getLoc(), getContext()));
5203 Lex(); // Eat '['
5204
5205 // There's no comma after a '[', so we can parse the next operand
5206 // immediately.
5207 return parseOperand(Operands, false, false);
5208 }
5209 case AsmToken::LCurly: {
5210 if (!parseNeonVectorList(Operands))
5211 return false;
5212
5213 Operands.push_back(
5214 AArch64Operand::CreateToken("{", getLoc(), getContext()));
5215 Lex(); // Eat '{'
5216
5217 // There's no comma after a '{', so we can parse the next operand
5218 // immediately.
5219 return parseOperand(Operands, false, false);
5220 }
5221 case AsmToken::Identifier: {
5222 // See if this is a "VG" decoration used by SME instructions.
5223 StringRef VecGroup;
5224 if (!parseOptionalVGOperand(Operands, VecGroup)) {
5225 Operands.push_back(
5226 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
5227 return false;
5228 }
5229 // If we're expecting a Condition Code operand, then just parse that.
5230 if (isCondCode)
5231 return parseCondCode(Operands, invertCondCode);
5232
5233 // If it's a register name, parse it.
5234 if (!parseRegister(Operands)) {
5235 // Parse an optional shift/extend modifier.
5236 AsmToken SavedTok = getTok();
5237 if (parseOptionalToken(AsmToken::Comma)) {
5238 // The operand after the register may be a label (e.g. ADR/ADRP). Check
5239 // such cases and don't report an error when <label> happens to match a
5240 // shift/extend modifier.
5241 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
5242 /*ParseForAllFeatures=*/true);
5243 if (!Res.isNoMatch())
5244 return Res.isFailure();
5245 Res = tryParseOptionalShiftExtend(Operands);
5246 if (!Res.isNoMatch())
5247 return Res.isFailure();
5248 getLexer().UnLex(SavedTok);
5249 }
5250 return false;
5251 }
5252
5253 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5254 // by SVE instructions.
5255 if (!parseOptionalMulOperand(Operands))
5256 return false;
5257
5258 // If this is a two-word mnemonic, parse its special keyword
5259 // operand as an identifier.
5260 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5261 Mnemonic == "gcsb")
5262 return parseKeywordOperand(Operands);
5263
5264 // This was not a register so parse other operands that start with an
5265 // identifier (like labels) as expressions and create them as immediates.
5266 const MCExpr *IdVal, *Term;
5267 S = getLoc();
5268 if (getParser().parseExpression(IdVal))
5269 return true;
5270 if (getParser().parseAtSpecifier(IdVal, E))
5271 return true;
5272 std::optional<MCBinaryExpr::Opcode> Opcode;
5273 if (parseOptionalToken(AsmToken::Plus))
5274 Opcode = MCBinaryExpr::Add;
5275 else if (parseOptionalToken(AsmToken::Minus))
5276 Opcode = MCBinaryExpr::Sub;
5277 if (Opcode) {
5278 if (getParser().parsePrimaryExpr(Term, E))
5279 return true;
5280 IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
5281 }
5282 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5283
5284 // Parse an optional shift/extend modifier.
5285 return parseOptionalShiftExtend(getTok());
5286 }
5287 case AsmToken::Integer:
5288 case AsmToken::Real:
5289 case AsmToken::Hash: {
5290 // #42 -> immediate.
5291 S = getLoc();
5292
5293 parseOptionalToken(AsmToken::Hash);
5294
5295 // Parse a negative sign
5296 bool isNegative = false;
5297 if (getTok().is(AsmToken::Minus)) {
5298 isNegative = true;
5299 // We need to consume this token only when we have a Real, otherwise
5300 // we let parseSymbolicImmVal take care of it
5301 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5302 Lex();
5303 }
5304
5305 // The only Real that should come through here is a literal #0.0 for
5306 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5307 // so convert the value.
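 // For example, "fcmp s0, #0.0" reaches this point and is rebuilt as the raw
 // tokens "#0" and ".0" rather than a floating-point immediate.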
5308 const AsmToken &Tok = getTok();
5309 if (Tok.is(AsmToken::Real)) {
5310 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5311 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5312 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5313 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5314 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5315 return TokError("unexpected floating point literal");
5316 else if (IntVal != 0 || isNegative)
5317 return TokError("expected floating-point constant #0.0");
5318 Lex(); // Eat the token.
5319
5320 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5321 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5322 return false;
5323 }
5324
5325 const MCExpr *ImmVal;
5326 if (parseSymbolicImmVal(ImmVal))
5327 return true;
5328
5329 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5330 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5331
5332 // Parse an optional shift/extend modifier.
5333 return parseOptionalShiftExtend(Tok);
5334 }
5335 case AsmToken::Equal: {
5336 SMLoc Loc = getLoc();
5337 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5338 return TokError("unexpected token in operand");
5339 Lex(); // Eat '='
5340 const MCExpr *SubExprVal;
5341 if (getParser().parseExpression(SubExprVal))
5342 return true;
5343
5344 if (Operands.size() < 2 ||
5345 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5346 return Error(Loc, "Only valid when first operand is register");
5347
5348 bool IsXReg =
5349 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5350 Operands[1]->getReg());
5351
5352 MCContext& Ctx = getContext();
5353 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5354 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
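 // For example (illustrative), "ldr x0, =0x10000" becomes
 // "movz x0, #1, lsl #16" instead of a literal-pool load.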
5355 if (isa<MCConstantExpr>(SubExprVal)) {
5356 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5357 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5358 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5359 ShiftAmt += 16;
5360 Imm >>= 16;
5361 }
5362 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5363 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5364 Operands.push_back(AArch64Operand::CreateImm(
5365 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5366 if (ShiftAmt)
5367 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5368 ShiftAmt, true, S, E, Ctx));
5369 return false;
5370 }
5371 APInt Simm = APInt(64, Imm << ShiftAmt);
5372 // check if the immediate is an unsigned or signed 32-bit int for W regs
5373 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5374 return Error(Loc, "Immediate too large for register");
5375 }
5376 // If it is a label or an imm that cannot fit in a movz, put it into CP.
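 // e.g. (illustrative) "ldr x0, =some_label" keeps the LDR and loads the
 // value from the emitted constant-pool entry.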
5377 const MCExpr *CPLoc =
5378 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5379 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5380 return false;
5381 }
5382 }
5383}
5384
5385bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5386 const MCExpr *Expr = nullptr;
5387 SMLoc L = getLoc();
5388 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5389 return true;
5390 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5391 if (check(!Value, L, "expected constant expression"))
5392 return true;
5393 Out = Value->getValue();
5394 return false;
5395}
5396
5397bool AArch64AsmParser::parseComma() {
5398 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5399 return true;
5400 // Eat the comma
5401 Lex();
5402 return false;
5403}
5404
5405bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5406 unsigned First, unsigned Last) {
5407 MCRegister Reg;
5408 SMLoc Start, End;
5409 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5410 return true;
5411
5412 // Special handling for FP and LR; they aren't linearly after x28 in
5413 // the registers enum.
5414 unsigned RangeEnd = Last;
5415 if (Base == AArch64::X0) {
5416 if (Last == AArch64::FP) {
5417 RangeEnd = AArch64::X28;
5418 if (Reg == AArch64::FP) {
5419 Out = 29;
5420 return false;
5421 }
5422 }
5423 if (Last == AArch64::LR) {
5424 RangeEnd = AArch64::X28;
5425 if (Reg == AArch64::FP) {
5426 Out = 29;
5427 return false;
5428 } else if (Reg == AArch64::LR) {
5429 Out = 30;
5430 return false;
5431 }
5432 }
5433 }
5434
5435 if (check(Reg < First || Reg > RangeEnd, Start,
5436 Twine("expected register in range ") +
5437 AArch64InstPrinter::getRegisterName(First) + " to " +
5438 AArch64InstPrinter::getRegisterName(Last)))
5439 return true;
5440 Out = Reg - Base;
5441 return false;
5442}
5443
5444bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5445 const MCParsedAsmOperand &Op2) const {
5446 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5447 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5448
5449 if (AOp1.isVectorList() && AOp2.isVectorList())
5450 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5451 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5452 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5453
5454 if (!AOp1.isReg() || !AOp2.isReg())
5455 return false;
5456
5457 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5458 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5459 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5460
5461 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5462 "Testing equality of non-scalar registers not supported");
5463
5464 // Check if the registers match their sub/super register classes.
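 // e.g. with an EqualsSuperReg constraint a tied "w0" operand is accepted
 // when the other operand is "x0".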
5465 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5466 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5467 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5468 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5469 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5470 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5471 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5472 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5473
5474 return false;
5475}
5476
5477/// Parse an AArch64 instruction mnemonic followed by its operands.
5478bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5479 StringRef Name, SMLoc NameLoc,
5480 OperandVector &Operands) {
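 // Canonicalize bare condition-code branches (e.g. "beq") to the dotted
 // "b.<cc>" spelling so the '.'-suffix handling below can split off the
 // condition code.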
5481 Name = StringSwitch<StringRef>(Name.lower())
5482 .Case("beq", "b.eq")
5483 .Case("bne", "b.ne")
5484 .Case("bhs", "b.hs")
5485 .Case("bcs", "b.cs")
5486 .Case("blo", "b.lo")
5487 .Case("bcc", "b.cc")
5488 .Case("bmi", "b.mi")
5489 .Case("bpl", "b.pl")
5490 .Case("bvs", "b.vs")
5491 .Case("bvc", "b.vc")
5492 .Case("bhi", "b.hi")
5493 .Case("bls", "b.ls")
5494 .Case("bge", "b.ge")
5495 .Case("blt", "b.lt")
5496 .Case("bgt", "b.gt")
5497 .Case("ble", "b.le")
5498 .Case("bal", "b.al")
5499 .Case("bnv", "b.nv")
5500 .Default(Name);
5501
5502 // First check for the AArch64-specific .req directive.
5503 if (getTok().is(AsmToken::Identifier) &&
5504 getTok().getIdentifier().lower() == ".req") {
5505 parseDirectiveReq(Name, NameLoc);
5506 // We always return 'error' for this, as we're done with this
5507 // statement and don't need to match the 'instruction'.
5508 return true;
5509 }
5510
5511 // Create the leading tokens for the mnemonic, split by '.' characters.
5512 size_t Start = 0, Next = Name.find('.');
5513 StringRef Head = Name.slice(Start, Next);
5514
5515 // IC, DC, AT, TLBI, MLBI, PLBI, GIC{R}, GSB and Prediction invalidation
5516 // instructions are aliases for the SYS instruction.
5517 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5518 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
5519 Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
5520 return parseSysAlias(Head, NameLoc, Operands);
5521
5522 // GICR instructions are aliases for the SYSL instruction.
5523 if (Head == "gicr")
5524 return parseSyslAlias(Head, NameLoc, Operands);
5525
5526 // TLBIP instructions are aliases for the SYSP instruction.
5527 if (Head == "tlbip")
5528 return parseSyspAlias(Head, NameLoc, Operands);
5529
5530 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5531 Mnemonic = Head;
5532
5533 // Handle condition codes for a branch mnemonic
5534 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5535 Start = Next;
5536 Next = Name.find('.', Start + 1);
5537 Head = Name.slice(Start + 1, Next);
5538
5539 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5540 (Head.data() - Name.data()));
5541 std::string Suggestion;
5542 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5543 if (CC == AArch64CC::Invalid) {
5544 std::string Msg = "invalid condition code";
5545 if (!Suggestion.empty())
5546 Msg += ", did you mean " + Suggestion + "?";
5547 return Error(SuffixLoc, Msg);
5548 }
5549 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5550 /*IsSuffix=*/true));
5551 Operands.push_back(
5552 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5553 }
5554
5555 // Add the remaining tokens in the mnemonic.
5556 while (Next != StringRef::npos) {
5557 Start = Next;
5558 Next = Name.find('.', Start + 1);
5559 Head = Name.slice(Start, Next);
5560 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5561 (Head.data() - Name.data()) + 1);
5562 Operands.push_back(AArch64Operand::CreateToken(
5563 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5564 }
5565
5566 // Conditional compare instructions have a Condition Code operand, which needs
5567 // to be parsed and an immediate operand created.
5568 bool condCodeFourthOperand =
5569 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5570 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5571 Head == "csinc" || Head == "csinv" || Head == "csneg");
5572
5573 // These instructions are aliases to some of the conditional select
5574 // instructions. However, the condition code is inverted in the aliased
5575 // instruction.
5576 //
5577 // FIXME: Is this the correct way to handle these? Or should the parser
5578 // generate the aliased instructions directly?
5579 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5580 bool condCodeThirdOperand =
5581 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5582
5583 // Read the remaining operands.
5584 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5585
5586 unsigned N = 1;
5587 do {
5588 // Parse and remember the operand.
5589 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5590 (N == 3 && condCodeThirdOperand) ||
5591 (N == 2 && condCodeSecondOperand),
5592 condCodeSecondOperand || condCodeThirdOperand)) {
5593 return true;
5594 }
5595
5596 // After successfully parsing some operands there are three special cases
5597 // to consider (i.e. notional operands not separated by commas). Two are
5598 // due to memory specifiers:
5599 // + An RBrac will end an address for load/store/prefetch
5600 // + An '!' will indicate a pre-indexed operation.
5601 //
5602 // And a further case is '}', which ends a group of tokens specifying the
5603 // SME accumulator array 'ZA' or tile vector, i.e.
5604 //
5605 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5606 //
5607 // It's someone else's responsibility to make sure these tokens are sane
5608 // in the given context!
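 // For example (illustrative): "ldr x0, [x1, #8]!" closes with ']' then '!',
 // and "zero { za }" closes with '}'.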
5609
5610 if (parseOptionalToken(AsmToken::RBrac))
5611 Operands.push_back(
5612 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5613 if (parseOptionalToken(AsmToken::Exclaim))
5614 Operands.push_back(
5615 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5616 if (parseOptionalToken(AsmToken::RCurly))
5617 Operands.push_back(
5618 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5619
5620 ++N;
5621 } while (parseOptionalToken(AsmToken::Comma));
5622 }
5623
5624 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5625 return true;
5626
5627 return false;
5628}
5629
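// Helper: true when Reg is ZReg itself or the B/H/S/D/Q register with the
// same number, i.e. an alias of the movprfx destination checked below.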
5630static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5631 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5632 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5633 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5634 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5635 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5636 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5637 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5638}
5639
5640// FIXME: This entire function is a giant hack to provide us with decent
5641// operand range validation/diagnostics until TableGen/MC can be extended
5642// to support autogeneration of this kind of validation.
5643bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5644 SmallVectorImpl<SMLoc> &Loc) {
5645 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5646 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5647
5648 // A prefix only applies to the instruction following it. Here we extract
5649 // prefix information for the next instruction before validating the current
5650 // one so that in the case of failure we don't erroneously continue using the
5651 // current prefix.
5652 PrefixInfo Prefix = NextPrefix;
5653 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5654
5655 // Before validating the instruction in isolation we run through the rules
5656 // applicable when it follows a prefix instruction.
5657 // NOTE: brk & hlt can be prefixed but require no additional validation.
5658 if (Prefix.isActive() &&
5659 (Inst.getOpcode() != AArch64::BRK) &&
5660 (Inst.getOpcode() != AArch64::HLT)) {
5661
5662 // Prefixed instructions must have a destructive operand.
5663 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5664 AArch64::NotDestructive)
5665 return Error(IDLoc, "instruction is unpredictable when following a"
5666 " movprfx, suggest replacing movprfx with mov");
5667
5668 // Destination operands must match.
5669 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5670 return Error(Loc[0], "instruction is unpredictable when following a"
5671 " movprfx writing to a different destination");
5672
5673 // Destination operand must not be used in any other location.
5674 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5675 if (Inst.getOperand(i).isReg() &&
5676 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5677 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5678 return Error(Loc[0], "instruction is unpredictable when following a"
5679 " movprfx and destination also used as non-destructive"
5680 " source");
5681 }
5682
5683 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5684 if (Prefix.isPredicated()) {
5685 int PgIdx = -1;
5686
5687 // Find the instruction's general predicate.
5688 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5689 if (Inst.getOperand(i).isReg() &&
5690 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5691 PgIdx = i;
5692 break;
5693 }
5694
5695 // Instruction must be predicated if the movprfx is predicated.
5696 if (PgIdx == -1 ||
5697 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5698 return Error(IDLoc, "instruction is unpredictable when following a"
5699 " predicated movprfx, suggest using unpredicated movprfx");
5700
5701 // Instruction must use same general predicate as the movprfx.
5702 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5703 return Error(IDLoc, "instruction is unpredictable when following a"
5704 " predicated movprfx using a different general predicate");
5705
5706 // Instruction element type must match the movprfx.
5707 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5708 return Error(IDLoc, "instruction is unpredictable when following a"
5709 " predicated movprfx with a different element size");
5710 }
5711 }
5712
5713 // On ARM64EC, only valid registers may be used. Warn against using
5714 // explicitly disallowed registers.
5715 if (IsWindowsArm64EC) {
5716 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5717 if (Inst.getOperand(i).isReg()) {
5718 MCRegister Reg = Inst.getOperand(i).getReg();
5719 // At this point, vector registers are matched to their
5720 // appropriately sized alias.
5721 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5722 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5723 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5724 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5725 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5726 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5727 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5728 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5729 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5730 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5731 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5732 " is disallowed on ARM64EC.");
5733 }
5734 }
5735 }
5736 }
5737
5738 // Check for indexed addressing modes w/ the base register being the
5739 // same as a destination/source register or pair load where
5740 // the Rt == Rt2. All of those are undefined behaviour.
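 // e.g. (illustrative) "ldp x0, x1, [x0], #16" and "ldp x0, x0, [x1]" are
 // both rejected below as unpredictable.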
5741 switch (Inst.getOpcode()) {
5742 case AArch64::LDPSWpre:
5743 case AArch64::LDPWpost:
5744 case AArch64::LDPWpre:
5745 case AArch64::LDPXpost:
5746 case AArch64::LDPXpre: {
5747 MCRegister Rt = Inst.getOperand(1).getReg();
5748 MCRegister Rt2 = Inst.getOperand(2).getReg();
5749 MCRegister Rn = Inst.getOperand(3).getReg();
5750 if (RI->isSubRegisterEq(Rn, Rt))
5751 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5752 "is also a destination");
5753 if (RI->isSubRegisterEq(Rn, Rt2))
5754 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5755 "is also a destination");
5756 [[fallthrough]];
5757 }
5758 case AArch64::LDR_ZA:
5759 case AArch64::STR_ZA: {
5760 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5761 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5762 return Error(Loc[1],
5763 "unpredictable instruction, immediate and offset mismatch.");
5764 break;
5765 }
5766 case AArch64::LDPDi:
5767 case AArch64::LDPQi:
5768 case AArch64::LDPSi:
5769 case AArch64::LDPSWi:
5770 case AArch64::LDPWi:
5771 case AArch64::LDPXi: {
5772 MCRegister Rt = Inst.getOperand(0).getReg();
5773 MCRegister Rt2 = Inst.getOperand(1).getReg();
5774 if (Rt == Rt2)
5775 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5776 break;
5777 }
5778 case AArch64::LDPDpost:
5779 case AArch64::LDPDpre:
5780 case AArch64::LDPQpost:
5781 case AArch64::LDPQpre:
5782 case AArch64::LDPSpost:
5783 case AArch64::LDPSpre:
5784 case AArch64::LDPSWpost: {
5785 MCRegister Rt = Inst.getOperand(1).getReg();
5786 MCRegister Rt2 = Inst.getOperand(2).getReg();
5787 if (Rt == Rt2)
5788 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5789 break;
5790 }
5791 case AArch64::STPDpost:
5792 case AArch64::STPDpre:
5793 case AArch64::STPQpost:
5794 case AArch64::STPQpre:
5795 case AArch64::STPSpost:
5796 case AArch64::STPSpre:
5797 case AArch64::STPWpost:
5798 case AArch64::STPWpre:
5799 case AArch64::STPXpost:
5800 case AArch64::STPXpre: {
5801 MCRegister Rt = Inst.getOperand(1).getReg();
5802 MCRegister Rt2 = Inst.getOperand(2).getReg();
5803 MCRegister Rn = Inst.getOperand(3).getReg();
5804 if (RI->isSubRegisterEq(Rn, Rt))
5805 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5806 "is also a source");
5807 if (RI->isSubRegisterEq(Rn, Rt2))
5808 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5809 "is also a source");
5810 break;
5811 }
5812 case AArch64::LDRBBpre:
5813 case AArch64::LDRBpre:
5814 case AArch64::LDRHHpre:
5815 case AArch64::LDRHpre:
5816 case AArch64::LDRSBWpre:
5817 case AArch64::LDRSBXpre:
5818 case AArch64::LDRSHWpre:
5819 case AArch64::LDRSHXpre:
5820 case AArch64::LDRSWpre:
5821 case AArch64::LDRWpre:
5822 case AArch64::LDRXpre:
5823 case AArch64::LDRBBpost:
5824 case AArch64::LDRBpost:
5825 case AArch64::LDRHHpost:
5826 case AArch64::LDRHpost:
5827 case AArch64::LDRSBWpost:
5828 case AArch64::LDRSBXpost:
5829 case AArch64::LDRSHWpost:
5830 case AArch64::LDRSHXpost:
5831 case AArch64::LDRSWpost:
5832 case AArch64::LDRWpost:
5833 case AArch64::LDRXpost: {
5834 MCRegister Rt = Inst.getOperand(1).getReg();
5835 MCRegister Rn = Inst.getOperand(2).getReg();
5836 if (RI->isSubRegisterEq(Rn, Rt))
5837 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5838 "is also a source");
5839 break;
5840 }
5841 case AArch64::STRBBpost:
5842 case AArch64::STRBpost:
5843 case AArch64::STRHHpost:
5844 case AArch64::STRHpost:
5845 case AArch64::STRWpost:
5846 case AArch64::STRXpost:
5847 case AArch64::STRBBpre:
5848 case AArch64::STRBpre:
5849 case AArch64::STRHHpre:
5850 case AArch64::STRHpre:
5851 case AArch64::STRWpre:
5852 case AArch64::STRXpre: {
5853 MCRegister Rt = Inst.getOperand(1).getReg();
5854 MCRegister Rn = Inst.getOperand(2).getReg();
5855 if (RI->isSubRegisterEq(Rn, Rt))
5856 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5857 "is also a source");
5858 break;
5859 }
5860 case AArch64::STXRB:
5861 case AArch64::STXRH:
5862 case AArch64::STXRW:
5863 case AArch64::STXRX:
5864 case AArch64::STLXRB:
5865 case AArch64::STLXRH:
5866 case AArch64::STLXRW:
5867 case AArch64::STLXRX: {
5868 MCRegister Rs = Inst.getOperand(0).getReg();
5869 MCRegister Rt = Inst.getOperand(1).getReg();
5870 MCRegister Rn = Inst.getOperand(2).getReg();
5871 if (RI->isSubRegisterEq(Rt, Rs) ||
5872 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5873 return Error(Loc[0],
5874 "unpredictable STXR instruction, status is also a source");
5875 break;
5876 }
5877 case AArch64::STXPW:
5878 case AArch64::STXPX:
5879 case AArch64::STLXPW:
5880 case AArch64::STLXPX: {
5881 MCRegister Rs = Inst.getOperand(0).getReg();
5882 MCRegister Rt1 = Inst.getOperand(1).getReg();
5883 MCRegister Rt2 = Inst.getOperand(2).getReg();
5884 MCRegister Rn = Inst.getOperand(3).getReg();
5885 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5886 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5887 return Error(Loc[0],
5888 "unpredictable STXP instruction, status is also a source");
5889 break;
5890 }
5891 case AArch64::LDRABwriteback:
5892 case AArch64::LDRAAwriteback: {
5893 MCRegister Xt = Inst.getOperand(0).getReg();
5894 MCRegister Xn = Inst.getOperand(1).getReg();
5895 if (Xt == Xn)
5896 return Error(Loc[0],
5897 "unpredictable LDRA instruction, writeback base"
5898 " is also a destination");
5899 break;
5900 }
5901 }
5902
5903 // Check v8.8-A memops instructions.
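 // e.g. (illustrative) "cpyfp [x0]!, [x1]!, x2!" requires three distinct
 // X registers; reusing one of them is rejected below.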
5904 switch (Inst.getOpcode()) {
5905 case AArch64::CPYFP:
5906 case AArch64::CPYFPWN:
5907 case AArch64::CPYFPRN:
5908 case AArch64::CPYFPN:
5909 case AArch64::CPYFPWT:
5910 case AArch64::CPYFPWTWN:
5911 case AArch64::CPYFPWTRN:
5912 case AArch64::CPYFPWTN:
5913 case AArch64::CPYFPRT:
5914 case AArch64::CPYFPRTWN:
5915 case AArch64::CPYFPRTRN:
5916 case AArch64::CPYFPRTN:
5917 case AArch64::CPYFPT:
5918 case AArch64::CPYFPTWN:
5919 case AArch64::CPYFPTRN:
5920 case AArch64::CPYFPTN:
5921 case AArch64::CPYFM:
5922 case AArch64::CPYFMWN:
5923 case AArch64::CPYFMRN:
5924 case AArch64::CPYFMN:
5925 case AArch64::CPYFMWT:
5926 case AArch64::CPYFMWTWN:
5927 case AArch64::CPYFMWTRN:
5928 case AArch64::CPYFMWTN:
5929 case AArch64::CPYFMRT:
5930 case AArch64::CPYFMRTWN:
5931 case AArch64::CPYFMRTRN:
5932 case AArch64::CPYFMRTN:
5933 case AArch64::CPYFMT:
5934 case AArch64::CPYFMTWN:
5935 case AArch64::CPYFMTRN:
5936 case AArch64::CPYFMTN:
5937 case AArch64::CPYFE:
5938 case AArch64::CPYFEWN:
5939 case AArch64::CPYFERN:
5940 case AArch64::CPYFEN:
5941 case AArch64::CPYFEWT:
5942 case AArch64::CPYFEWTWN:
5943 case AArch64::CPYFEWTRN:
5944 case AArch64::CPYFEWTN:
5945 case AArch64::CPYFERT:
5946 case AArch64::CPYFERTWN:
5947 case AArch64::CPYFERTRN:
5948 case AArch64::CPYFERTN:
5949 case AArch64::CPYFET:
5950 case AArch64::CPYFETWN:
5951 case AArch64::CPYFETRN:
5952 case AArch64::CPYFETN:
5953 case AArch64::CPYP:
5954 case AArch64::CPYPWN:
5955 case AArch64::CPYPRN:
5956 case AArch64::CPYPN:
5957 case AArch64::CPYPWT:
5958 case AArch64::CPYPWTWN:
5959 case AArch64::CPYPWTRN:
5960 case AArch64::CPYPWTN:
5961 case AArch64::CPYPRT:
5962 case AArch64::CPYPRTWN:
5963 case AArch64::CPYPRTRN:
5964 case AArch64::CPYPRTN:
5965 case AArch64::CPYPT:
5966 case AArch64::CPYPTWN:
5967 case AArch64::CPYPTRN:
5968 case AArch64::CPYPTN:
5969 case AArch64::CPYM:
5970 case AArch64::CPYMWN:
5971 case AArch64::CPYMRN:
5972 case AArch64::CPYMN:
5973 case AArch64::CPYMWT:
5974 case AArch64::CPYMWTWN:
5975 case AArch64::CPYMWTRN:
5976 case AArch64::CPYMWTN:
5977 case AArch64::CPYMRT:
5978 case AArch64::CPYMRTWN:
5979 case AArch64::CPYMRTRN:
5980 case AArch64::CPYMRTN:
5981 case AArch64::CPYMT:
5982 case AArch64::CPYMTWN:
5983 case AArch64::CPYMTRN:
5984 case AArch64::CPYMTN:
5985 case AArch64::CPYE:
5986 case AArch64::CPYEWN:
5987 case AArch64::CPYERN:
5988 case AArch64::CPYEN:
5989 case AArch64::CPYEWT:
5990 case AArch64::CPYEWTWN:
5991 case AArch64::CPYEWTRN:
5992 case AArch64::CPYEWTN:
5993 case AArch64::CPYERT:
5994 case AArch64::CPYERTWN:
5995 case AArch64::CPYERTRN:
5996 case AArch64::CPYERTN:
5997 case AArch64::CPYET:
5998 case AArch64::CPYETWN:
5999 case AArch64::CPYETRN:
6000 case AArch64::CPYETN: {
6001 // Xd_wb == op0, Xs_wb == op1, Xn_wb == op2
6002 MCRegister Xd = Inst.getOperand(3).getReg();
6003 MCRegister Xs = Inst.getOperand(4).getReg();
6004 MCRegister Xn = Inst.getOperand(5).getReg();
6005
6006 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6007 assert(Xs == Inst.getOperand(1).getReg() && "Xs_wb and Xs do not match");
6008 assert(Xn == Inst.getOperand(2).getReg() && "Xn_wb and Xn do not match");
6009
6010 if (Xd == Xs)
6011 return Error(Loc[0], "invalid CPY instruction, destination and source"
6012 " registers are the same");
6013 if (Xd == Xn)
6014 return Error(Loc[0], "invalid CPY instruction, destination and size"
6015 " registers are the same");
6016 if (Xs == Xn)
6017 return Error(Loc[0], "invalid CPY instruction, source and size"
6018 " registers are the same");
6019 break;
6020 }
6021 case AArch64::SETP:
6022 case AArch64::SETPT:
6023 case AArch64::SETPN:
6024 case AArch64::SETPTN:
6025 case AArch64::SETM:
6026 case AArch64::SETMT:
6027 case AArch64::SETMN:
6028 case AArch64::SETMTN:
6029 case AArch64::SETE:
6030 case AArch64::SETET:
6031 case AArch64::SETEN:
6032 case AArch64::SETETN:
6033 case AArch64::SETGP:
6034 case AArch64::SETGPT:
6035 case AArch64::SETGPN:
6036 case AArch64::SETGPTN:
6037 case AArch64::SETGM:
6038 case AArch64::SETGMT:
6039 case AArch64::SETGMN:
6040 case AArch64::SETGMTN:
6041 case AArch64::MOPSSETGE:
6042 case AArch64::MOPSSETGET:
6043 case AArch64::MOPSSETGEN:
6044 case AArch64::MOPSSETGETN: {
6045 // Xd_wb == op0, Xn_wb == op1
6046 MCRegister Xd = Inst.getOperand(2).getReg();
6047 MCRegister Xn = Inst.getOperand(3).getReg();
6048 MCRegister Xm = Inst.getOperand(4).getReg();
6049
6050 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6051 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6052
6053 if (Xd == Xn)
6054 return Error(Loc[0], "invalid SET instruction, destination and size"
6055 " registers are the same");
6056 if (Xd == Xm)
6057 return Error(Loc[0], "invalid SET instruction, destination and source"
6058 " registers are the same");
6059 if (Xn == Xm)
6060 return Error(Loc[0], "invalid SET instruction, source and size"
6061 " registers are the same");
6062 break;
6063 }
6064 case AArch64::SETGOP:
6065 case AArch64::SETGOPT:
6066 case AArch64::SETGOPN:
6067 case AArch64::SETGOPTN:
6068 case AArch64::SETGOM:
6069 case AArch64::SETGOMT:
6070 case AArch64::SETGOMN:
6071 case AArch64::SETGOMTN:
6072 case AArch64::SETGOE:
6073 case AArch64::SETGOET:
6074 case AArch64::SETGOEN:
6075 case AArch64::SETGOETN: {
6076 // Xd_wb == op0, Xn_wb == op1
6077 MCRegister Xd = Inst.getOperand(2).getReg();
6078 MCRegister Xn = Inst.getOperand(3).getReg();
6079
6080 assert(Xd == Inst.getOperand(0).getReg() && "Xd_wb and Xd do not match");
6081 assert(Xn == Inst.getOperand(1).getReg() && "Xn_wb and Xn do not match");
6082
6083 if (Xd == Xn)
6084 return Error(Loc[0], "invalid SET instruction, destination and size"
6085 " registers are the same");
6086 break;
6087 }
6088 }
6089
6090 // Now check immediate ranges. Separate from the above as there is overlap
6091 // in the instructions being checked and this keeps the nested conditionals
6092 // to a minimum.
6093 switch (Inst.getOpcode()) {
6094 case AArch64::ADDSWri:
6095 case AArch64::ADDSXri:
6096 case AArch64::ADDWri:
6097 case AArch64::ADDXri:
6098 case AArch64::SUBSWri:
6099 case AArch64::SUBSXri:
6100 case AArch64::SUBWri:
6101 case AArch64::SUBXri: {
6102 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
6103 // some slight duplication here.
6104 if (Inst.getOperand(2).isExpr()) {
6105 const MCExpr *Expr = Inst.getOperand(2).getExpr();
6106 AArch64::Specifier ELFSpec;
6107 AArch64::Specifier DarwinSpec;
6108 int64_t Addend;
6109 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6110
6111 // Only allow these with ADDXri.
6112 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
6113 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
6114 Inst.getOpcode() == AArch64::ADDXri)
6115 return false;
6116
6117 // Only allow these with ADDXri/ADDWri
6125 ELFSpec) &&
6126 (Inst.getOpcode() == AArch64::ADDXri ||
6127 Inst.getOpcode() == AArch64::ADDWri))
6128 return false;
6129
6130 // Otherwise, don't allow symbol refs in the immediate field.
6131 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
6132 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
6133 // 'cmp w0, 'borked')
6134 return Error(Loc.back(), "invalid immediate expression");
6135 }
6136 // We don't validate more complex expressions here
6137 }
6138 return false;
6139 }
6140 default:
6141 return false;
6142 }
6143}
6144
6145static std::string AArch64MnemonicSpellCheck(StringRef S,
6146 const FeatureBitset &FBS,
6147 unsigned VariantID = 0);
6148
6149bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
6150 uint64_t ErrorInfo,
6151 OperandVector &Operands) {
6152 switch (ErrCode) {
6153 case Match_InvalidTiedOperand: {
6154 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6155 if (Op.isVectorList())
6156 return Error(Loc, "operand must match destination register list");
6157
6158 assert(Op.isReg() && "Unexpected operand type");
6159 switch (Op.getRegEqualityTy()) {
6160 case RegConstraintEqualityTy::EqualsSubReg:
6161 return Error(Loc, "operand must be 64-bit form of destination register");
6162 case RegConstraintEqualityTy::EqualsSuperReg:
6163 return Error(Loc, "operand must be 32-bit form of destination register");
6164 case RegConstraintEqualityTy::EqualsReg:
6165 return Error(Loc, "operand must match destination register");
6166 }
6167 llvm_unreachable("Unknown RegConstraintEqualityTy");
6168 }
6169 case Match_MissingFeature:
6170 return Error(Loc,
6171 "instruction requires a CPU feature not currently enabled");
6172 case Match_InvalidOperand:
6173 return Error(Loc, "invalid operand for instruction");
6174 case Match_InvalidSuffix:
6175 return Error(Loc, "invalid type suffix for instruction");
6176 case Match_InvalidCondCode:
6177 return Error(Loc, "expected AArch64 condition code");
6178 case Match_AddSubRegExtendSmall:
6179 return Error(Loc,
6180 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6181 case Match_AddSubRegExtendLarge:
6182 return Error(Loc,
6183 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6184 case Match_AddSubSecondSource:
6185 return Error(Loc,
6186 "expected compatible register, symbol or integer in range [0, 4095]");
6187 case Match_LogicalSecondSource:
6188 return Error(Loc, "expected compatible register or logical immediate");
6189 case Match_InvalidMovImm32Shift:
6190 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
6191 case Match_InvalidMovImm64Shift:
6192 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
6193 case Match_AddSubRegShift32:
6194 return Error(Loc,
6195 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6196 case Match_AddSubRegShift64:
6197 return Error(Loc,
6198 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6199 case Match_InvalidFPImm:
6200 return Error(Loc,
6201 "expected compatible register or floating-point constant");
6202 case Match_InvalidMemoryIndexedSImm6:
6203 return Error(Loc, "index must be an integer in range [-32, 31].");
6204 case Match_InvalidMemoryIndexedSImm5:
6205 return Error(Loc, "index must be an integer in range [-16, 15].");
6206 case Match_InvalidMemoryIndexed1SImm4:
6207 return Error(Loc, "index must be an integer in range [-8, 7].");
6208 case Match_InvalidMemoryIndexed2SImm4:
6209 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
6210 case Match_InvalidMemoryIndexed3SImm4:
6211 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
6212 case Match_InvalidMemoryIndexed4SImm4:
6213 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
6214 case Match_InvalidMemoryIndexed16SImm4:
6215 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
6216 case Match_InvalidMemoryIndexed32SImm4:
6217 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
6218 case Match_InvalidMemoryIndexed1SImm6:
6219 return Error(Loc, "index must be an integer in range [-32, 31].");
6220 case Match_InvalidMemoryIndexedSImm8:
6221 return Error(Loc, "index must be an integer in range [-128, 127].");
6222 case Match_InvalidMemoryIndexedSImm9:
6223 return Error(Loc, "index must be an integer in range [-256, 255].");
6224 case Match_InvalidMemoryIndexed16SImm9:
6225 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
6226 case Match_InvalidMemoryIndexed8SImm10:
6227 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
6228 case Match_InvalidMemoryIndexed4SImm7:
6229 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
6230 case Match_InvalidMemoryIndexed8SImm7:
6231 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
6232 case Match_InvalidMemoryIndexed16SImm7:
6233 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
6234 case Match_InvalidMemoryIndexed8UImm5:
6235 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
6236 case Match_InvalidMemoryIndexed8UImm3:
6237 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
6238 case Match_InvalidMemoryIndexed4UImm5:
6239 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
6240 case Match_InvalidMemoryIndexed2UImm5:
6241 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
6242 case Match_InvalidMemoryIndexed8UImm6:
6243 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
6244 case Match_InvalidMemoryIndexed16UImm6:
6245 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
6246 case Match_InvalidMemoryIndexed4UImm6:
6247 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
6248 case Match_InvalidMemoryIndexed2UImm6:
6249 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
6250 case Match_InvalidMemoryIndexed1UImm6:
6251 return Error(Loc, "index must be in range [0, 63].");
6252 case Match_InvalidMemoryWExtend8:
6253 return Error(Loc,
6254 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6255 case Match_InvalidMemoryWExtend16:
6256 return Error(Loc,
6257 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6258 case Match_InvalidMemoryWExtend32:
6259 return Error(Loc,
6260 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6261 case Match_InvalidMemoryWExtend64:
6262 return Error(Loc,
6263 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6264 case Match_InvalidMemoryWExtend128:
6265 return Error(Loc,
6266 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6267 case Match_InvalidMemoryXExtend8:
6268 return Error(Loc,
6269 "expected 'lsl' or 'sxtx' with optional shift of #0");
6270 case Match_InvalidMemoryXExtend16:
6271 return Error(Loc,
6272 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6273 case Match_InvalidMemoryXExtend32:
6274 return Error(Loc,
6275 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6276 case Match_InvalidMemoryXExtend64:
6277 return Error(Loc,
6278 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6279 case Match_InvalidMemoryXExtend128:
6280 return Error(Loc,
6281 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6282 case Match_InvalidMemoryIndexed1:
6283 return Error(Loc, "index must be an integer in range [0, 4095].");
6284 case Match_InvalidMemoryIndexed2:
6285 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6286 case Match_InvalidMemoryIndexed4:
6287 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6288 case Match_InvalidMemoryIndexed8:
6289 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6290 case Match_InvalidMemoryIndexed16:
6291 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6292 case Match_InvalidImm0_0:
6293 return Error(Loc, "immediate must be 0.");
6294 case Match_InvalidImm0_1:
6295 return Error(Loc, "immediate must be an integer in range [0, 1].");
6296 case Match_InvalidImm0_3:
6297 return Error(Loc, "immediate must be an integer in range [0, 3].");
6298 case Match_InvalidImm0_7:
6299 return Error(Loc, "immediate must be an integer in range [0, 7].");
6300 case Match_InvalidImm0_15:
6301 return Error(Loc, "immediate must be an integer in range [0, 15].");
6302 case Match_InvalidImm0_31:
6303 return Error(Loc, "immediate must be an integer in range [0, 31].");
6304 case Match_InvalidImm0_63:
6305 return Error(Loc, "immediate must be an integer in range [0, 63].");
6306 case Match_InvalidImm0_127:
6307 return Error(Loc, "immediate must be an integer in range [0, 127].");
6308 case Match_InvalidImm0_255:
6309 return Error(Loc, "immediate must be an integer in range [0, 255].");
6310 case Match_InvalidImm0_65535:
6311 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6312 case Match_InvalidImm1_8:
6313 return Error(Loc, "immediate must be an integer in range [1, 8].");
6314 case Match_InvalidImm1_16:
6315 return Error(Loc, "immediate must be an integer in range [1, 16].");
6316 case Match_InvalidImm1_32:
6317 return Error(Loc, "immediate must be an integer in range [1, 32].");
6318 case Match_InvalidImm1_64:
6319 return Error(Loc, "immediate must be an integer in range [1, 64].");
6320 case Match_InvalidImmM1_62:
6321 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6322 case Match_InvalidMemoryIndexedRange2UImm0:
6323 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6324 case Match_InvalidMemoryIndexedRange2UImm1:
6325 return Error(Loc, "vector select offset must be an immediate range of the "
6326 "form <immf>:<imml>, where the first "
6327 "immediate is a multiple of 2 in the range [0, 2], and "
6328 "the second immediate is immf + 1.");
6329 case Match_InvalidMemoryIndexedRange2UImm2:
6330 case Match_InvalidMemoryIndexedRange2UImm3:
6331 return Error(
6332 Loc,
6333 "vector select offset must be an immediate range of the form "
6334 "<immf>:<imml>, "
6335 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6336 "[0, 14] "
6337 "depending on the instruction, and the second immediate is immf + 1.");
6338 case Match_InvalidMemoryIndexedRange4UImm0:
6339 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6340 case Match_InvalidMemoryIndexedRange4UImm1:
6341 case Match_InvalidMemoryIndexedRange4UImm2:
6342 return Error(
6343 Loc,
6344 "vector select offset must be an immediate range of the form "
6345 "<immf>:<imml>, "
6346 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6347 "[0, 12] "
6348 "depending on the instruction, and the second immediate is immf + 3.");
6349 case Match_InvalidSVEAddSubImm8:
6350 return Error(Loc, "immediate must be an integer in range [0, 255]"
6351 " with a shift amount of 0");
6352 case Match_InvalidSVEAddSubImm16:
6353 case Match_InvalidSVEAddSubImm32:
6354 case Match_InvalidSVEAddSubImm64:
6355 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6356 "multiple of 256 in range [256, 65280]");
6357 case Match_InvalidSVECpyImm8:
6358 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6359 " with a shift amount of 0");
6360 case Match_InvalidSVECpyImm16:
6361 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6362 "multiple of 256 in range [-32768, 65280]");
6363 case Match_InvalidSVECpyImm32:
6364 case Match_InvalidSVECpyImm64:
6365 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6366 "multiple of 256 in range [-32768, 32512]");
6367 case Match_InvalidIndexRange0_0:
6368 return Error(Loc, "expected lane specifier '[0]'");
6369 case Match_InvalidIndexRange1_1:
6370 return Error(Loc, "expected lane specifier '[1]'");
6371 case Match_InvalidIndexRange0_15:
6372 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6373 case Match_InvalidIndexRange0_7:
6374 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6375 case Match_InvalidIndexRange0_3:
6376 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6377 case Match_InvalidIndexRange0_1:
6378 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6379 case Match_InvalidSVEIndexRange0_63:
6380 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6381 case Match_InvalidSVEIndexRange0_31:
6382 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6383 case Match_InvalidSVEIndexRange0_15:
6384 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6385 case Match_InvalidSVEIndexRange0_7:
6386 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6387 case Match_InvalidSVEIndexRange0_3:
6388 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6389 case Match_InvalidLabel:
6390 return Error(Loc, "expected label or encodable integer pc offset");
6391 case Match_MRS:
6392 return Error(Loc, "expected readable system register");
6393 case Match_MSR:
6394 case Match_InvalidSVCR:
6395 return Error(Loc, "expected writable system register or pstate");
6396 case Match_InvalidComplexRotationEven:
6397 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6398 case Match_InvalidComplexRotationOdd:
6399 return Error(Loc, "complex rotation must be 90 or 270.");
6400 case Match_MnemonicFail: {
6401 std::string Suggestion = AArch64MnemonicSpellCheck(
6402 ((AArch64Operand &)*Operands[0]).getToken(),
6403 ComputeAvailableFeatures(STI->getFeatureBits()));
6404 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6405 }
6406 case Match_InvalidGPR64shifted8:
6407 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6408 case Match_InvalidGPR64shifted16:
6409 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6410 case Match_InvalidGPR64shifted32:
6411 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6412 case Match_InvalidGPR64shifted64:
6413 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6414 case Match_InvalidGPR64shifted128:
6415 return Error(
6416 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6417 case Match_InvalidGPR64NoXZRshifted8:
6418 return Error(Loc, "register must be x0..x30 without shift");
6419 case Match_InvalidGPR64NoXZRshifted16:
6420 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6421 case Match_InvalidGPR64NoXZRshifted32:
6422 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6423 case Match_InvalidGPR64NoXZRshifted64:
6424 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6425 case Match_InvalidGPR64NoXZRshifted128:
6426 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6427 case Match_InvalidZPR32UXTW8:
6428 case Match_InvalidZPR32SXTW8:
6429 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6430 case Match_InvalidZPR32UXTW16:
6431 case Match_InvalidZPR32SXTW16:
6432 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6433 case Match_InvalidZPR32UXTW32:
6434 case Match_InvalidZPR32SXTW32:
6435 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6436 case Match_InvalidZPR32UXTW64:
6437 case Match_InvalidZPR32SXTW64:
6438 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6439 case Match_InvalidZPR64UXTW8:
6440 case Match_InvalidZPR64SXTW8:
6441 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6442 case Match_InvalidZPR64UXTW16:
6443 case Match_InvalidZPR64SXTW16:
6444 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6445 case Match_InvalidZPR64UXTW32:
6446 case Match_InvalidZPR64SXTW32:
6447 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6448 case Match_InvalidZPR64UXTW64:
6449 case Match_InvalidZPR64SXTW64:
6450 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6451 case Match_InvalidZPR32LSL8:
6452 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6453 case Match_InvalidZPR32LSL16:
6454 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6455 case Match_InvalidZPR32LSL32:
6456 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6457 case Match_InvalidZPR32LSL64:
6458 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6459 case Match_InvalidZPR64LSL8:
6460 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6461 case Match_InvalidZPR64LSL16:
6462 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6463 case Match_InvalidZPR64LSL32:
6464 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6465 case Match_InvalidZPR64LSL64:
6466 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6467 case Match_InvalidZPR0:
6468 return Error(Loc, "expected register without element width suffix");
6469 case Match_InvalidZPR8:
6470 case Match_InvalidZPR16:
6471 case Match_InvalidZPR32:
6472 case Match_InvalidZPR64:
6473 case Match_InvalidZPR128:
6474 return Error(Loc, "invalid element width");
6475 case Match_InvalidZPR_3b8:
6476 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6477 case Match_InvalidZPR_3b16:
6478 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6479 case Match_InvalidZPR_3b32:
6480 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6481 case Match_InvalidZPR_4b8:
6482 return Error(Loc,
6483 "Invalid restricted vector register, expected z0.b..z15.b");
6484 case Match_InvalidZPR_4b16:
6485 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6486 case Match_InvalidZPR_4b32:
6487 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6488 case Match_InvalidZPR_4b64:
6489 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6490 case Match_InvalidZPRMul2_Lo8:
6491 return Error(Loc, "Invalid restricted vector register, expected even "
6492 "register in z0.b..z14.b");
6493 case Match_InvalidZPRMul2_Hi8:
6494 return Error(Loc, "Invalid restricted vector register, expected even "
6495 "register in z16.b..z30.b");
6496 case Match_InvalidZPRMul2_Lo16:
6497 return Error(Loc, "Invalid restricted vector register, expected even "
6498 "register in z0.h..z14.h");
6499 case Match_InvalidZPRMul2_Hi16:
6500 return Error(Loc, "Invalid restricted vector register, expected even "
6501 "register in z16.h..z30.h");
6502 case Match_InvalidZPRMul2_Lo32:
6503 return Error(Loc, "Invalid restricted vector register, expected even "
6504 "register in z0.s..z14.s");
6505 case Match_InvalidZPRMul2_Hi32:
6506 return Error(Loc, "Invalid restricted vector register, expected even "
6507 "register in z16.s..z30.s");
6508 case Match_InvalidZPRMul2_Lo64:
6509 return Error(Loc, "Invalid restricted vector register, expected even "
6510 "register in z0.d..z14.d");
6511 case Match_InvalidZPRMul2_Hi64:
6512 return Error(Loc, "Invalid restricted vector register, expected even "
6513 "register in z16.d..z30.d");
6514 case Match_InvalidZPR_K0:
6515 return Error(Loc, "invalid restricted vector register, expected register "
6516 "in z20..z23 or z28..z31");
6517 case Match_InvalidSVEPattern:
6518 return Error(Loc, "invalid predicate pattern");
6519 case Match_InvalidSVEPPRorPNRAnyReg:
6520 case Match_InvalidSVEPPRorPNRBReg:
6521 case Match_InvalidSVEPredicateAnyReg:
6522 case Match_InvalidSVEPredicateBReg:
6523 case Match_InvalidSVEPredicateHReg:
6524 case Match_InvalidSVEPredicateSReg:
6525 case Match_InvalidSVEPredicateDReg:
6526 return Error(Loc, "invalid predicate register.");
6527 case Match_InvalidSVEPredicate3bAnyReg:
6528 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6529 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6530 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6531 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6532 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6533 return Error(Loc, "Invalid predicate register, expected PN in range "
6534 "pn8..pn15 with element suffix.");
6535 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6536 return Error(Loc, "invalid restricted predicate-as-counter register "
6537 "expected pn8..pn15");
6538 case Match_InvalidSVEPNPredicateBReg:
6539 case Match_InvalidSVEPNPredicateHReg:
6540 case Match_InvalidSVEPNPredicateSReg:
6541 case Match_InvalidSVEPNPredicateDReg:
6542 return Error(Loc, "Invalid predicate register, expected PN in range "
6543 "pn0..pn15 with element suffix.");
6544 case Match_InvalidSVEVecLenSpecifier:
6545 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6546 case Match_InvalidSVEPredicateListMul2x8:
6547 case Match_InvalidSVEPredicateListMul2x16:
6548 case Match_InvalidSVEPredicateListMul2x32:
6549 case Match_InvalidSVEPredicateListMul2x64:
6550 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6551 "predicate registers, where the first vector is a multiple of 2 "
6552 "and with correct element type");
6553 case Match_InvalidSVEExactFPImmOperandHalfOne:
6554 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6555 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6556 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6557 case Match_InvalidSVEExactFPImmOperandZeroOne:
6558 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6559 case Match_InvalidMatrixTileVectorH8:
6560 case Match_InvalidMatrixTileVectorV8:
6561 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6562 case Match_InvalidMatrixTileVectorH16:
6563 case Match_InvalidMatrixTileVectorV16:
6564 return Error(Loc,
6565 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6566 case Match_InvalidMatrixTileVectorH32:
6567 case Match_InvalidMatrixTileVectorV32:
6568 return Error(Loc,
6569 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6570 case Match_InvalidMatrixTileVectorH64:
6571 case Match_InvalidMatrixTileVectorV64:
6572 return Error(Loc,
6573 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6574 case Match_InvalidMatrixTileVectorH128:
6575 case Match_InvalidMatrixTileVectorV128:
6576 return Error(Loc,
6577 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6578 case Match_InvalidMatrixTile16:
6579 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6580 case Match_InvalidMatrixTile32:
6581 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6582 case Match_InvalidMatrixTile64:
6583 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6584 case Match_InvalidMatrix:
6585 return Error(Loc, "invalid matrix operand, expected za");
6586 case Match_InvalidMatrix8:
6587 return Error(Loc, "invalid matrix operand, expected suffix .b");
6588 case Match_InvalidMatrix16:
6589 return Error(Loc, "invalid matrix operand, expected suffix .h");
6590 case Match_InvalidMatrix32:
6591 return Error(Loc, "invalid matrix operand, expected suffix .s");
6592 case Match_InvalidMatrix64:
6593 return Error(Loc, "invalid matrix operand, expected suffix .d");
6594 case Match_InvalidMatrixIndexGPR32_12_15:
6595 return Error(Loc, "operand must be a register in range [w12, w15]");
6596 case Match_InvalidMatrixIndexGPR32_8_11:
6597 return Error(Loc, "operand must be a register in range [w8, w11]");
6598 case Match_InvalidSVEVectorList2x8Mul2:
6599 case Match_InvalidSVEVectorList2x16Mul2:
6600 case Match_InvalidSVEVectorList2x32Mul2:
6601 case Match_InvalidSVEVectorList2x64Mul2:
6602 case Match_InvalidSVEVectorList2x128Mul2:
6603 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6604 "SVE vectors, where the first vector is a multiple of 2 "
6605 "and with matching element types");
6606 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6607 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6608 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6609 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6610 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6611 "SVE vectors in the range z0-z14, where the first vector "
6612 "is a multiple of 2 "
6613 "and with matching element types");
6614 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6615 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6616 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6617 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6618 return Error(Loc,
6619 "Invalid vector list, expected list with 2 consecutive "
6620 "SVE vectors in the range z16-z30, where the first vector "
6621 "is a multiple of 2 "
6622 "and with matching element types");
6623 case Match_InvalidSVEVectorList4x8Mul4:
6624 case Match_InvalidSVEVectorList4x16Mul4:
6625 case Match_InvalidSVEVectorList4x32Mul4:
6626 case Match_InvalidSVEVectorList4x64Mul4:
6627 case Match_InvalidSVEVectorList4x128Mul4:
6628 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6629 "SVE vectors, where the first vector is a multiple of 4 "
6630 "and with matching element types");
6631 case Match_InvalidLookupTable:
6632 return Error(Loc, "Invalid lookup table, expected zt0");
6633 case Match_InvalidSVEVectorListStrided2x8:
6634 case Match_InvalidSVEVectorListStrided2x16:
6635 case Match_InvalidSVEVectorListStrided2x32:
6636 case Match_InvalidSVEVectorListStrided2x64:
6637 return Error(
6638 Loc,
6639 "Invalid vector list, expected list with each SVE vector in the list "
6640 "8 registers apart, and the first register in the range [z0, z7] or "
6641 "[z16, z23] and with correct element type");
6642 case Match_InvalidSVEVectorListStrided4x8:
6643 case Match_InvalidSVEVectorListStrided4x16:
6644 case Match_InvalidSVEVectorListStrided4x32:
6645 case Match_InvalidSVEVectorListStrided4x64:
6646 return Error(
6647 Loc,
6648 "Invalid vector list, expected list with each SVE vector in the list "
6649 "4 registers apart, and the first register in the range [z0, z3] or "
6650 "[z16, z19] and with correct element type");
6651 case Match_AddSubLSLImm3ShiftLarge:
6652 return Error(Loc,
6653 "expected 'lsl' with optional integer in range [0, 7]");
6654 default:
6655 llvm_unreachable("unexpected error code!");
6656 }
6657}
6658
6659static const char *getSubtargetFeatureName(uint64_t Val);
6660
6661bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6662 OperandVector &Operands,
6663 MCStreamer &Out,
6664 uint64_t &ErrorInfo,
6665 bool MatchingInlineAsm) {
6666 assert(!Operands.empty() && "Unexpected empty operand list!");
6667 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6668 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6669
6670 StringRef Tok = Op.getToken();
6671 unsigned NumOperands = Operands.size();
6672
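 // LSL-immediate is an alias of UBFM. Illustrative example (derived from the
 // arithmetic below, not an authoritative test case): "lsl w0, w1, #3" becomes
 // "ubfm w0, w1, #29, #28", i.e. immr = (32 - shift) & 31, imms = 31 - shift.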
6673 if (NumOperands == 4 && Tok == "lsl") {
6674 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6675 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6676 if (Op2.isScalarReg() && Op3.isImm()) {
6677 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6678 if (Op3CE) {
6679 uint64_t Op3Val = Op3CE->getValue();
6680 uint64_t NewOp3Val = 0;
6681 uint64_t NewOp4Val = 0;
6682 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6683 Op2.getReg())) {
6684 NewOp3Val = (32 - Op3Val) & 0x1f;
6685 NewOp4Val = 31 - Op3Val;
6686 } else {
6687 NewOp3Val = (64 - Op3Val) & 0x3f;
6688 NewOp4Val = 63 - Op3Val;
6689 }
6690
6691 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6692 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6693
6694 Operands[0] =
6695 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6696 Operands.push_back(AArch64Operand::CreateImm(
6697 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6698 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6699 Op3.getEndLoc(), getContext());
6700 }
6701 }
6702 } else if (NumOperands == 4 && Tok == "bfc") {
6703 // FIXME: Horrible hack to handle BFC->BFM alias.
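 // Illustrative example (derived from the arithmetic below): "bfc w0, #3, #4"
 // becomes "bfm w0, wzr, #29, #3", with immr = (32 - lsb) & 31,
 // imms = width - 1, and wzr/xzr substituted as the source register.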
6704 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6705 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6706 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6707
6708 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6709 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6710 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6711
6712 if (LSBCE && WidthCE) {
6713 uint64_t LSB = LSBCE->getValue();
6714 uint64_t Width = WidthCE->getValue();
6715
6716 uint64_t RegWidth = 0;
6717 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6718 Op1.getReg()))
6719 RegWidth = 64;
6720 else
6721 RegWidth = 32;
6722
6723 if (LSB >= RegWidth)
6724 return Error(LSBOp.getStartLoc(),
6725 "expected integer in range [0, 31]");
6726 if (Width < 1 || Width > RegWidth)
6727 return Error(WidthOp.getStartLoc(),
6728 "expected integer in range [1, 32]");
6729
6730 uint64_t ImmR = 0;
6731 if (RegWidth == 32)
6732 ImmR = (32 - LSB) & 0x1f;
6733 else
6734 ImmR = (64 - LSB) & 0x3f;
6735
6736 uint64_t ImmS = Width - 1;
6737
6738 if (ImmR != 0 && ImmS >= ImmR)
6739 return Error(WidthOp.getStartLoc(),
6740 "requested insert overflows register");
6741
6742 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6743 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6744 Operands[0] =
6745 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6746 Operands[2] = AArch64Operand::CreateReg(
6747 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6748 SMLoc(), SMLoc(), getContext());
6749 Operands[3] = AArch64Operand::CreateImm(
6750 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6751 Operands.emplace_back(
6752 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6753 WidthOp.getEndLoc(), getContext()));
6754 }
6755 }
6756 } else if (NumOperands == 5) {
6757 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6758 // UBFIZ -> UBFM aliases.
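 // Illustrative example (derived from the arithmetic below):
 // "bfi w0, w1, #3, #4" becomes "bfm w0, w1, #29, #3", i.e.
 // immr = (32 - lsb) & 31 for a W destination and imms = width - 1.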
6759 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6760 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6761 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6762 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6763
6764 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6765 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6766 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6767
6768 if (Op3CE && Op4CE) {
6769 uint64_t Op3Val = Op3CE->getValue();
6770 uint64_t Op4Val = Op4CE->getValue();
6771
6772 uint64_t RegWidth = 0;
6773 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6774 Op1.getReg()))
6775 RegWidth = 64;
6776 else
6777 RegWidth = 32;
6778
6779 if (Op3Val >= RegWidth)
6780 return Error(Op3.getStartLoc(),
6781 "expected integer in range [0, 31]");
6782 if (Op4Val < 1 || Op4Val > RegWidth)
6783 return Error(Op4.getStartLoc(),
6784 "expected integer in range [1, 32]");
6785
6786 uint64_t NewOp3Val = 0;
6787 if (RegWidth == 32)
6788 NewOp3Val = (32 - Op3Val) & 0x1f;
6789 else
6790 NewOp3Val = (64 - Op3Val) & 0x3f;
6791
6792 uint64_t NewOp4Val = Op4Val - 1;
6793
6794 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6795 return Error(Op4.getStartLoc(),
6796 "requested insert overflows register");
6797
6798 const MCExpr *NewOp3 =
6799 MCConstantExpr::create(NewOp3Val, getContext());
6800 const MCExpr *NewOp4 =
6801 MCConstantExpr::create(NewOp4Val, getContext());
6802 Operands[3] = AArch64Operand::CreateImm(
6803 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6804 Operands[4] = AArch64Operand::CreateImm(
6805 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6806 if (Tok == "bfi")
6807 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6808 getContext());
6809 else if (Tok == "sbfiz")
6810 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6811 getContext());
6812 else if (Tok == "ubfiz")
6813 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6814 getContext());
6815 else
6816 llvm_unreachable("No valid mnemonic for alias?");
6817 }
6818 }
6819
6820 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6821 // UBFX -> UBFM aliases.
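 // Illustrative example (derived from the arithmetic below):
 // "ubfx w0, w1, #3, #4" becomes "ubfm w0, w1, #3, #6",
 // i.e. imms = lsb + width - 1 while the lsb operand is kept.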
6822 } else if (NumOperands == 5 &&
6823 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6824 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6825 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6826 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6827
6828 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6829 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6830 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6831
6832 if (Op3CE && Op4CE) {
6833 uint64_t Op3Val = Op3CE->getValue();
6834 uint64_t Op4Val = Op4CE->getValue();
6835
6836 uint64_t RegWidth = 0;
6837 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6838 Op1.getReg()))
6839 RegWidth = 64;
6840 else
6841 RegWidth = 32;
6842
6843 if (Op3Val >= RegWidth)
6844 return Error(Op3.getStartLoc(),
6845 "expected integer in range [0, 31]");
6846 if (Op4Val < 1 || Op4Val > RegWidth)
6847 return Error(Op4.getStartLoc(),
6848 "expected integer in range [1, 32]");
6849
6850 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6851
6852 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6853 return Error(Op4.getStartLoc(),
6854 "requested extract overflows register");
6855
6856 const MCExpr *NewOp4 =
6857 MCConstantExpr::create(NewOp4Val, getContext());
6858 Operands[4] = AArch64Operand::CreateImm(
6859 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6860 if (Tok == "bfxil")
6861 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6862 getContext());
6863 else if (Tok == "sbfx")
6864 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6865 getContext());
6866 else if (Tok == "ubfx")
6867 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6868 getContext());
6869 else
6870 llvm_unreachable("No valid mnemonic for alias?");
6871 }
6872 }
6873 }
6874 }
6875
6876 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6877 // instruction for FP registers correctly in some rare circumstances. Convert
6878 // it to a safe instruction and warn (because silently changing someone's
6879 // assembly is rude).
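 // Illustrative example: "movi.2d v0, #0" is re-emitted as "movi.16b v0, #0"
 // (the same zeroing effect) and a warning is issued.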
6880 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6881 NumOperands == 4 && Tok == "movi") {
6882 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6883 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6884 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6885 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6886 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6887 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6888 if (Suffix.lower() == ".2d" &&
6889 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6890 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6891 " correctly on this CPU, converting to equivalent movi.16b");
6892 // Switch the suffix to .16b.
6893 unsigned Idx = Op1.isToken() ? 1 : 2;
6894 Operands[Idx] =
6895 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6896 }
6897 }
6898 }
6899
6900 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6901 // InstAlias can't quite handle this since the reg classes aren't
6902 // subclasses.
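 // Illustrative example: for "sxtw x0, w1" the w1 operand is rewritten to x1 so
 // the GPR64 pattern can match; only the register class seen by the matcher
 // changes, not the register number that is encoded.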
6903 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6904 // The source register can be Wn here, but the matcher expects a
6905 // GPR64. Twiddle it here if necessary.
6906 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6907 if (Op.isScalarReg()) {
6908 MCRegister Reg = getXRegFromWReg(Op.getReg());
6909 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6910 Op.getStartLoc(), Op.getEndLoc(),
6911 getContext());
6912 }
6913 }
6914 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6915 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6916 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6917 if (Op.isScalarReg() &&
6918 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6919 Op.getReg())) {
6920 // The source register can be Wn here, but the matcher expects a
6921 // GPR64. Twiddle it here if necessary.
6922 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6923 if (Op.isScalarReg()) {
6924 MCRegister Reg = getXRegFromWReg(Op.getReg());
6925 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6926 Op.getStartLoc(),
6927 Op.getEndLoc(), getContext());
6928 }
6929 }
6930 }
6931 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6932 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6933 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6934 if (Op.isScalarReg() &&
6935 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6936 Op.getReg())) {
6937 // The source register can be Wn here, but the matcher expects a
6938 // GPR32. Twiddle it here if necessary.
6939 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6940 if (Op.isScalarReg()) {
6941 MCRegister Reg = getWRegFromXReg(Op.getReg());
6942 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6943 Op.getStartLoc(),
6944 Op.getEndLoc(), getContext());
6945 }
6946 }
6947 }
6948
6949 MCInst Inst;
6950 FeatureBitset MissingFeatures;
6951 // First try to match against the secondary set of tables containing the
6952 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6953 unsigned MatchResult =
6954 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6955 MatchingInlineAsm, 1);
6956
6957 // If that fails, try against the alternate table containing long-form NEON:
6958 // "fadd v0.2s, v1.2s, v2.2s"
6959 if (MatchResult != Match_Success) {
6960 // But first, save the short-form match result: we can use it in case the
6961 // long-form match also fails.
6962 auto ShortFormNEONErrorInfo = ErrorInfo;
6963 auto ShortFormNEONMatchResult = MatchResult;
6964 auto ShortFormNEONMissingFeatures = MissingFeatures;
6965
6966 MatchResult =
6967 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6968 MatchingInlineAsm, 0);
6969
6970 // Now, both matches failed, and the long-form match failed on the mnemonic
6971 // suffix token operand. The short-form match failure is probably more
6972 // relevant: use it instead.
6973 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6974 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6975 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6976 MatchResult = ShortFormNEONMatchResult;
6977 ErrorInfo = ShortFormNEONErrorInfo;
6978 MissingFeatures = ShortFormNEONMissingFeatures;
6979 }
6980 }
6981
6982 switch (MatchResult) {
6983 case Match_Success: {
6984 // Perform range checking and other semantic validations
6985 SmallVector<SMLoc, 8> OperandLocs;
6986 NumOperands = Operands.size();
6987 for (unsigned i = 1; i < NumOperands; ++i)
6988 OperandLocs.push_back(Operands[i]->getStartLoc());
6989 if (validateInstruction(Inst, IDLoc, OperandLocs))
6990 return true;
6991
6992 Inst.setLoc(IDLoc);
6993 Out.emitInstruction(Inst, getSTI());
6994 return false;
6995 }
6996 case Match_MissingFeature: {
6997 assert(MissingFeatures.any() && "Unknown missing feature!");
6998 // Special case the error message for the very common case where only
6999 // a single subtarget feature is missing (neon, e.g.).
7000 std::string Msg = "instruction requires:";
7001 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7002 if (MissingFeatures[i]) {
7003 Msg += " ";
7004 Msg += getSubtargetFeatureName(i);
7005 }
7006 }
7007 return Error(IDLoc, Msg);
7008 }
7009 case Match_MnemonicFail:
7010 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
7011 case Match_InvalidOperand: {
7012 SMLoc ErrorLoc = IDLoc;
7013
7014 if (ErrorInfo != ~0ULL) {
7015 if (ErrorInfo >= Operands.size())
7016 return Error(IDLoc, "too few operands for instruction",
7017 SMRange(IDLoc, getTok().getLoc()));
7018
7019 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7020 if (ErrorLoc == SMLoc())
7021 ErrorLoc = IDLoc;
7022 }
7023 // If the match failed on a suffix token operand, tweak the diagnostic
7024 // accordingly.
7025 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7026 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7027 MatchResult = Match_InvalidSuffix;
7028
7029 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7030 }
7031 case Match_InvalidTiedOperand:
7032 case Match_InvalidMemoryIndexed1:
7033 case Match_InvalidMemoryIndexed2:
7034 case Match_InvalidMemoryIndexed4:
7035 case Match_InvalidMemoryIndexed8:
7036 case Match_InvalidMemoryIndexed16:
7037 case Match_InvalidCondCode:
7038 case Match_AddSubLSLImm3ShiftLarge:
7039 case Match_AddSubRegExtendSmall:
7040 case Match_AddSubRegExtendLarge:
7041 case Match_AddSubSecondSource:
7042 case Match_LogicalSecondSource:
7043 case Match_AddSubRegShift32:
7044 case Match_AddSubRegShift64:
7045 case Match_InvalidMovImm32Shift:
7046 case Match_InvalidMovImm64Shift:
7047 case Match_InvalidFPImm:
7048 case Match_InvalidMemoryWExtend8:
7049 case Match_InvalidMemoryWExtend16:
7050 case Match_InvalidMemoryWExtend32:
7051 case Match_InvalidMemoryWExtend64:
7052 case Match_InvalidMemoryWExtend128:
7053 case Match_InvalidMemoryXExtend8:
7054 case Match_InvalidMemoryXExtend16:
7055 case Match_InvalidMemoryXExtend32:
7056 case Match_InvalidMemoryXExtend64:
7057 case Match_InvalidMemoryXExtend128:
7058 case Match_InvalidMemoryIndexed1SImm4:
7059 case Match_InvalidMemoryIndexed2SImm4:
7060 case Match_InvalidMemoryIndexed3SImm4:
7061 case Match_InvalidMemoryIndexed4SImm4:
7062 case Match_InvalidMemoryIndexed1SImm6:
7063 case Match_InvalidMemoryIndexed16SImm4:
7064 case Match_InvalidMemoryIndexed32SImm4:
7065 case Match_InvalidMemoryIndexed4SImm7:
7066 case Match_InvalidMemoryIndexed8SImm7:
7067 case Match_InvalidMemoryIndexed16SImm7:
7068 case Match_InvalidMemoryIndexed8UImm5:
7069 case Match_InvalidMemoryIndexed8UImm3:
7070 case Match_InvalidMemoryIndexed4UImm5:
7071 case Match_InvalidMemoryIndexed2UImm5:
7072 case Match_InvalidMemoryIndexed1UImm6:
7073 case Match_InvalidMemoryIndexed2UImm6:
7074 case Match_InvalidMemoryIndexed4UImm6:
7075 case Match_InvalidMemoryIndexed8UImm6:
7076 case Match_InvalidMemoryIndexed16UImm6:
7077 case Match_InvalidMemoryIndexedSImm6:
7078 case Match_InvalidMemoryIndexedSImm5:
7079 case Match_InvalidMemoryIndexedSImm8:
7080 case Match_InvalidMemoryIndexedSImm9:
7081 case Match_InvalidMemoryIndexed16SImm9:
7082 case Match_InvalidMemoryIndexed8SImm10:
7083 case Match_InvalidImm0_0:
7084 case Match_InvalidImm0_1:
7085 case Match_InvalidImm0_3:
7086 case Match_InvalidImm0_7:
7087 case Match_InvalidImm0_15:
7088 case Match_InvalidImm0_31:
7089 case Match_InvalidImm0_63:
7090 case Match_InvalidImm0_127:
7091 case Match_InvalidImm0_255:
7092 case Match_InvalidImm0_65535:
7093 case Match_InvalidImm1_8:
7094 case Match_InvalidImm1_16:
7095 case Match_InvalidImm1_32:
7096 case Match_InvalidImm1_64:
7097 case Match_InvalidImmM1_62:
7098 case Match_InvalidMemoryIndexedRange2UImm0:
7099 case Match_InvalidMemoryIndexedRange2UImm1:
7100 case Match_InvalidMemoryIndexedRange2UImm2:
7101 case Match_InvalidMemoryIndexedRange2UImm3:
7102 case Match_InvalidMemoryIndexedRange4UImm0:
7103 case Match_InvalidMemoryIndexedRange4UImm1:
7104 case Match_InvalidMemoryIndexedRange4UImm2:
7105 case Match_InvalidSVEAddSubImm8:
7106 case Match_InvalidSVEAddSubImm16:
7107 case Match_InvalidSVEAddSubImm32:
7108 case Match_InvalidSVEAddSubImm64:
7109 case Match_InvalidSVECpyImm8:
7110 case Match_InvalidSVECpyImm16:
7111 case Match_InvalidSVECpyImm32:
7112 case Match_InvalidSVECpyImm64:
7113 case Match_InvalidIndexRange0_0:
7114 case Match_InvalidIndexRange1_1:
7115 case Match_InvalidIndexRange0_15:
7116 case Match_InvalidIndexRange0_7:
7117 case Match_InvalidIndexRange0_3:
7118 case Match_InvalidIndexRange0_1:
7119 case Match_InvalidSVEIndexRange0_63:
7120 case Match_InvalidSVEIndexRange0_31:
7121 case Match_InvalidSVEIndexRange0_15:
7122 case Match_InvalidSVEIndexRange0_7:
7123 case Match_InvalidSVEIndexRange0_3:
7124 case Match_InvalidLabel:
7125 case Match_InvalidComplexRotationEven:
7126 case Match_InvalidComplexRotationOdd:
7127 case Match_InvalidGPR64shifted8:
7128 case Match_InvalidGPR64shifted16:
7129 case Match_InvalidGPR64shifted32:
7130 case Match_InvalidGPR64shifted64:
7131 case Match_InvalidGPR64shifted128:
7132 case Match_InvalidGPR64NoXZRshifted8:
7133 case Match_InvalidGPR64NoXZRshifted16:
7134 case Match_InvalidGPR64NoXZRshifted32:
7135 case Match_InvalidGPR64NoXZRshifted64:
7136 case Match_InvalidGPR64NoXZRshifted128:
7137 case Match_InvalidZPR32UXTW8:
7138 case Match_InvalidZPR32UXTW16:
7139 case Match_InvalidZPR32UXTW32:
7140 case Match_InvalidZPR32UXTW64:
7141 case Match_InvalidZPR32SXTW8:
7142 case Match_InvalidZPR32SXTW16:
7143 case Match_InvalidZPR32SXTW32:
7144 case Match_InvalidZPR32SXTW64:
7145 case Match_InvalidZPR64UXTW8:
7146 case Match_InvalidZPR64SXTW8:
7147 case Match_InvalidZPR64UXTW16:
7148 case Match_InvalidZPR64SXTW16:
7149 case Match_InvalidZPR64UXTW32:
7150 case Match_InvalidZPR64SXTW32:
7151 case Match_InvalidZPR64UXTW64:
7152 case Match_InvalidZPR64SXTW64:
7153 case Match_InvalidZPR32LSL8:
7154 case Match_InvalidZPR32LSL16:
7155 case Match_InvalidZPR32LSL32:
7156 case Match_InvalidZPR32LSL64:
7157 case Match_InvalidZPR64LSL8:
7158 case Match_InvalidZPR64LSL16:
7159 case Match_InvalidZPR64LSL32:
7160 case Match_InvalidZPR64LSL64:
7161 case Match_InvalidZPR0:
7162 case Match_InvalidZPR8:
7163 case Match_InvalidZPR16:
7164 case Match_InvalidZPR32:
7165 case Match_InvalidZPR64:
7166 case Match_InvalidZPR128:
7167 case Match_InvalidZPR_3b8:
7168 case Match_InvalidZPR_3b16:
7169 case Match_InvalidZPR_3b32:
7170 case Match_InvalidZPR_4b8:
7171 case Match_InvalidZPR_4b16:
7172 case Match_InvalidZPR_4b32:
7173 case Match_InvalidZPR_4b64:
7174 case Match_InvalidSVEPPRorPNRAnyReg:
7175 case Match_InvalidSVEPPRorPNRBReg:
7176 case Match_InvalidSVEPredicateAnyReg:
7177 case Match_InvalidSVEPattern:
7178 case Match_InvalidSVEVecLenSpecifier:
7179 case Match_InvalidSVEPredicateBReg:
7180 case Match_InvalidSVEPredicateHReg:
7181 case Match_InvalidSVEPredicateSReg:
7182 case Match_InvalidSVEPredicateDReg:
7183 case Match_InvalidSVEPredicate3bAnyReg:
7184 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7185 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7186 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7187 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7188 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7189 case Match_InvalidSVEPNPredicateBReg:
7190 case Match_InvalidSVEPNPredicateHReg:
7191 case Match_InvalidSVEPNPredicateSReg:
7192 case Match_InvalidSVEPNPredicateDReg:
7193 case Match_InvalidSVEPredicateListMul2x8:
7194 case Match_InvalidSVEPredicateListMul2x16:
7195 case Match_InvalidSVEPredicateListMul2x32:
7196 case Match_InvalidSVEPredicateListMul2x64:
7197 case Match_InvalidSVEExactFPImmOperandHalfOne:
7198 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7199 case Match_InvalidSVEExactFPImmOperandZeroOne:
7200 case Match_InvalidMatrixTile16:
7201 case Match_InvalidMatrixTile32:
7202 case Match_InvalidMatrixTile64:
7203 case Match_InvalidMatrix:
7204 case Match_InvalidMatrix8:
7205 case Match_InvalidMatrix16:
7206 case Match_InvalidMatrix32:
7207 case Match_InvalidMatrix64:
7208 case Match_InvalidMatrixTileVectorH8:
7209 case Match_InvalidMatrixTileVectorH16:
7210 case Match_InvalidMatrixTileVectorH32:
7211 case Match_InvalidMatrixTileVectorH64:
7212 case Match_InvalidMatrixTileVectorH128:
7213 case Match_InvalidMatrixTileVectorV8:
7214 case Match_InvalidMatrixTileVectorV16:
7215 case Match_InvalidMatrixTileVectorV32:
7216 case Match_InvalidMatrixTileVectorV64:
7217 case Match_InvalidMatrixTileVectorV128:
7218 case Match_InvalidSVCR:
7219 case Match_InvalidMatrixIndexGPR32_12_15:
7220 case Match_InvalidMatrixIndexGPR32_8_11:
7221 case Match_InvalidLookupTable:
7222 case Match_InvalidZPRMul2_Lo8:
7223 case Match_InvalidZPRMul2_Hi8:
7224 case Match_InvalidZPRMul2_Lo16:
7225 case Match_InvalidZPRMul2_Hi16:
7226 case Match_InvalidZPRMul2_Lo32:
7227 case Match_InvalidZPRMul2_Hi32:
7228 case Match_InvalidZPRMul2_Lo64:
7229 case Match_InvalidZPRMul2_Hi64:
7230 case Match_InvalidZPR_K0:
7231 case Match_InvalidSVEVectorList2x8Mul2:
7232 case Match_InvalidSVEVectorList2x16Mul2:
7233 case Match_InvalidSVEVectorList2x32Mul2:
7234 case Match_InvalidSVEVectorList2x64Mul2:
7235 case Match_InvalidSVEVectorList2x128Mul2:
7236 case Match_InvalidSVEVectorList4x8Mul4:
7237 case Match_InvalidSVEVectorList4x16Mul4:
7238 case Match_InvalidSVEVectorList4x32Mul4:
7239 case Match_InvalidSVEVectorList4x64Mul4:
7240 case Match_InvalidSVEVectorList4x128Mul4:
7241 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7242 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7243 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7244 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7245 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7246 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7247 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7248 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7249 case Match_InvalidSVEVectorListStrided2x8:
7250 case Match_InvalidSVEVectorListStrided2x16:
7251 case Match_InvalidSVEVectorListStrided2x32:
7252 case Match_InvalidSVEVectorListStrided2x64:
7253 case Match_InvalidSVEVectorListStrided4x8:
7254 case Match_InvalidSVEVectorListStrided4x16:
7255 case Match_InvalidSVEVectorListStrided4x32:
7256 case Match_InvalidSVEVectorListStrided4x64:
7257 case Match_MSR:
7258 case Match_MRS: {
7259 if (ErrorInfo >= Operands.size())
7260 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7261 // Any time we get here, there's nothing fancy to do. Just get the
7262 // operand SMLoc and display the diagnostic.
7263 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7264 if (ErrorLoc == SMLoc())
7265 ErrorLoc = IDLoc;
7266 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7267 }
7268 }
7269
7270 llvm_unreachable("Implement any new match types added!");
7271}
7272
7273 /// ParseDirective parses the AArch64-specific directives
7274bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7275 const MCContext::Environment Format = getContext().getObjectFileType();
7276 bool IsMachO = Format == MCContext::IsMachO;
7277 bool IsCOFF = Format == MCContext::IsCOFF;
7278 bool IsELF = Format == MCContext::IsELF;
7279
7280 auto IDVal = DirectiveID.getIdentifier().lower();
7281 SMLoc Loc = DirectiveID.getLoc();
7282 if (IDVal == ".arch")
7283 parseDirectiveArch(Loc);
7284 else if (IDVal == ".cpu")
7285 parseDirectiveCPU(Loc);
7286 else if (IDVal == ".tlsdesccall")
7287 parseDirectiveTLSDescCall(Loc);
7288 else if (IDVal == ".ltorg" || IDVal == ".pool")
7289 parseDirectiveLtorg(Loc);
7290 else if (IDVal == ".unreq")
7291 parseDirectiveUnreq(Loc);
7292 else if (IDVal == ".inst")
7293 parseDirectiveInst(Loc);
7294 else if (IDVal == ".cfi_negate_ra_state")
7295 parseDirectiveCFINegateRAState();
7296 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7297 parseDirectiveCFINegateRAStateWithPC();
7298 else if (IDVal == ".cfi_b_key_frame")
7299 parseDirectiveCFIBKeyFrame();
7300 else if (IDVal == ".cfi_mte_tagged_frame")
7301 parseDirectiveCFIMTETaggedFrame();
7302 else if (IDVal == ".arch_extension")
7303 parseDirectiveArchExtension(Loc);
7304 else if (IDVal == ".variant_pcs")
7305 parseDirectiveVariantPCS(Loc);
7306 else if (IsMachO) {
7307 if (IDVal == MCLOHDirectiveName())
7308 parseDirectiveLOH(IDVal, Loc);
7309 else
7310 return true;
7311 } else if (IsCOFF) {
7312 if (IDVal == ".seh_stackalloc")
7313 parseDirectiveSEHAllocStack(Loc);
7314 else if (IDVal == ".seh_endprologue")
7315 parseDirectiveSEHPrologEnd(Loc);
7316 else if (IDVal == ".seh_save_r19r20_x")
7317 parseDirectiveSEHSaveR19R20X(Loc);
7318 else if (IDVal == ".seh_save_fplr")
7319 parseDirectiveSEHSaveFPLR(Loc);
7320 else if (IDVal == ".seh_save_fplr_x")
7321 parseDirectiveSEHSaveFPLRX(Loc);
7322 else if (IDVal == ".seh_save_reg")
7323 parseDirectiveSEHSaveReg(Loc);
7324 else if (IDVal == ".seh_save_reg_x")
7325 parseDirectiveSEHSaveRegX(Loc);
7326 else if (IDVal == ".seh_save_regp")
7327 parseDirectiveSEHSaveRegP(Loc);
7328 else if (IDVal == ".seh_save_regp_x")
7329 parseDirectiveSEHSaveRegPX(Loc);
7330 else if (IDVal == ".seh_save_lrpair")
7331 parseDirectiveSEHSaveLRPair(Loc);
7332 else if (IDVal == ".seh_save_freg")
7333 parseDirectiveSEHSaveFReg(Loc);
7334 else if (IDVal == ".seh_save_freg_x")
7335 parseDirectiveSEHSaveFRegX(Loc);
7336 else if (IDVal == ".seh_save_fregp")
7337 parseDirectiveSEHSaveFRegP(Loc);
7338 else if (IDVal == ".seh_save_fregp_x")
7339 parseDirectiveSEHSaveFRegPX(Loc);
7340 else if (IDVal == ".seh_set_fp")
7341 parseDirectiveSEHSetFP(Loc);
7342 else if (IDVal == ".seh_add_fp")
7343 parseDirectiveSEHAddFP(Loc);
7344 else if (IDVal == ".seh_nop")
7345 parseDirectiveSEHNop(Loc);
7346 else if (IDVal == ".seh_save_next")
7347 parseDirectiveSEHSaveNext(Loc);
7348 else if (IDVal == ".seh_startepilogue")
7349 parseDirectiveSEHEpilogStart(Loc);
7350 else if (IDVal == ".seh_endepilogue")
7351 parseDirectiveSEHEpilogEnd(Loc);
7352 else if (IDVal == ".seh_trap_frame")
7353 parseDirectiveSEHTrapFrame(Loc);
7354 else if (IDVal == ".seh_pushframe")
7355 parseDirectiveSEHMachineFrame(Loc);
7356 else if (IDVal == ".seh_context")
7357 parseDirectiveSEHContext(Loc);
7358 else if (IDVal == ".seh_ec_context")
7359 parseDirectiveSEHECContext(Loc);
7360 else if (IDVal == ".seh_clear_unwound_to_call")
7361 parseDirectiveSEHClearUnwoundToCall(Loc);
7362 else if (IDVal == ".seh_pac_sign_lr")
7363 parseDirectiveSEHPACSignLR(Loc);
7364 else if (IDVal == ".seh_save_any_reg")
7365 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7366 else if (IDVal == ".seh_save_any_reg_p")
7367 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7368 else if (IDVal == ".seh_save_any_reg_x")
7369 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7370 else if (IDVal == ".seh_save_any_reg_px")
7371 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7372 else if (IDVal == ".seh_allocz")
7373 parseDirectiveSEHAllocZ(Loc);
7374 else if (IDVal == ".seh_save_zreg")
7375 parseDirectiveSEHSaveZReg(Loc);
7376 else if (IDVal == ".seh_save_preg")
7377 parseDirectiveSEHSavePReg(Loc);
7378 else
7379 return true;
7380 } else if (IsELF) {
7381 if (IDVal == ".aeabi_subsection")
7382 parseDirectiveAeabiSubSectionHeader(Loc);
7383 else if (IDVal == ".aeabi_attribute")
7384 parseDirectiveAeabiAArch64Attr(Loc);
7385 else
7386 return true;
7387 } else
7388 return true;
7389 return false;
7390}
7391
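// Expands the legacy '+crypto'/'+nocrypto' pseudo-extension into concrete
// features: sha2+aes for v8.1-A through v8.3-A, and sm4+sha3+sha2+aes for the
// later architectures listed below (with the corresponding 'no*' forms for
// '+nocrypto').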
7392static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7393 SmallVector<StringRef, 4> &RequestedExtensions) {
7394 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7395 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7396
7397 if (!NoCrypto && Crypto) {
7398 // Map 'generic' (and others) to sha2 and aes, because
7399 // that was the traditional meaning of crypto.
7400 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7401 ArchInfo == AArch64::ARMV8_3A) {
7402 RequestedExtensions.push_back("sha2");
7403 RequestedExtensions.push_back("aes");
7404 }
7405 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7406 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7407 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7408 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7409 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7410 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7411 RequestedExtensions.push_back("sm4");
7412 RequestedExtensions.push_back("sha3");
7413 RequestedExtensions.push_back("sha2");
7414 RequestedExtensions.push_back("aes");
7415 }
7416 } else if (NoCrypto) {
7417 // Map 'generic' (and others) to sha2 and aes, because
7418 // that was the traditional meaning of crypto.
7419 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7420 ArchInfo == AArch64::ARMV8_3A) {
7421 RequestedExtensions.push_back("nosha2");
7422 RequestedExtensions.push_back("noaes");
7423 }
7424 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7425 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7426 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7427 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7428 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7429 ArchInfo == AArch64::ARMV9_4A) {
7430 RequestedExtensions.push_back("nosm4");
7431 RequestedExtensions.push_back("nosha3");
7432 RequestedExtensions.push_back("nosha2");
7433 RequestedExtensions.push_back("noaes");
7434 }
7435 }
7436}
7437
7438 static SMLoc incrementLoc(SMLoc L, int Offset) {
7439 return SMLoc::getFromPointer(L.getPointer() + Offset);
7440}
7441
7442/// parseDirectiveArch
7443/// ::= .arch token
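/// e.g. (illustrative): .arch armv8.2-a+crypto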
7444bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7445 SMLoc CurLoc = getLoc();
7446
7447 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7448 StringRef Arch, ExtensionString;
7449 std::tie(Arch, ExtensionString) = Name.split('+');
7450
7451 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7452 if (!ArchInfo)
7453 return Error(CurLoc, "unknown arch name");
7454
7455 if (parseToken(AsmToken::EndOfStatement))
7456 return true;
7457
7458 // Get the architecture and extension features.
7459 std::vector<StringRef> AArch64Features;
7460 AArch64Features.push_back(ArchInfo->ArchFeature);
7461 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7462
7463 MCSubtargetInfo &STI = copySTI();
7464 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7465 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7466 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7467
7468 SmallVector<StringRef, 4> RequestedExtensions;
7469 if (!ExtensionString.empty())
7470 ExtensionString.split(RequestedExtensions, '+');
7471
7472 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7473 CurLoc = incrementLoc(CurLoc, Arch.size());
7474
7475 for (auto Name : RequestedExtensions) {
7476 // Advance source location past '+'.
7477 CurLoc = incrementLoc(CurLoc, 1);
7478
7479 bool EnableFeature = !Name.consume_front_insensitive("no");
7480
7481 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7482 return Extension.Name == Name;
7483 });
7484
7485 if (It == std::end(ExtensionMap))
7486 return Error(CurLoc, "unsupported architectural extension: " + Name);
7487
7488 if (EnableFeature)
7489 STI.SetFeatureBitsTransitively(It->Features);
7490 else
7491 STI.ClearFeatureBitsTransitively(It->Features);
7492 CurLoc = incrementLoc(CurLoc, Name.size());
7493 }
7494 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7495 setAvailableFeatures(Features);
7496
7497 getTargetStreamer().emitDirectiveArch(Name);
7498 return false;
7499}
7500
7501/// parseDirectiveArchExtension
7502/// ::= .arch_extension [no]feature
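/// e.g. (illustrative): .arch_extension sve or .arch_extension nosve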
7503bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7504 SMLoc ExtLoc = getLoc();
7505
7506 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7507
7508 if (parseEOL())
7509 return true;
7510
7511 bool EnableFeature = true;
7512 StringRef Name = FullName;
7513 if (Name.starts_with_insensitive("no")) {
7514 EnableFeature = false;
7515 Name = Name.substr(2);
7516 }
7517
7518 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7519 return Extension.Name == Name;
7520 });
7521
7522 if (It == std::end(ExtensionMap))
7523 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7524
7525 MCSubtargetInfo &STI = copySTI();
7526 if (EnableFeature)
7527 STI.SetFeatureBitsTransitively(It->Features);
7528 else
7529 STI.ClearFeatureBitsTransitively(It->Features);
7530 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7531 setAvailableFeatures(Features);
7532
7533 getTargetStreamer().emitDirectiveArchExtension(FullName);
7534 return false;
7535}
7536
7537/// parseDirectiveCPU
7538/// ::= .cpu id
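/// e.g. (illustrative): .cpu cortex-a75+nocrypto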
7539bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7540 SMLoc CurLoc = getLoc();
7541
7542 StringRef CPU, ExtensionString;
7543 std::tie(CPU, ExtensionString) =
7544 getParser().parseStringToEndOfStatement().trim().split('+');
7545
7546 if (parseToken(AsmToken::EndOfStatement))
7547 return true;
7548
7549 SmallVector<StringRef, 4> RequestedExtensions;
7550 if (!ExtensionString.empty())
7551 ExtensionString.split(RequestedExtensions, '+');
7552
7553 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7554 if (!CpuArch) {
7555 Error(CurLoc, "unknown CPU name");
7556 return false;
7557 }
7558 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7559
7560 MCSubtargetInfo &STI = copySTI();
7561 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7562 CurLoc = incrementLoc(CurLoc, CPU.size());
7563
7564 for (auto Name : RequestedExtensions) {
7565 // Advance source location past '+'.
7566 CurLoc = incrementLoc(CurLoc, 1);
7567
7568 bool EnableFeature = !Name.consume_front_insensitive("no");
7569
7570 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7571 return Extension.Name == Name;
7572 });
7573
7574 if (It == std::end(ExtensionMap))
7575 return Error(CurLoc, "unsupported architectural extension: " + Name);
7576
7577 if (EnableFeature)
7578 STI.SetFeatureBitsTransitively(It->Features);
7579 else
7580 STI.ClearFeatureBitsTransitively(It->Features);
7581 CurLoc = incrementLoc(CurLoc, Name.size());
7582 }
7583 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7584 setAvailableFeatures(Features);
7585 return false;
7586}
7587
7588/// parseDirectiveInst
7589/// ::= .inst opcode [, ...]
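/// e.g. (illustrative): .inst 0xd503201f (the A64 NOP encoding)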
7590bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7591 if (getLexer().is(AsmToken::EndOfStatement))
7592 return Error(Loc, "expected expression following '.inst' directive");
7593
7594 auto parseOp = [&]() -> bool {
7595 SMLoc L = getLoc();
7596 const MCExpr *Expr = nullptr;
7597 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7598 return true;
7599 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7600 if (check(!Value, L, "expected constant expression"))
7601 return true;
7602 getTargetStreamer().emitInst(Value->getValue());
7603 return false;
7604 };
7605
7606 return parseMany(parseOp);
7607}
7608
7609// parseDirectiveTLSDescCall:
7610// ::= .tlsdesccall symbol
7611bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7612 StringRef Name;
7613 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7614 parseToken(AsmToken::EndOfStatement))
7615 return true;
7616
7617 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7618 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7620
7621 MCInst Inst;
7622 Inst.setOpcode(AArch64::TLSDESCCALL);
7623 Inst.addOperand(MCOperand::createExpr(Expr));
7624
7625 getParser().getStreamer().emitInstruction(Inst, getSTI());
7626 return false;
7627}
7628
7629/// ::= .loh <lohName | lohId> label1, ..., labelN
7630/// The number of arguments depends on the loh identifier.
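/// e.g. (illustrative): .loh AdrpAdd Ltmp0, Ltmp1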
7631bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7632 MCLOHType Kind;
7633 if (getTok().isNot(AsmToken::Identifier)) {
7634 if (getTok().isNot(AsmToken::Integer))
7635 return TokError("expected an identifier or a number in directive");
7636 // We successfully get a numeric value for the identifier.
7637 // Check if it is valid.
7638 int64_t Id = getTok().getIntVal();
7639 if (Id <= -1U && !isValidMCLOHType(Id))
7640 return TokError("invalid numeric identifier in directive");
7641 Kind = (MCLOHType)Id;
7642 } else {
7643 StringRef Name = getTok().getIdentifier();
7644 // We successfully parse an identifier.
7645 // Check if it is a recognized one.
7646 int Id = MCLOHNameToId(Name);
7647
7648 if (Id == -1)
7649 return TokError("invalid identifier in directive");
7650 Kind = (MCLOHType)Id;
7651 }
7652 // Consume the identifier.
7653 Lex();
7654 // Get the number of arguments of this LOH.
7655 int NbArgs = MCLOHIdToNbArgs(Kind);
7656
7657 assert(NbArgs != -1 && "Invalid number of arguments");
7658
7659 SmallVector<MCSymbol *, 3> Args;
7660 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7661 StringRef Name;
7662 if (getParser().parseIdentifier(Name))
7663 return TokError("expected identifier in directive");
7664 Args.push_back(getContext().getOrCreateSymbol(Name));
7665
7666 if (Idx + 1 == NbArgs)
7667 break;
7668 if (parseComma())
7669 return true;
7670 }
7671 if (parseEOL())
7672 return true;
7673
7674 getStreamer().emitLOHDirective(Kind, Args);
7675 return false;
7676}
7677
7678/// parseDirectiveLtorg
7679/// ::= .ltorg | .pool
7680bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7681 if (parseEOL())
7682 return true;
7683 getTargetStreamer().emitCurrentConstantPool();
7684 return false;
7685}
7686
7687/// parseDirectiveReq
7688/// ::= name .req registername
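/// e.g. (illustrative): "counter .req x4", later released with ".unreq counter"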
7689bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7690 Lex(); // Eat the '.req' token.
7691 SMLoc SRegLoc = getLoc();
7692 RegKind RegisterKind = RegKind::Scalar;
7693 MCRegister RegNum;
7694 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7695
7696 if (!ParseRes.isSuccess()) {
7697 StringRef Kind;
7698 RegisterKind = RegKind::NeonVector;
7699 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7700
7701 if (ParseRes.isFailure())
7702 return true;
7703
7704 if (ParseRes.isSuccess() && !Kind.empty())
7705 return Error(SRegLoc, "vector register without type specifier expected");
7706 }
7707
7708 if (!ParseRes.isSuccess()) {
7709 StringRef Kind;
7710 RegisterKind = RegKind::SVEDataVector;
7711 ParseRes =
7712 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7713
7714 if (ParseRes.isFailure())
7715 return true;
7716
7717 if (ParseRes.isSuccess() && !Kind.empty())
7718 return Error(SRegLoc,
7719 "sve vector register without type specifier expected");
7720 }
7721
7722 if (!ParseRes.isSuccess()) {
7723 StringRef Kind;
7724 RegisterKind = RegKind::SVEPredicateVector;
7725 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7726
7727 if (ParseRes.isFailure())
7728 return true;
7729
7730 if (ParseRes.isSuccess() && !Kind.empty())
7731 return Error(SRegLoc,
7732 "sve predicate register without type specifier expected");
7733 }
7734
7735 if (!ParseRes.isSuccess())
7736 return Error(SRegLoc, "register name or alias expected");
7737
7738 // Shouldn't be anything else.
7739 if (parseEOL())
7740 return true;
7741
7742 auto pair = std::make_pair(RegisterKind, RegNum);
7743 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7744 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7745
7746 return false;
7747}
7748
7749 /// parseDirectiveUnreq
7750/// ::= .unreq registername
7751bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7752 if (getTok().isNot(AsmToken::Identifier))
7753 return TokError("unexpected input in .unreq directive.");
7754 RegisterReqs.erase(getTok().getIdentifier().lower());
7755 Lex(); // Eat the identifier.
7756 return parseToken(AsmToken::EndOfStatement);
7757}
7758
7759bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7760 if (parseEOL())
7761 return true;
7762 getStreamer().emitCFINegateRAState();
7763 return false;
7764}
7765
7766bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7767 if (parseEOL())
7768 return true;
7769 getStreamer().emitCFINegateRAStateWithPC();
7770 return false;
7771}
7772
7773/// parseDirectiveCFIBKeyFrame
7774 /// ::= .cfi_b_key_frame
7775bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7776 if (parseEOL())
7777 return true;
7778 getStreamer().emitCFIBKeyFrame();
7779 return false;
7780}
7781
7782/// parseDirectiveCFIMTETaggedFrame
7783/// ::= .cfi_mte_tagged_frame
7784bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7785 if (parseEOL())
7786 return true;
7787 getStreamer().emitCFIMTETaggedFrame();
7788 return false;
7789}
7790
7791/// parseDirectiveVariantPCS
7792/// ::= .variant_pcs symbolname
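/// e.g. (illustrative): .variant_pcs my_sve_callee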
7793bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7794 StringRef Name;
7795 if (getParser().parseIdentifier(Name))
7796 return TokError("expected symbol name");
7797 if (parseEOL())
7798 return true;
7799 getTargetStreamer().emitDirectiveVariantPCS(
7800 getContext().getOrCreateSymbol(Name));
7801 return false;
7802}
7803
7804/// parseDirectiveSEHAllocStack
7805/// ::= .seh_stackalloc
7806bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7807 int64_t Size;
7808 if (parseImmExpr(Size))
7809 return true;
7810 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7811 return false;
7812}
7813
7814/// parseDirectiveSEHPrologEnd
7815/// ::= .seh_endprologue
7816bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7817 getTargetStreamer().emitARM64WinCFIPrologEnd();
7818 return false;
7819}
7820
7821/// parseDirectiveSEHSaveR19R20X
7822/// ::= .seh_save_r19r20_x
7823bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7824 int64_t Offset;
7825 if (parseImmExpr(Offset))
7826 return true;
7827 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7828 return false;
7829}
7830
7831/// parseDirectiveSEHSaveFPLR
7832/// ::= .seh_save_fplr
7833bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7834 int64_t Offset;
7835 if (parseImmExpr(Offset))
7836 return true;
7837 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7838 return false;
7839}
7840
7841/// parseDirectiveSEHSaveFPLRX
7842/// ::= .seh_save_fplr_x
7843bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7844 int64_t Offset;
7845 if (parseImmExpr(Offset))
7846 return true;
7847 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7848 return false;
7849}
7850
7851/// parseDirectiveSEHSaveReg
7852/// ::= .seh_save_reg
7853bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7854 unsigned Reg;
7855 int64_t Offset;
7856 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7857 parseComma() || parseImmExpr(Offset))
7858 return true;
7859 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7860 return false;
7861}
7862
7863/// parseDirectiveSEHSaveRegX
7864/// ::= .seh_save_reg_x
7865bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7866 unsigned Reg;
7867 int64_t Offset;
7868 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7869 parseComma() || parseImmExpr(Offset))
7870 return true;
7871 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7872 return false;
7873}
7874
7875/// parseDirectiveSEHSaveRegP
7876/// ::= .seh_save_regp
7877bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7878 unsigned Reg;
7879 int64_t Offset;
7880 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7881 parseComma() || parseImmExpr(Offset))
7882 return true;
7883 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7884 return false;
7885}
7886
7887/// parseDirectiveSEHSaveRegPX
7888/// ::= .seh_save_regp_x
7889bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7890 unsigned Reg;
7891 int64_t Offset;
7892 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7893 parseComma() || parseImmExpr(Offset))
7894 return true;
7895 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7896 return false;
7897}
7898
7899/// parseDirectiveSEHSaveLRPair
7900/// ::= .seh_save_lrpair
7901bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7902 unsigned Reg;
7903 int64_t Offset;
7904 L = getLoc();
7905 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7906 parseComma() || parseImmExpr(Offset))
7907 return true;
7908 if (check(((Reg - 19) % 2 != 0), L,
7909 "expected register with even offset from x19"))
7910 return true;
7911 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7912 return false;
7913}
7914
7915/// parseDirectiveSEHSaveFReg
7916/// ::= .seh_save_freg
7917bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7918 unsigned Reg;
7919 int64_t Offset;
7920 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7921 parseComma() || parseImmExpr(Offset))
7922 return true;
7923 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7924 return false;
7925}
7926
7927/// parseDirectiveSEHSaveFRegX
7928/// ::= .seh_save_freg_x
7929bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7930 unsigned Reg;
7931 int64_t Offset;
7932 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7933 parseComma() || parseImmExpr(Offset))
7934 return true;
7935 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7936 return false;
7937}
7938
7939/// parseDirectiveSEHSaveFRegP
7940/// ::= .seh_save_fregp
7941bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7942 unsigned Reg;
7943 int64_t Offset;
7944 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7945 parseComma() || parseImmExpr(Offset))
7946 return true;
7947 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7948 return false;
7949}
7950
7951/// parseDirectiveSEHSaveFRegPX
7952/// ::= .seh_save_fregp_x
7953bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7954 unsigned Reg;
7955 int64_t Offset;
7956 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7957 parseComma() || parseImmExpr(Offset))
7958 return true;
7959 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7960 return false;
7961}
7962
7963/// parseDirectiveSEHSetFP
7964/// ::= .seh_set_fp
7965bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7966 getTargetStreamer().emitARM64WinCFISetFP();
7967 return false;
7968}
7969
7970/// parseDirectiveSEHAddFP
7971/// ::= .seh_add_fp
7972bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7973 int64_t Size;
7974 if (parseImmExpr(Size))
7975 return true;
7976 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7977 return false;
7978}
7979
7980/// parseDirectiveSEHNop
7981/// ::= .seh_nop
7982bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7983 getTargetStreamer().emitARM64WinCFINop();
7984 return false;
7985}
7986
7987/// parseDirectiveSEHSaveNext
7988/// ::= .seh_save_next
7989bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7990 getTargetStreamer().emitARM64WinCFISaveNext();
7991 return false;
7992}
7993
7994/// parseDirectiveSEHEpilogStart
7995/// ::= .seh_startepilogue
7996bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7997 getTargetStreamer().emitARM64WinCFIEpilogStart();
7998 return false;
7999}
8000
8001/// parseDirectiveSEHEpilogEnd
8002/// ::= .seh_endepilogue
8003bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
8004 getTargetStreamer().emitARM64WinCFIEpilogEnd();
8005 return false;
8006}
8007
8008/// parseDirectiveSEHTrapFrame
8009/// ::= .seh_trap_frame
8010bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
8011 getTargetStreamer().emitARM64WinCFITrapFrame();
8012 return false;
8013}
8014
8015/// parseDirectiveSEHMachineFrame
8016/// ::= .seh_pushframe
8017bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
8018 getTargetStreamer().emitARM64WinCFIMachineFrame();
8019 return false;
8020}
8021
8022/// parseDirectiveSEHContext
8023/// ::= .seh_context
8024bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
8025 getTargetStreamer().emitARM64WinCFIContext();
8026 return false;
8027}
8028
8029/// parseDirectiveSEHECContext
8030/// ::= .seh_ec_context
8031bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
8032 getTargetStreamer().emitARM64WinCFIECContext();
8033 return false;
8034}
8035
8036/// parseDirectiveSEHClearUnwoundToCall
8037/// ::= .seh_clear_unwound_to_call
8038bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
8039 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
8040 return false;
8041}
8042
8043/// parseDirectiveSEHPACSignLR
8044/// ::= .seh_pac_sign_lr
8045bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
8046 getTargetStreamer().emitARM64WinCFIPACSignLR();
8047 return false;
8048}
8049
8050/// parseDirectiveSEHSaveAnyReg
8051/// ::= .seh_save_any_reg
8052/// ::= .seh_save_any_reg_p
8053/// ::= .seh_save_any_reg_x
8054/// ::= .seh_save_any_reg_px
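/// Illustrative example: ".seh_save_any_reg_p q0, #32" records a paired save of
/// q0/q1 at offset 32. Offsets must be non-negative and a multiple of 16 for the
/// paired/writeback (and all Q-register) forms, or a multiple of 8 otherwise.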
8055bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
8056 bool Writeback) {
8057 MCRegister Reg;
8058 SMLoc Start, End;
8059 int64_t Offset;
8060 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
8061 parseComma() || parseImmExpr(Offset))
8062 return true;
8063
8064 if (Reg == AArch64::FP || Reg == AArch64::LR ||
8065 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
8066 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8067 return Error(L, "invalid save_any_reg offset");
8068 unsigned EncodedReg;
8069 if (Reg == AArch64::FP)
8070 EncodedReg = 29;
8071 else if (Reg == AArch64::LR)
8072 EncodedReg = 30;
8073 else
8074 EncodedReg = Reg - AArch64::X0;
8075 if (Paired) {
8076 if (Reg == AArch64::LR)
8077 return Error(Start, "lr cannot be paired with another register");
8078 if (Writeback)
8079 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
8080 else
8081 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
8082 } else {
8083 if (Writeback)
8084 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
8085 else
8086 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
8087 }
8088 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
8089 unsigned EncodedReg = Reg - AArch64::D0;
8090 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
8091 return Error(L, "invalid save_any_reg offset");
8092 if (Paired) {
8093 if (Reg == AArch64::D31)
8094 return Error(Start, "d31 cannot be paired with another register");
8095 if (Writeback)
8096 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
8097 else
8098 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
8099 } else {
8100 if (Writeback)
8101 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
8102 else
8103 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
8104 }
8105 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
8106 unsigned EncodedReg = Reg - AArch64::Q0;
8107 if (Offset < 0 || Offset % 16)
8108 return Error(L, "invalid save_any_reg offset");
8109 if (Paired) {
8110 if (Reg == AArch64::Q31)
8111 return Error(Start, "q31 cannot be paired with another register");
8112 if (Writeback)
8113 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
8114 else
8115 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
8116 } else {
8117 if (Writeback)
8118 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
8119 else
8120 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
8121 }
8122 } else {
8123 return Error(Start, "save_any_reg register must be x, q or d register");
8124 }
8125 return false;
8126}
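// Illustrative usage of the four spellings handled above; the registers and
// offsets are arbitrary but satisfy the alignment checks in this function:
//   .seh_save_any_reg     x9, 16    // single x-reg, offset multiple of 8
//   .seh_save_any_reg_p   q6, 32    // q6/q7 pair, offset multiple of 16
//   .seh_save_any_reg_x   d10, 16   // single d-reg with writeback
//   .seh_save_any_reg_px  x19, 48   // x19/x20 pair with writeback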
8127
8128/// parseDirectiveSEHAllocZ
8129/// ::= .seh_allocz
8130bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
8131 int64_t Offset;
8132 if (parseImmExpr(Offset))
8133 return true;
8134 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
8135 return false;
8136}
8137
8138/// parseDirectiveSEHSaveZReg
8139/// ::= .seh_save_zreg
8140bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
8141 MCRegister RegNum;
8142 StringRef Kind;
8143 int64_t Offset;
8144 ParseStatus Res =
8145 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8146 if (!Res.isSuccess())
8147 return true;
8148 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
8149 "expected register in range z8 to z23"))
8150 return true;
8151 if (parseComma() || parseImmExpr(Offset))
8152 return true;
8153 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
8154 return false;
8155}
8156
8157/// parseDirectiveSEHSavePReg
8158/// ::= .seh_save_preg
8159bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
8160 MCRegister RegNum;
8161 StringRef Kind;
8162 int64_t Offset;
8163 ParseStatus Res =
8164 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
8165 if (!Res.isSuccess())
8166 return true;
8167 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
8168 "expected register in range p4 to p15"))
8169 return true;
8170 if (parseComma() || parseImmExpr(Offset))
8171 return true;
8172 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
8173 return false;
8174}
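// Illustrative usage of the two SVE save directives above; the registers must
// lie in z8-z23 / p4-p15, and the offsets here are arbitrary:
//   .seh_save_zreg z8, 0
//   .seh_save_preg p4, 2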
8175
8176bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
8177 // Handle parsing of .aeabi_subsection directives
8178 // - On first declaration of a subsection, expect exactly three identifiers
8179 // after `.aeabi_subsection`: the subsection name and two parameters.
8180 // - When switching to an existing subsection, it is valid to provide only
8181 // the subsection name, or the name together with the two parameters.
8182 MCAsmParser &Parser = getParser();
8183
8184 // Consume the name (subsection name)
8185 StringRef SubsectionName;
8186 AArch64BuildAttributes::VendorID SubsectionNameID;
8187 if (Parser.getTok().is(AsmToken::Identifier)) {
8188 SubsectionName = Parser.getTok().getIdentifier();
8189 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
8190 } else {
8191 Error(Parser.getTok().getLoc(), "subsection name not found");
8192 return true;
8193 }
8194 Parser.Lex();
8195
8196 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
8197 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
8198 // Check whether only the subsection name was provided.
8199 // If so, the user is trying to switch to a subsection that should have been
8200 // declared before.
8201 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
8202 if (SubsectionExists) {
8203 getTargetStreamer().emitAttributesSubsection(
8204 SubsectionName,
8205 AArch64BuildAttributes::SubsectionOptional(
8206 SubsectionExists->IsOptional),
8207 AArch64BuildAttributes::SubsectionType(
8208 SubsectionExists->ParameterType));
8209 return false;
8210 }
8211 // If the subsection does not exist, report an error.
8212 else {
8213 Error(Parser.getTok().getLoc(),
8214 "Could not switch to subsection '" + SubsectionName +
8215 "' using subsection name, subsection has not been defined");
8216 return true;
8217 }
8218 }
8219
8220 // Otherwise, expecting 2 more parameters: consume a comma
8221 // parseComma() returns *false* on success and calls Lex(), so there is no
8222 // need to call Lex() again.
8223 if (Parser.parseComma()) {
8224 return true;
8225 }
8226
8227 // Consume the first parameter (optionality parameter)
8228 AArch64BuildAttributes::SubsectionOptional IsOptional;
8229 // options: optional/required
8230 if (Parser.getTok().is(AsmToken::Identifier)) {
8231 StringRef Optionality = Parser.getTok().getIdentifier();
8232 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
8233 if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
8234 Error(Parser.getTok().getLoc(),
8235 "unknown optionality parameter, expected required|optional");
8236 return true;
8237 }
8238 if (SubsectionExists) {
8239 if (IsOptional != SubsectionExists->IsOptional) {
8240 Error(Parser.getTok().getLoc(),
8241 "optionality mismatch! subsection '" + SubsectionName +
8242 "' already exists with optionality defined as '" +
8243 AArch64BuildAttributes::getOptionalStr(
8244 SubsectionExists->IsOptional) +
8245 "' and not '" +
8246 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
8247 return true;
8248 }
8249 }
8250 } else {
8251 Error(Parser.getTok().getLoc(),
8252 "optionality parameter not found, expected required|optional");
8253 return true;
8254 }
8255 // Check for possible IsOptional unaccepted values for known subsections
8256 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
8257 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
8258 Error(Parser.getTok().getLoc(),
8259 "aeabi_feature_and_bits must be marked as optional");
8260 return true;
8261 }
8262 }
8263 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8264 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8265 Error(Parser.getTok().getLoc(),
8266 "aeabi_pauthabi must be marked as required");
8267 return true;
8268 }
8269 }
8270 Parser.Lex();
8271 // consume a comma
8272 if (Parser.parseComma()) {
8273 return true;
8274 }
8275
8276 // Consume the second parameter (type parameter)
8277 AArch64BuildAttributes::SubsectionType Type;
8278 if (Parser.getTok().is(AsmToken::Identifier)) {
8279 StringRef Name = Parser.getTok().getIdentifier();
8280 Type = AArch64BuildAttributes::getTypeID(Name);
8281 if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
8282 Error(Parser.getTok().getLoc(),
8283 "unknown type parameter '" + Name + "', expected uleb128|ntbs");
8284 return true;
8285 }
8286 if (SubsectionExists) {
8287 if (Type != SubsectionExists->ParameterType) {
8288 Error(Parser.getTok().getLoc(),
8289 "type mismatch! subsection '" + SubsectionName +
8290 "' already exists with type defined as '" +
8291 AArch64BuildAttributes::getTypeStr(
8292 SubsectionExists->ParameterType) +
8293 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8294 "'");
8295 return true;
8296 }
8297 }
8298 } else {
8299 Error(Parser.getTok().getLoc(),
8300 "type parameter not found, expected uleb128|ntbs");
8301 return true;
8302 }
8303 // Check for possible unaccepted 'type' values for known subsections
8304 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8305 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8306 if (AArch64BuildAttributes::NTBS == Type) {
8307 Error(Parser.getTok().getLoc(),
8308 SubsectionName + " must be marked as ULEB128");
8309 return true;
8310 }
8311 }
8312 Parser.Lex();
8313
8314 // Parsing finished, check for trailing tokens.
8315 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8316 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
8317 "attributes subsection header directive");
8318 return true;
8319 }
8320
8321 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
8322
8323 return false;
8324}
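// Illustrative usage: a subsection is first declared with all three
// parameters and can later be re-selected by name alone. The values below
// follow the constraints enforced above (aeabi_pauthabi: required/uleb128,
// aeabi_feature_and_bits: optional/uleb128):
//   .aeabi_subsection aeabi_pauthabi, required, uleb128
//   .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
//   .aeabi_subsection aeabi_pauthabi        // switch back using the name only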
8325
8326bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8327 // Expecting 2 tokens after '.aeabi_attribute', separated by a comma, e.g.:
8328 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8329 // where (1) is the tag and (2) is the value.
8330 MCAsmParser &Parser = getParser();
8331
8332 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8333 getTargetStreamer().getActiveAttributesSubsection();
8334 if (nullptr == ActiveSubsection) {
8335 Error(Parser.getTok().getLoc(),
8336 "no active subsection, build attribute can not be added");
8337 return true;
8338 }
8339 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8340 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8341
8342 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8343 if (AArch64BuildAttributes::getVendorName(
8344 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8345 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8346 if (AArch64BuildAttributes::getVendorName(
8347 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8348 ActiveSubsectionName)
8349 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8350
8351 StringRef TagStr = "";
8352 unsigned Tag;
8353 if (Parser.getTok().is(AsmToken::Integer)) {
8354 Tag = getTok().getIntVal();
8355 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8356 TagStr = Parser.getTok().getIdentifier();
8357 switch (ActiveSubsectionID) {
8358 default:
8359 // Tag was provided as an unrecognized string instead of an unsigned
8360 // integer
8361 Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
8362 "' \nExcept for public subsections, "
8363 "tags have to be an unsigned int.");
8364 return true;
8365 break;
8366 case AArch64BuildAttributes::AEABI_PAUTHABI:
8367 Tag = AArch64BuildAttributes::getPauthABITagsID(TagStr);
8368 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8369 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8370 TagStr + "' for subsection '" +
8371 ActiveSubsectionName + "'");
8372 return true;
8373 }
8374 break;
8375 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8376 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(TagStr);
8377 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8378 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8379 TagStr + "' for subsection '" +
8380 ActiveSubsectionName + "'");
8381 return true;
8382 }
8383 break;
8384 }
8385 } else {
8386 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8387 return true;
8388 }
8389 Parser.Lex();
8390 // consume a comma
8391 // parseComma() returns *false* on success and calls Lex(), so there is no
8392 // need to call Lex() again.
8393 if (Parser.parseComma()) {
8394 return true;
8395 }
8396
8397 // Consume the second parameter (attribute value)
8398 unsigned ValueInt = unsigned(-1);
8399 std::string ValueStr = "";
8400 if (Parser.getTok().is(AsmToken::Integer)) {
8401 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8402 Error(
8403 Parser.getTok().getLoc(),
8404 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8405 return true;
8406 }
8407 ValueInt = getTok().getIntVal();
8408 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8409 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8410 Error(
8411 Parser.getTok().getLoc(),
8412 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8413 return true;
8414 }
8415 ValueStr = Parser.getTok().getIdentifier();
8416 } else if (Parser.getTok().is(AsmToken::String)) {
8417 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8418 Error(
8419 Parser.getTok().getLoc(),
8420 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8421 return true;
8422 }
8423 ValueStr = Parser.getTok().getString();
8424 } else {
8425 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8426 return true;
8427 }
8428 // Check for possible unaccepted values for known tags
8429 // (AEABI_FEATURE_AND_BITS)
8430 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8431 if (0 != ValueInt && 1 != ValueInt) {
8432 Error(Parser.getTok().getLoc(),
8433 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8434 "' options are 0|1");
8435 return true;
8436 }
8437 }
8438 Parser.Lex();
8439
8440 // Parsing finished. Check for trailing tokens.
8441 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8442 Error(Parser.getTok().getLoc(),
8443 "unexpected token for AArch64 build attributes tag and value "
8444 "attribute directive");
8445 return true;
8446 }
8447
8448 if (unsigned(-1) != ValueInt) {
8449 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
8450 }
8451 if ("" != ValueStr) {
8452 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8453 ValueStr);
8454 }
8455 return false;
8456}
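// Illustrative usage, assuming an active aeabi_feature_and_bits subsection
// (whose values are restricted to 0|1 by the check above):
//   .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
//   .aeabi_attribute  Tag_Feature_BTI, 1
//   .aeabi_attribute  7, 0      // tags may also be given as unsigned integers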
8457
8458bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8459 SMLoc EndLoc;
8460
8461 if (getParser().parseExpression(Res))
8462 return true;
8463 MCAsmParser &Parser = getParser();
8464 if (!parseOptionalToken(AsmToken::At))
8465 return false;
8466 if (getLexer().getKind() != AsmToken::Identifier)
8467 return Error(getLoc(), "expected relocation specifier");
8468
8469 std::string Identifier = Parser.getTok().getIdentifier().lower();
8470 SMLoc Loc = getLoc();
8471 Lex();
8472 if (Identifier == "auth")
8473 return parseAuthExpr(Res, EndLoc);
8474
8475 auto Spec = AArch64::S_None;
8476 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8477 if (Identifier == "got")
8478 Spec = AArch64::S_MACHO_GOT;
8479 } else {
8480 // Unofficial, experimental syntax that will be changed.
8481 if (Identifier == "gotpcrel")
8482 Spec = AArch64::S_GOTPCREL;
8483 else if (Identifier == "plt")
8484 Spec = AArch64::S_PLT;
8485 else if (Identifier == "funcinit")
8486 Spec = AArch64::S_FUNCINIT;
8487 }
8488 if (Spec == AArch64::S_None)
8489 return Error(Loc, "invalid relocation specifier");
8490 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
8491 Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
8492 SRE->getLoc());
8493 else
8494 return Error(Loc, "@ specifier only allowed after a symbol");
8495
8496 for (;;) {
8497 std::optional<MCBinaryExpr::Opcode> Opcode;
8498 if (parseOptionalToken(AsmToken::Plus))
8499 Opcode = MCBinaryExpr::Add;
8500 else if (parseOptionalToken(AsmToken::Minus))
8501 Opcode = MCBinaryExpr::Sub;
8502 else
8503 break;
8504 const MCExpr *Term;
8505 if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
8506 return true;
8507 Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
8508 }
8509 return false;
8510}
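// Illustrative data-directive forms accepted here; the non-MachO specifiers
// are the experimental syntax noted above and may change:
//   .xword sym@plt
//   .xword sym@gotpcrel + 4
//   .xword sym@AUTH(da, 42, addr)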
8511
8512/// parseAuthExpr
8513/// ::= _sym@AUTH(ib,123[,addr])
8514/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8515/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8516bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8517 MCAsmParser &Parser = getParser();
8518 MCContext &Ctx = getContext();
8519 AsmToken Tok = Parser.getTok();
8520
8521 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8522 if (parseToken(AsmToken::LParen, "expected '('"))
8523 return true;
8524
8525 if (Parser.getTok().isNot(AsmToken::Identifier))
8526 return TokError("expected key name");
8527
8528 StringRef KeyStr = Parser.getTok().getIdentifier();
8529 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8530 if (!KeyIDOrNone)
8531 return TokError("invalid key '" + KeyStr + "'");
8532 Parser.Lex();
8533
8534 if (parseToken(AsmToken::Comma, "expected ','"))
8535 return true;
8536
8537 if (Parser.getTok().isNot(AsmToken::Integer))
8538 return TokError("expected integer discriminator");
8539 int64_t Discriminator = Parser.getTok().getIntVal();
8540
8541 if (!isUInt<16>(Discriminator))
8542 return TokError("integer discriminator " + Twine(Discriminator) +
8543 " out of range [0, 0xFFFF]");
8544 Parser.Lex();
8545
8546 bool UseAddressDiversity = false;
8547 if (Parser.getTok().is(AsmToken::Comma)) {
8548 Parser.Lex();
8549 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8550 Parser.getTok().getIdentifier() != "addr")
8551 return TokError("expected 'addr'");
8552 UseAddressDiversity = true;
8553 Parser.Lex();
8554 }
8555
8556 EndLoc = Parser.getTok().getEndLoc();
8557 if (parseToken(AsmToken::RParen, "expected ')'"))
8558 return true;
8559
8560 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8561 UseAddressDiversity, Ctx, Res->getLoc());
8562 return false;
8563}
8564
8565bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8566 AArch64::Specifier &ELFSpec,
8567 AArch64::Specifier &DarwinSpec,
8568 int64_t &Addend) {
8569 ELFSpec = AArch64::S_INVALID;
8570 DarwinSpec = AArch64::S_None;
8571 Addend = 0;
8572
8573 if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
8574 ELFSpec = AE->getSpecifier();
8575 Expr = AE->getSubExpr();
8576 }
8577
8578 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8579 if (SE) {
8580 // It's a simple symbol reference with no addend.
8581 DarwinSpec = AArch64::Specifier(SE->getKind());
8582 return true;
8583 }
8584
8585 // Check that it looks like a symbol + an addend
8586 MCValue Res;
8587 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
8588 if (!Relocatable || Res.getSubSym())
8589 return false;
8590
8591 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8592 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8593 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8594 return false;
8595
8596 if (Res.getAddSym())
8597 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8598 Addend = Res.getConstant();
8599
8600 // It's some symbol reference + a constant addend, but really
8601 // shouldn't use both Darwin and ELF syntax.
8602 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8603}
8604
8605/// Force static initialization.
8606extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8607LLVMInitializeAArch64AsmParser() {
8608 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8609 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8610 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8611 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8612 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8613}
8614
8615#define GET_REGISTER_MATCHER
8616#define GET_SUBTARGET_FEATURE_NAME
8617#define GET_MATCHER_IMPLEMENTATION
8618#define GET_MNEMONIC_SPELL_CHECKER
8619#include "AArch64GenAsmMatcher.inc"
8620
8621// Define this matcher function after the auto-generated include so we
8622// have the match class enum definitions.
8623unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8624 unsigned Kind) {
8625 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8626
8627 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8628 if (!Op.isImm())
8629 return Match_InvalidOperand;
8630 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8631 if (!CE)
8632 return Match_InvalidOperand;
8633 if (CE->getValue() == ExpectedVal)
8634 return Match_Success;
8635 return Match_InvalidOperand;
8636 };
8637
8638 switch (Kind) {
8639 default:
8640 return Match_InvalidOperand;
8641 case MCK_MPR:
8642 // If the Kind is a token for the MPR register class which has the "za"
8643 // register (SME accumulator array), check if the asm is a literal "za"
8644 // token. This is for the "smstart za" alias that defines the register
8645 // as a literal token.
8646 if (Op.isTokenEqual("za"))
8647 return Match_Success;
8648 return Match_InvalidOperand;
8649
8650 // If the kind is a token for a literal immediate, check if our asm operand
8651 // matches. This is for InstAliases which have a fixed-value immediate in
8652 // the asm string, such as hints which are parsed into a specific
8653 // instruction definition.
8654#define MATCH_HASH(N) \
8655 case MCK__HASH_##N: \
8656 return MatchesOpImmediate(N);
8657 MATCH_HASH(0)
8658 MATCH_HASH(1)
8659 MATCH_HASH(2)
8660 MATCH_HASH(3)
8661 MATCH_HASH(4)
8662 MATCH_HASH(6)
8663 MATCH_HASH(7)
8664 MATCH_HASH(8)
8665 MATCH_HASH(10)
8666 MATCH_HASH(12)
8667 MATCH_HASH(14)
8668 MATCH_HASH(16)
8669 MATCH_HASH(24)
8670 MATCH_HASH(25)
8671 MATCH_HASH(26)
8672 MATCH_HASH(27)
8673 MATCH_HASH(28)
8674 MATCH_HASH(29)
8675 MATCH_HASH(30)
8676 MATCH_HASH(31)
8677 MATCH_HASH(32)
8678 MATCH_HASH(40)
8679 MATCH_HASH(48)
8680 MATCH_HASH(64)
8681#undef MATCH_HASH
8682#define MATCH_HASH_MINUS(N) \
8683 case MCK__HASH__MINUS_##N: \
8684 return MatchesOpImmediate(-N);
8685 MATCH_HASH_MINUS(4)
8686 MATCH_HASH_MINUS(8)
8687 MATCH_HASH_MINUS(16)
8688#undef MATCH_HASH_MINUS
8689 }
8690}
8691
8692ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8693
8694 SMLoc S = getLoc();
8695
8696 if (getTok().isNot(AsmToken::Identifier))
8697 return Error(S, "expected register");
8698
8699 MCRegister FirstReg;
8700 ParseStatus Res = tryParseScalarRegister(FirstReg);
8701 if (!Res.isSuccess())
8702 return Error(S, "expected first even register of a consecutive same-size "
8703 "even/odd register pair");
8704
8705 const MCRegisterClass &WRegClass =
8706 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8707 const MCRegisterClass &XRegClass =
8708 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8709
8710 bool isXReg = XRegClass.contains(FirstReg),
8711 isWReg = WRegClass.contains(FirstReg);
8712 if (!isXReg && !isWReg)
8713 return Error(S, "expected first even register of a consecutive same-size "
8714 "even/odd register pair");
8715
8716 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8717 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8718
8719 if (FirstEncoding & 0x1)
8720 return Error(S, "expected first even register of a consecutive same-size "
8721 "even/odd register pair");
8722
8723 if (getTok().isNot(AsmToken::Comma))
8724 return Error(getLoc(), "expected comma");
8725 // Eat the comma
8726 Lex();
8727
8728 SMLoc E = getLoc();
8729 MCRegister SecondReg;
8730 Res = tryParseScalarRegister(SecondReg);
8731 if (!Res.isSuccess())
8732 return Error(E, "expected second odd register of a consecutive same-size "
8733 "even/odd register pair");
8734
8735 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8736 (isXReg && !XRegClass.contains(SecondReg)) ||
8737 (isWReg && !WRegClass.contains(SecondReg)))
8738 return Error(E, "expected second odd register of a consecutive same-size "
8739 "even/odd register pair");
8740
8741 MCRegister Pair;
8742 if (isXReg) {
8743 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8744 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8745 } else {
8746 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8747 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8748 }
8749
8750 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8751 getLoc(), getContext()));
8752
8753 return ParseStatus::Success;
8754}
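// Illustrative operands: this matcher accepts a consecutive even/odd pair of
// equally sized GPRs, e.g. the register pairs of CASP (FEAT_LSE):
//   casp  x0, x1, x2, x3, [x4]
//   caspa w4, w5, w6, w7, [x2]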
8755
8756template <bool ParseShiftExtend, bool ParseSuffix>
8757ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8758 const SMLoc S = getLoc();
8759 // Check for a SVE vector register specifier first.
8760 MCRegister RegNum;
8761 StringRef Kind;
8762
8763 ParseStatus Res =
8764 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8765
8766 if (!Res.isSuccess())
8767 return Res;
8768
8769 if (ParseSuffix && Kind.empty())
8770 return ParseStatus::NoMatch;
8771
8772 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8773 if (!KindRes)
8774 return ParseStatus::NoMatch;
8775
8776 unsigned ElementWidth = KindRes->second;
8777
8778 // No shift/extend is the default.
8779 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8780 Operands.push_back(AArch64Operand::CreateVectorReg(
8781 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8782
8783 ParseStatus Res = tryParseVectorIndex(Operands);
8784 if (Res.isFailure())
8785 return ParseStatus::Failure;
8786 return ParseStatus::Success;
8787 }
8788
8789 // Eat the comma
8790 Lex();
8791
8792 // Match the shift
8793 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8794 Res = tryParseOptionalShiftExtend(ExtOpnd);
8795 if (!Res.isSuccess())
8796 return Res;
8797
8798 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8799 Operands.push_back(AArch64Operand::CreateVectorReg(
8800 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8801 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8802 Ext->hasShiftExtendAmount()));
8803
8804 return ParseStatus::Success;
8805}
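// Illustrative operands: a bare SVE data vector with an element-size suffix
// such as "z3.s", or, when ParseShiftExtend is enabled, a vector plus
// shift/extend as in the addressing form "[x0, z1.d, lsl #3]".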
8806
8807ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8808 MCAsmParser &Parser = getParser();
8809
8810 SMLoc SS = getLoc();
8811 const AsmToken &TokE = getTok();
8812 bool IsHash = TokE.is(AsmToken::Hash);
8813
8814 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8815 return ParseStatus::NoMatch;
8816
8817 int64_t Pattern;
8818 if (IsHash) {
8819 Lex(); // Eat hash
8820
8821 // Parse the immediate operand.
8822 const MCExpr *ImmVal;
8823 SS = getLoc();
8824 if (Parser.parseExpression(ImmVal))
8825 return ParseStatus::Failure;
8826
8827 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8828 if (!MCE)
8829 return TokError("invalid operand for instruction");
8830
8831 Pattern = MCE->getValue();
8832 } else {
8833 // Parse the pattern
8834 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8835 if (!Pat)
8836 return ParseStatus::NoMatch;
8837
8838 Lex();
8839 Pattern = Pat->Encoding;
8840 assert(Pattern >= 0 && Pattern < 32);
8841 }
8842
8843 Operands.push_back(
8844 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8845 SS, getLoc(), getContext()));
8846
8847 return ParseStatus::Success;
8848}
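// Illustrative operands: either a named predicate pattern or an explicit
// immediate via the '#' path, e.g.:
//   ptrue p0.s, vl8
//   cntb  x0, all, mul #2
//   ptrue p1.b, #14        // raw pattern encoding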
8849
8850ParseStatus
8851AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8852 int64_t Pattern;
8853 SMLoc SS = getLoc();
8854 const AsmToken &TokE = getTok();
8855 // Parse the pattern
8856 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8857 TokE.getString());
8858 if (!Pat)
8859 return ParseStatus::NoMatch;
8860
8861 Lex();
8862 Pattern = Pat->Encoding;
8863 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8864
8865 Operands.push_back(
8866 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8867 SS, getLoc(), getContext()));
8868
8869 return ParseStatus::Success;
8870}
8871
8872ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8873 SMLoc SS = getLoc();
8874
8875 MCRegister XReg;
8876 if (!tryParseScalarRegister(XReg).isSuccess())
8877 return ParseStatus::NoMatch;
8878
8879 MCContext &ctx = getContext();
8880 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8881 MCRegister X8Reg = RI->getMatchingSuperReg(
8882 XReg, AArch64::x8sub_0,
8883 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8884 if (!X8Reg)
8885 return Error(SS,
8886 "expected an even-numbered x-register in the range [x0,x22]");
8887
8888 Operands.push_back(
8889 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8890 return ParseStatus::Success;
8891}
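// Illustrative operand: an even-numbered x-register in [x0, x22] naming a
// group of eight consecutive registers, e.g. the data operand of LD64B/ST64B
// (FEAT_LS64):
//   ld64b x2, [x10]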
8892
8893ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8894 SMLoc S = getLoc();
8895
8896 if (getTok().isNot(AsmToken::Integer))
8897 return ParseStatus::NoMatch;
8898
8899 if (getLexer().peekTok().isNot(AsmToken::Colon))
8900 return ParseStatus::NoMatch;
8901
8902 const MCExpr *ImmF;
8903 if (getParser().parseExpression(ImmF))
8904 return ParseStatus::NoMatch;
8905
8906 if (getTok().isNot(AsmToken::Colon))
8907 return ParseStatus::NoMatch;
8908
8909 Lex(); // Eat ':'
8910 if (getTok().isNot(AsmToken::Integer))
8911 return ParseStatus::NoMatch;
8912
8913 SMLoc E = getTok().getLoc();
8914 const MCExpr *ImmL;
8915 if (getParser().parseExpression(ImmL))
8916 return ParseStatus::NoMatch;
8917
8918 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8919 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8920
8921 Operands.push_back(
8922 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8923 return ParseStatus::Success;
8924}
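// Illustrative operand (sketch): a "first:last" immediate range as used by
// some SME2 multi-vector ZA slice selectors, e.g.:
//   mova { z0.d, z1.d }, za.d[w8, 0:1]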
8925
8926template <int Adj>
8927ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8928 SMLoc S = getLoc();
8929
8930 parseOptionalToken(AsmToken::Hash);
8931 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8932
8933 if (getTok().isNot(AsmToken::Integer))
8934 return ParseStatus::NoMatch;
8935
8936 const MCExpr *Ex;
8937 if (getParser().parseExpression(Ex))
8938 return ParseStatus::NoMatch;
8939
8940 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8941 if (IsNegative)
8942 Imm = -Imm;
8943
8944 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8945 // return a value that is certain to trigger an error message about an
8946 // invalid immediate range instead of a non-descriptive invalid operand error.
8947 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8948 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8949 Imm = -2;
8950 else
8951 Imm += Adj;
8952
8953 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8954 Operands.push_back(AArch64Operand::CreateImm(
8955 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8956
8957 return ParseStatus::Success;
8958}