LLVM 19.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
// The classes of register name the parser can match a register token
// against; selects which register-number space is searched.
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateAsCounter,
  SVEPredicateVector,
  Matrix,
  LookupTable
};
76
// Flavour of a parsed matrix register operand (whole array, tile, or a
// row/column slice).
enum class MatrixKind { Array, Tile, Row, Col };
78
// How a parsed register must relate to the register class an instruction
// expects; used e.g. for GPR64as32 / GPR32as64 operands (see RegOp below).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
84
/// Target-specific assembly parser for AArch64.  Parses instructions and
/// directives into MCInsts via the table-generated matcher, plus the many
/// custom operand parsers declared below.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registers via the .req directive.
  // NOTE(review): the declaration of the alias map itself was lost in
  // extraction; restore it from upstream before building.

  // Records a pending MOVPRFX prefix instruction so the instruction that
  // follows it can be validated against the prefix's constraints.
  class PrefixInfo {
  public:
    /// Derive prefix state from an already-parsed instruction.  Any opcode
    /// other than the MOVPRFX forms yields an inactive (default) PrefixInfo.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated MOVPRFX: only the destination register is recorded.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated MOVPRFX: the governing predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated MOVPRFX: the governing predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated); // Only meaningful for the predicated forms.
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated); // Only meaningful for the predicated forms.
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    unsigned ElementSize; // Valid only when Predicated is set.
    unsigned Dst;
    unsigned Pg;          // Valid only when Predicated is set.
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    // NOTE(review): the statement that obtained 'TS' from the parser's
    // streamer was lost in extraction.
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  /// Location of the token currently being looked at.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // --- Instruction / operand parsing helpers ---
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  // NOTE(review): the trailing parameter line(s) of this declaration were
  // lost in extraction.
  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  // --- Directive parsing ---
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directives.
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  // NOTE(review): the continuation line of validateInstruction and the middle
  // parameter lines of MatchAndEmitInstruction were lost in extraction.
  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // --- Custom operand parsers invoked from the generated matcher ---
  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  // Target-specific match result codes, appended after the generic ones.
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    // NOTE(review): several constructor statements (including the one that
    // defined 'S' and the body of this 'if') were lost in extraction.
    if (S.getTargetStreamer() == nullptr)

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  // NOTE(review): the first line of each of the following three multi-line
  // override declarations was lost in extraction.
                    SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
                SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
                unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  /// Decompose a symbolic expression into its ELF/Darwin modifier and an
  /// addend; returns false when the expression shape is not understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
336
337/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
338/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  // Discriminator for the payload union below: identifies which operand
  // flavour this object currently holds.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range covered by this operand's tokens.
  SMLoc StartLoc, EndLoc;
364
  // Raw token text (not owned; points into the source buffer).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    // NOTE(review): the field holding the shift/extend type (used below as
    // 'ShiftExtend.Type') was lost in extraction.
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Bitmask of ZA tiles named in a matrix tile list.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };
413
  // A list of vector registers, e.g. "{ z0.d, z1.d }".
  struct VectorListOp {
    unsigned RegNum;      // First register of the list.
    unsigned Count;       // Number of registers in the list.
    unsigned Stride;      // Register-number stride between elements.
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // Immediate together with an explicit left-shift amount, e.g. "#1, lsl #12".
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Inclusive immediate range, e.g. "0:3".
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    // NOTE(review): the field holding the condition code (read below as
    // 'CondCode.Code') was lost in extraction.
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // A system register name plus its encodings for the contexts it is valid in.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
492
  // Payload storage: exactly one member is active at a time, selected by
  // Kind (see the copy constructor and the accessors below).
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
521
522 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
523 Kind = o.Kind;
524 StartLoc = o.StartLoc;
525 EndLoc = o.EndLoc;
526 switch (Kind) {
527 case k_Token:
528 Tok = o.Tok;
529 break;
530 case k_Immediate:
531 Imm = o.Imm;
532 break;
533 case k_ShiftedImm:
534 ShiftedImm = o.ShiftedImm;
535 break;
536 case k_ImmRange:
537 ImmRange = o.ImmRange;
538 break;
539 case k_CondCode:
540 CondCode = o.CondCode;
541 break;
542 case k_FPImm:
543 FPImm = o.FPImm;
544 break;
545 case k_Barrier:
546 Barrier = o.Barrier;
547 break;
548 case k_Register:
549 Reg = o.Reg;
550 break;
551 case k_MatrixRegister:
552 MatrixReg = o.MatrixReg;
553 break;
554 case k_MatrixTileList:
555 MatrixTileList = o.MatrixTileList;
556 break;
557 case k_VectorList:
558 VectorList = o.VectorList;
559 break;
560 case k_VectorIndex:
561 VectorIndex = o.VectorIndex;
562 break;
563 case k_SysReg:
564 SysReg = o.SysReg;
565 break;
566 case k_SysCR:
567 SysCRImm = o.SysCRImm;
568 break;
569 case k_Prefetch:
570 Prefetch = o.Prefetch;
571 break;
572 case k_PSBHint:
573 PSBHint = o.PSBHint;
574 break;
575 case k_BTIHint:
576 BTIHint = o.BTIHint;
577 break;
578 case k_ShiftExtend:
579 ShiftExtend = o.ShiftExtend;
580 break;
581 case k_SVCR:
582 SVCR = o.SVCR;
583 break;
584 }
585 }
586
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Each accessor below asserts that this operand currently holds the
  // matching union member (see Kind) before reading it.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  // NOTE(review): the signature line of getCondCode() was lost in extraction.
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstitute the stored bit pattern as an IEEE double.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors accept either a standalone shift/extend
  // operand or a register operand that was parsed with an attached
  // shift/extend (see RegOp::ShiftExtend).

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }
778
779 bool isUImm6() const {
780 if (!isImm())
781 return false;
782 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
783 if (!MCE)
784 return false;
785 int64_t Val = MCE->getValue();
786 return (Val >= 0 && Val < 64);
787 }
788
  /// Signed immediate fitting in Width bits (unscaled).
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  /// Signed immediate: a multiple of Scale whose quotient fits in Bits bits.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  /// Unsigned immediate: a multiple of Scale whose quotient fits in Bits
  /// bits.  When IsRange is set the operand must instead be an immediate
  /// range whose last value equals First + Offset.
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
803
804 template <int Bits, int Scale, bool IsRange = false>
805 DiagnosticPredicate isImmScaled(bool Signed) const {
806 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
807 (isImmRange() && !IsRange))
808 return DiagnosticPredicateTy::NoMatch;
809
810 int64_t Val;
811 if (isImmRange())
812 Val = getFirstImmVal();
813 else {
814 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
815 if (!MCE)
816 return DiagnosticPredicateTy::NoMatch;
817 Val = MCE->getValue();
818 }
819
820 int64_t MinVal, MaxVal;
821 if (Signed) {
822 int64_t Shift = Bits - 1;
823 MinVal = (int64_t(1) << Shift) * -Scale;
824 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
825 } else {
826 MinVal = 0;
827 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
828 }
829
830 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
831 return DiagnosticPredicateTy::Match;
832
833 return DiagnosticPredicateTy::NearMatch;
834 }
835
836 DiagnosticPredicate isSVEPattern() const {
837 if (!isImm())
838 return DiagnosticPredicateTy::NoMatch;
839 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
840 if (!MCE)
841 return DiagnosticPredicateTy::NoMatch;
842 int64_t Val = MCE->getValue();
843 if (Val >= 0 && Val < 32)
844 return DiagnosticPredicateTy::Match;
845 return DiagnosticPredicateTy::NearMatch;
846 }
847
  /// Constant immediate selecting a streaming vector-length specifier
  /// (only 0 and 1 are valid; other constants are a near match).
  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val <= 1)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
859
  /// True iff \p Expr is a symbolic expression usable as a 12-bit unsigned
  /// page-offset operand (e.g. :lo12: / @pageoff modifiers).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    // NOTE(review): the declaration of 'ELFRefKind' (first out-parameter of
    // classifySymbolRef) was lost in extraction.
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // NOTE(review): one condition line (between VK_TPREL_LO12_NC and
    // VK_TLSDESC_LO12) was lost in extraction.
    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
895
896 template <int Scale> bool isUImm12Offset() const {
897 if (!isImm())
898 return false;
899
900 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
901 if (!MCE)
902 return isSymbolicUImm12Offset(getImm());
903
904 int64_t Val = MCE->getValue();
905 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
906 }
907
908 template <int N, int M>
909 bool isImmInRange() const {
910 if (!isImm())
911 return false;
912 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
913 if (!MCE)
914 return false;
915 int64_t Val = MCE->getValue();
916 return (Val >= N && Val <= M);
917 }
918
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  /// True iff the operand is a constant encodable as a bitmask ("logical")
  /// immediate at the bit width of T.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask covering the bits above T's width.
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }
942
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    // Already-shifted form: accept only when the parsed shift matches Width.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain constant: fold it into shifted form when the low Width bits are
    // zero (and it is non-zero), otherwise report it unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
963
  /// True iff the operand can serve as an ADD/SUB immediate: an (optionally
  /// 'lsl #12'-shifted) 12-bit unsigned constant, or a symbolic expression
  /// with a page-offset-style modifier left to the fixup machinery.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // NOTE(review): the declaration of 'ELFRefKind' was lost in extraction.
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1008
1009 bool isAddSubImmNeg() const {
1010 if (!isShiftedImm() && !isImm())
1011 return false;
1012
1013 // Otherwise it should be a real negative immediate in range.
1014 if (auto ShiftedVal = getShiftedVal<12>())
1015 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1016
1017 return false;
1018 }
1019
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the 'lsl #8' shifted form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the 'lsl #8' shifted form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1058
1059 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1060 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1061 return DiagnosticPredicateTy::Match;
1062 return DiagnosticPredicateTy::NoMatch;
1063 }
1064
  bool isCondCode() const { return Kind == k_CondCode; }

  /// True iff the operand is a constant encodable as an AdvSIMD modified
  /// immediate of type 10.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    // NOTE(review): the final return statement (the AArch64_AM type-10
    // encodability check) was lost in extraction.
  }
1075
1076 template<int N>
1077 bool isBranchTarget() const {
1078 if (!isImm())
1079 return false;
1080 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1081 if (!MCE)
1082 return true;
1083 int64_t Val = MCE->getValue();
1084 if (Val & 0x3)
1085 return false;
1086 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1087 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1088 }
1089
  /// True when the operand is a symbolic expression whose ELF modifier is
  /// one of \p AllowedModifiers (e.g. :abs_g1:). Darwin modifiers and
  /// expressions that cannot be classified are rejected.
  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    // MOVW modifiers are ELF-only.
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }
1107
1108 bool isMovWSymbolG3() const {
1110 }
1111
1112 bool isMovWSymbolG2() const {
1113 return isMovWSymbol(
1118 }
1119
1120 bool isMovWSymbolG1() const {
1121 return isMovWSymbol(
1127 }
1128
1129 bool isMovWSymbolG0() const {
1130 return isMovWSymbol(
1136 }
1137
1138 template<int RegWidth, int Shift>
1139 bool isMOVZMovAlias() const {
1140 if (!isImm()) return false;
1141
1142 const MCExpr *E = getImm();
1143 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1144 uint64_t Value = CE->getValue();
1145
1146 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1147 }
1148 // Only supports the case of Shift being 0 if an expression is used as an
1149 // operand
1150 return !Shift && E;
1151 }
1152
1153 template<int RegWidth, int Shift>
1154 bool isMOVNMovAlias() const {
1155 if (!isImm()) return false;
1156
1157 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1158 if (!CE) return false;
1159 uint64_t Value = CE->getValue();
1160
1161 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1162 }
1163
  // FP immediate representable in the 8-bit FMOV immediate encoding.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier option without the FEAT_XS nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier option carrying the FEAT_XS nXS qualifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1176
  // System register readable via MRS (-1U marks "not MRS-readable").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE field for the MSR-immediate form taking a 0/1 immediate.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // PSTATE field for the MSR-immediate form taking a 0-15 immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // SME SVCR pseudo-register operand (smstart/smstop).
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1204
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Scalar (GPR or FP/SIMD scalar) register, as opposed to a vector form.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }
1218
  // NEON vector register restricted to the lower half of the register
  // file (FPR128_lo / FPR64_lo classes).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  // NEON vector register restricted to the FPR128_0to7 class.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }
1232
  // SME matrix (ZA) register and tile-list operand kinds.
  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1235
  // Predicate-as-counter (PN) register membership test for \p Class.
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1253
  // SVE data (Z) or predicate (P) register membership test for \p Class;
  // the expected RegKind is derived from the class ID.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1276
  // FP/SIMD scalar register accepted where an SVE Z register is encoded
  // (mapped by addFPRasZPRRegOperands).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1281
1282 template <int ElementWidth, unsigned Class>
1283 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1284 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1285 return DiagnosticPredicateTy::NoMatch;
1286
1287 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1288 return DiagnosticPredicateTy::Match;
1289
1290 return DiagnosticPredicateTy::NearMatch;
1291 }
1292
1293 template <int ElementWidth, unsigned Class>
1294 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1295 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1296 return DiagnosticPredicateTy::NoMatch;
1297
1298 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1299 return DiagnosticPredicateTy::Match;
1300
1301 return DiagnosticPredicateTy::NearMatch;
1302 }
1303
1304 template <int ElementWidth, unsigned Class>
1305 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1306 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1307 return DiagnosticPredicateTy::NoMatch;
1308
1309 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1310 return DiagnosticPredicateTy::Match;
1311
1312 return DiagnosticPredicateTy::NearMatch;
1313 }
1314
  // SVE data register with an attached shift/extend; validates both the
  // vector width and that the shift amount/type match the addressing mode.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1337
  // X register accepted where the encoding uses its W counterpart
  // (converted by addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // W register accepted where the encoding uses its X counterpart
  // (converted by addGPR64as32Operands).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Register in the GPR64x8 tuple class.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential W-register pair.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Sequential X-register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // XZR standing in for the register-pair operand of SYSP.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1369
1370 template<int64_t Angle, int64_t Remainder>
1371 DiagnosticPredicate isComplexRotation() const {
1372 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1373
1374 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1375 if (!CE) return DiagnosticPredicateTy::NoMatch;
1376 uint64_t Value = CE->getValue();
1377
1378 if (Value % Angle == Remainder && Value <= 270)
1379 return DiagnosticPredicateTy::Match;
1380 return DiagnosticPredicateTy::NearMatch;
1381 }
1382
  // Scalar GPR64 membership test for \p RegClassID.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // GPR64 carrying an LSL whose amount matches log2(ExtWidth/8);
  // NearMatch when the register is right but the shift is not.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1398
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?  NumElements == 0 marks an untyped list.
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1407
1408 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1409 unsigned ElementWidth, unsigned Stride = 1>
1410 bool isTypedVectorList() const {
1411 if (Kind != k_VectorList)
1412 return false;
1413 if (VectorList.Count != NumRegs)
1414 return false;
1415 if (VectorList.RegisterKind != VectorKind)
1416 return false;
1417 if (VectorList.ElementWidth != ElementWidth)
1418 return false;
1419 if (VectorList.Stride != Stride)
1420 return false;
1421 return VectorList.NumElements == NumElements;
1422 }
1423
  // Typed vector list whose first register is additionally aligned to a
  // multiple of NumRegs (consecutive multi-vector operand form).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1435
  // Strided multi-vector list: the starting register must lie in
  // Z0..Z(Stride-1) or Z16..Z(16+Stride-1).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1449
1450 template <int Min, int Max>
1451 DiagnosticPredicate isVectorIndex() const {
1452 if (Kind != k_VectorIndex)
1453 return DiagnosticPredicateTy::NoMatch;
1454 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1455 return DiagnosticPredicateTy::Match;
1456 return DiagnosticPredicateTy::NearMatch;
1457 }
1458
  bool isToken() const override { return Kind == k_Token; }

  // Token equal to a specific spelling.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  // Simple kind queries for the remaining operand categories.
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // Pure shift operand (no register extends): LSL/LSR/ASR/ROR/MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1478
  // FP immediate that exactly equals the table entry for \p ImmEnum
  // (bit-for-bit, after parsing the entry's textual representation).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1501
  // Matches either of two exact FP immediates; on failure returns the
  // second attempt's NearMatch/NoMatch result.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1511
  // Register-extend operand (UXTB..SXTX or LSL) with amount <= 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1524
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // 64-bit-source extend: only UXTX/SXTX/LSL are valid.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1543
  // LSL with a 3-bit amount (0-7).
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }
1550
  // X-register memory extend: LSL/SXTX with amount log2(Width/8) or 0.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // W-register memory extend: UXTW/SXTW with amount log2(Width/8) or 0.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1568
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1591
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16 (the comment previously
    // listed the 64-bit amounts; the code below is what is checked).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1603
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (the comment
    // previously listed the 32-bit amounts; the code below is checked).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1615
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1645
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1655
1656 bool isAdrpLabel() const {
1657 // Validation was handled during parsing, so we just verify that
1658 // something didn't go haywire.
1659 if (!isImm())
1660 return false;
1661
1662 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1663 int64_t Val = CE->getValue();
1664 int64_t Min = - (4096 * (1LL << (21 - 1)));
1665 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1666 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1667 }
1668
1669 return true;
1670 }
1671
1672 bool isAdrLabel() const {
1673 // Validation was handled during parsing, so we just verify that
1674 // something didn't go haywire.
1675 if (!isImm())
1676 return false;
1677
1678 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1679 int64_t Val = CE->getValue();
1680 int64_t Min = - (1LL << (21 - 1));
1681 int64_t Max = ((1LL << (21 - 1)) - 1);
1682 return Val >= Min && Val <= Max;
1683 }
1684
1685 return true;
1686 }
1687
1688 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1689 DiagnosticPredicate isMatrixRegOperand() const {
1690 if (!isMatrix())
1691 return DiagnosticPredicateTy::NoMatch;
1692 if (getMatrixKind() != Kind ||
1693 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1694 EltSize != getMatrixElementWidth())
1695 return DiagnosticPredicateTy::NearMatch;
1696 return DiagnosticPredicateTy::Match;
1697 }
1698
  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true; // Symbolic target; checked at fixup time.
    int64_t Val = MCE->getValue();
    if (Val & 0b11)
      return false; // Must be word aligned.
    // Non-positive, within an 18-bit (word-scaled 16-bit) range.
    return (Val <= 0) && (Val > -(1 << 18));
  }
1713
1714 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1715 // Add as immediates when possible. Null MCExpr = 0.
1716 if (!Expr)
1718 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1719 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1720 else
1722 }
1723
1724 void addRegOperands(MCInst &Inst, unsigned N) const {
1725 assert(N == 1 && "Invalid number of operands!");
1727 }
1728
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1733
1734 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1735 assert(N == 1 && "Invalid number of operands!");
1736 assert(
1737 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1738
1739 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1740 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1741 RI->getEncodingValue(getReg()));
1742
1744 }
1745
1746 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1747 assert(N == 1 && "Invalid number of operands!");
1748 assert(
1749 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1750
1751 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1752 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1753 RI->getEncodingValue(getReg()));
1754
1756 }
1757
  // Re-map an FP scalar register (B/H/S/D/Q) onto the Z register with the
  // same index.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1772
  // Re-map a predicate-as-counter (PN) register onto the P register with
  // the same index.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }
1778
  // Emit the D view of a parsed Q vector register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1785
1786 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1787 assert(N == 1 && "Invalid number of operands!");
1788 assert(
1789 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1791 }
1792
1793 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1794 assert(N == 1 && "Invalid number of operands!");
1796 }
1797
1798 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1799 assert(N == 1 && "Invalid number of operands!");
1801 }
1802
  // Row selector into the FirstRegs table in addVectorListOperands.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1809
  // Emit the tuple register for a vector list: the table maps (register
  // bank, list length) to the first tuple register; column 0 holds the
  // bank's base single register used to rebase getVectorListStart().
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1837
1838 template <unsigned NumRegs>
1839 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1842
1843 switch (NumRegs) {
1844 case 2:
1845 if (getVectorListStart() < AArch64::Z16) {
1846 assert((getVectorListStart() < AArch64::Z8) &&
1847 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1849 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1850 } else {
1851 assert((getVectorListStart() < AArch64::Z24) &&
1852 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1854 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1855 }
1856 break;
1857 case 4:
1858 if (getVectorListStart() < AArch64::Z16) {
1859 assert((getVectorListStart() < AArch64::Z4) &&
1860 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1862 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1863 } else {
1864 assert((getVectorListStart() < AArch64::Z20) &&
1865 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1867 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1868 }
1869 break;
1870 default:
1871 llvm_unreachable("Unsupported number of registers for strided vec list");
1872 }
1873 }
1874
  // Emit the SME tile list as an 8-bit register mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1886
  // Encode which of the two permitted exact FP immediates was written:
  // 0 selects ImmIs0, 1 selects ImmIs1.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1893
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1901
1902 template <int Shift>
1903 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1904 assert(N == 2 && "Invalid number of operands!");
1905 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1906 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1907 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1908 } else if (isShiftedImm()) {
1909 addExpr(Inst, getShiftedImmVal());
1910 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1911 } else {
1912 addExpr(Inst, getImm());
1914 }
1915 }
1916
  // Emit the negated value of a shifted immediate (used by SUB aliases of
  // ADD-with-negative-immediate and vice versa).
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1926
1927 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1928 assert(N == 1 && "Invalid number of operands!");
1930 }
1931
  // ADRP encodes a page count, so constant offsets are scaled down by the
  // 4096-byte page size; symbolic labels pass through for fixups.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  // ADR uses the raw byte offset.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1944
1945 template<int Scale>
1946 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1947 assert(N == 1 && "Invalid number of operands!");
1948 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1949
1950 if (!MCE) {
1951 Inst.addOperand(MCOperand::createExpr(getImm()));
1952 return;
1953 }
1954 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1955 }
1956
1957 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1961 }
1962
  // Emit a constant immediate divided by Scale.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // Emit the first value of an immediate range divided by Scale.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
  }
1975
  // Emit the bitmask-immediate encoding of the value for a T-sized element.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same, but encodes the bitwise complement (for BIC-style aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1993
1994 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1995 assert(N == 1 && "Invalid number of operands!");
1996 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1998 Inst.addOperand(MCOperand::createImm(encoding));
1999 }
2000
2001 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2002 // Branch operands don't encode the low bits, so shift them off
2003 // here. If it's a label, however, just put it on directly as there's
2004 // not enough information now to do anything.
2005 assert(N == 1 && "Invalid number of operands!");
2006 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2007 if (!MCE) {
2008 addExpr(Inst, getImm());
2009 return;
2010 }
2011 assert(MCE && "Invalid constant immediate operand!");
2012 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2013 }
2014
  void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
    // PC-relative operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
2027
2028 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2029 // Branch operands don't encode the low bits, so shift them off
2030 // here. If it's a label, however, just put it on directly as there's
2031 // not enough information now to do anything.
2032 assert(N == 1 && "Invalid number of operands!");
2033 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2034 if (!MCE) {
2035 addExpr(Inst, getImm());
2036 return;
2037 }
2038 assert(MCE && "Invalid constant immediate operand!");
2039 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2040 }
2041
2042 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2043 // Branch operands don't encode the low bits, so shift them off
2044 // here. If it's a label, however, just put it on directly as there's
2045 // not enough information now to do anything.
2046 assert(N == 1 && "Invalid number of operands!");
2047 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2048 if (!MCE) {
2049 addExpr(Inst, getImm());
2050 return;
2051 }
2052 assert(MCE && "Invalid constant immediate operand!");
2053 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2054 }
2055
2056 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2057 assert(N == 1 && "Invalid number of operands!");
2059 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2060 }
2061
  // Add a memory-barrier option (DSB/DMB/ISB) as a raw immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2066
  // Add a barrier option for the nXS-qualified forms (DSB ... nXS).
  // Same immediate payload as addBarrierOperands; the distinction is in
  // operand matching, not encoding.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2071
  // Add the system register using its MRS (read) encoding.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
2077
  // Add the system register using its MSR (write) encoding.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
2083
  // Add a PSTATE field accessed via `MSR <pstatefield>, #imm` with a
  // 1-bit immediate.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2089
  // Add an SME SVCR (streaming vector control register) pstate field.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
2095
  // Add a PSTATE field accessed via `MSR <pstatefield>, #imm` with a
  // 4-bit immediate.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2101
  // Add a system-instruction Cn control-register index (0-15).
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
2106
  // Add a PRFM prefetch-operation operand as a raw immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
2111
  // Add a PSB (profiling synchronization barrier) hint operand.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
2116
  // Add a BTI (branch target identification) hint operand.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
2121
2122 void addShifterOperands(MCInst &Inst, unsigned N) const {
2123 assert(N == 1 && "Invalid number of operands!");
2124 unsigned Imm =
2125 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2127 }
2128
2129 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2130 assert(N == 1 && "Invalid number of operands!");
2131 unsigned Imm = getShiftExtendAmount();
2133 }
2134
2135 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2136 assert(N == 1 && "Invalid number of operands!");
2137
2138 if (!isScalarReg())
2139 return;
2140
2141 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2142 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2144 if (Reg != AArch64::XZR)
2145 llvm_unreachable("wrong register");
2146
2147 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2148 }
2149
2150 void addExtendOperands(MCInst &Inst, unsigned N) const {
2151 assert(N == 1 && "Invalid number of operands!");
2152 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2153 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2154 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2156 }
2157
2158 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2159 assert(N == 1 && "Invalid number of operands!");
2160 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2161 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2162 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2164 }
2165
2166 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2167 assert(N == 2 && "Invalid number of operands!");
2168 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2169 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2170 Inst.addOperand(MCOperand::createImm(IsSigned));
2171 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2172 }
2173
2174 // For 8-bit load/store instructions with a register offset, both the
2175 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2176 // they're disambiguated by whether the shift was explicit or implicit rather
2177 // than its size.
2178 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2179 assert(N == 2 && "Invalid number of operands!");
2180 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2181 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2182 Inst.addOperand(MCOperand::createImm(IsSigned));
2183 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2184 }
2185
2186 template<int Shift>
2187 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2188 assert(N == 1 && "Invalid number of operands!");
2189
2190 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2191 if (CE) {
2192 uint64_t Value = CE->getValue();
2193 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2194 } else {
2195 addExpr(Inst, getImm());
2196 }
2197 }
2198
2199 template<int Shift>
2200 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2201 assert(N == 1 && "Invalid number of operands!");
2202
2203 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2204 uint64_t Value = CE->getValue();
2205 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2206 }
2207
  // Add a complex-number rotation that must be a multiple of 90 degrees,
  // encoded as value/90 (0, 90, 180, 270 -> 0..3).
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
2213
  // Add a complex-number rotation that must be 90 or 270 degrees,
  // encoded as (value-90)/180 (90 -> 0, 270 -> 1).
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
2219
2220 void print(raw_ostream &OS) const override;
2221
  // Build a token operand referencing (not copying) Str's storage; the
  // caller must keep the underlying text alive. Start and end locations
  // are both S.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2232
2233 static std::unique_ptr<AArch64Operand>
2234 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2235 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2237 unsigned ShiftAmount = 0,
2238 unsigned HasExplicitAmount = false) {
2239 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2240 Op->Reg.RegNum = RegNum;
2241 Op->Reg.Kind = Kind;
2242 Op->Reg.ElementWidth = 0;
2243 Op->Reg.EqualityTy = EqTy;
2244 Op->Reg.ShiftExtend.Type = ExtTy;
2245 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2246 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2247 Op->StartLoc = S;
2248 Op->EndLoc = E;
2249 return Op;
2250 }
2251
2252 static std::unique_ptr<AArch64Operand>
2253 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2254 SMLoc S, SMLoc E, MCContext &Ctx,
2256 unsigned ShiftAmount = 0,
2257 unsigned HasExplicitAmount = false) {
2258 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2259 Kind == RegKind::SVEPredicateVector ||
2260 Kind == RegKind::SVEPredicateAsCounter) &&
2261 "Invalid vector kind");
2262 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2263 HasExplicitAmount);
2264 Op->Reg.ElementWidth = ElementWidth;
2265 return Op;
2266 }
2267
  // Build a vector-list operand (e.g. { v0.4s - v3.4s }): Count registers
  // starting at RegNum, Stride apart, each with the given element geometry.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
                   unsigned NumElements, unsigned ElementWidth,
                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.Stride = Stride;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2283
  // Build a vector lane-index operand (the [n] in v0.s[n]).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2292
  // Build an SME matrix tile-list operand; RegMask has one bit per tile.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2301
2302 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2303 const unsigned ElementWidth) {
2304 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2305 RegMap = {
2306 {{0, AArch64::ZAB0},
2307 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2308 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2309 {{8, AArch64::ZAB0},
2310 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2311 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2312 {{16, AArch64::ZAH0},
2313 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2314 {{16, AArch64::ZAH1},
2315 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2316 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2317 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2318 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2319 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2320 };
2321
2322 if (ElementWidth == 64)
2323 OutRegs.insert(Reg);
2324 else {
2325 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2326 assert(!Regs.empty() && "Invalid tile or element width!");
2327 for (auto OutReg : Regs)
2328 OutRegs.insert(OutReg);
2329 }
2330 }
2331
  // Build an immediate operand wrapping an arbitrary MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2340
2341 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2342 unsigned ShiftAmount,
2343 SMLoc S, SMLoc E,
2344 MCContext &Ctx) {
2345 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2346 Op->ShiftedImm .Val = Val;
2347 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2348 Op->StartLoc = S;
2349 Op->EndLoc = E;
2350 return Op;
2351 }
2352
2353 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2354 unsigned Last, SMLoc S,
2355 SMLoc E,
2356 MCContext &Ctx) {
2357 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2358 Op->ImmRange.First = First;
2359 Op->ImmRange.Last = Last;
2360 Op->EndLoc = E;
2361 return Op;
2362 }
2363
  // Build a condition-code operand (EQ, NE, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2372
  // Build a floating-point immediate operand. The value is stored as the
  // sign-extended raw bit pattern; IsExact records whether the textual
  // value was representable without rounding.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2382
  // Build a barrier-option operand; Str is referenced, not copied, so the
  // caller must keep it alive. HasnXSModifier marks the DSB ... nXS forms.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2397
  // Build a system-register operand carrying all three possible encodings
  // (MRS read, MSR write, MSR-immediate pstate field); the consumer picks
  // the one appropriate to the instruction.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2413
  // Build a system-instruction Cn operand (0 <= Val <= 15).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2422
2423 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2424 StringRef Str,
2425 SMLoc S,
2426 MCContext &Ctx) {
2427 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2428 Op->Prefetch.Val = Val;
2429 Op->Barrier.Data = Str.data();
2430 Op->Barrier.Length = Str.size();
2431 Op->StartLoc = S;
2432 Op->EndLoc = S;
2433 return Op;
2434 }
2435
  // Build a PSB hint operand; Str is referenced, not copied.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2448
  // Build a BTI hint operand. The stored value sets bit 5 (Val | 32) —
  // presumably because BTI occupies the HINT #32+ encoding space; confirm
  // against the consumer of BTIHint.Val before relying on this.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2461
  // Build an SME matrix register operand (ZA array, tile, row or column).
  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
    Op->MatrixReg.RegNum = RegNum;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2473
  // Build an SME SVCR operand; Str is referenced, not copied.
  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2484
  // Build a standalone shift/extend operand (e.g. "lsl #2", "uxtw #3");
  // HasExplicitAmount distinguishes a written amount from an implied one.
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2496};
2497
2498} // end anonymous namespace.
2499
// Debug/diagnostic dump of an operand; the format is only consumed by
// humans (e.g. -debug output), not by the matcher.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    // Print the tile mask as a fixed-width binary string, MSB first.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    // A register with an attached shift/extend also prints the modifier
    // by falling through to the k_ShiftExtend case.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2599
2600/// @name Auto-generated Match Functions
2601/// {
2602
2604
2605/// }
2606
2608 return StringSwitch<unsigned>(Name.lower())
2609 .Case("v0", AArch64::Q0)
2610 .Case("v1", AArch64::Q1)
2611 .Case("v2", AArch64::Q2)
2612 .Case("v3", AArch64::Q3)
2613 .Case("v4", AArch64::Q4)
2614 .Case("v5", AArch64::Q5)
2615 .Case("v6", AArch64::Q6)
2616 .Case("v7", AArch64::Q7)
2617 .Case("v8", AArch64::Q8)
2618 .Case("v9", AArch64::Q9)
2619 .Case("v10", AArch64::Q10)
2620 .Case("v11", AArch64::Q11)
2621 .Case("v12", AArch64::Q12)
2622 .Case("v13", AArch64::Q13)
2623 .Case("v14", AArch64::Q14)
2624 .Case("v15", AArch64::Q15)
2625 .Case("v16", AArch64::Q16)
2626 .Case("v17", AArch64::Q17)
2627 .Case("v18", AArch64::Q18)
2628 .Case("v19", AArch64::Q19)
2629 .Case("v20", AArch64::Q20)
2630 .Case("v21", AArch64::Q21)
2631 .Case("v22", AArch64::Q22)
2632 .Case("v23", AArch64::Q23)
2633 .Case("v24", AArch64::Q24)
2634 .Case("v25", AArch64::Q25)
2635 .Case("v26", AArch64::Q26)
2636 .Case("v27", AArch64::Q27)
2637 .Case("v28", AArch64::Q28)
2638 .Case("v29", AArch64::Q29)
2639 .Case("v30", AArch64::Q30)
2640 .Case("v31", AArch64::Q31)
2641 .Default(0);
2642}
2643
2644/// Returns an optional pair of (#elements, element-width) if Suffix
2645/// is a valid vector kind. Where the number of elements in a vector
2646/// or the vector width is implicit or explicitly unknown (but still a
2647/// valid suffix kind), 0 is used.
2648static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2649 RegKind VectorKind) {
2650 std::pair<int, int> Res = {-1, -1};
2651
2652 switch (VectorKind) {
2653 case RegKind::NeonVector:
2655 .Case("", {0, 0})
2656 .Case(".1d", {1, 64})
2657 .Case(".1q", {1, 128})
2658 // '.2h' needed for fp16 scalar pairwise reductions
2659 .Case(".2h", {2, 16})
2660 .Case(".2b", {2, 8})
2661 .Case(".2s", {2, 32})
2662 .Case(".2d", {2, 64})
2663 // '.4b' is another special case for the ARMv8.2a dot product
2664 // operand
2665 .Case(".4b", {4, 8})
2666 .Case(".4h", {4, 16})
2667 .Case(".4s", {4, 32})
2668 .Case(".8b", {8, 8})
2669 .Case(".8h", {8, 16})
2670 .Case(".16b", {16, 8})
2671 // Accept the width neutral ones, too, for verbose syntax. If
2672 // those aren't used in the right places, the token operand won't
2673 // match so all will work out.
2674 .Case(".b", {0, 8})
2675 .Case(".h", {0, 16})
2676 .Case(".s", {0, 32})
2677 .Case(".d", {0, 64})
2678 .Default({-1, -1});
2679 break;
2680 case RegKind::SVEPredicateAsCounter:
2681 case RegKind::SVEPredicateVector:
2682 case RegKind::SVEDataVector:
2683 case RegKind::Matrix:
2685 .Case("", {0, 0})
2686 .Case(".b", {0, 8})
2687 .Case(".h", {0, 16})
2688 .Case(".s", {0, 32})
2689 .Case(".d", {0, 64})
2690 .Case(".q", {0, 128})
2691 .Default({-1, -1});
2692 break;
2693 default:
2694 llvm_unreachable("Unsupported RegKind");
2695 }
2696
2697 if (Res == std::make_pair(-1, -1))
2698 return std::nullopt;
2699
2700 return std::optional<std::pair<int, int>>(Res);
2701}
2702
2703static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2704 return parseVectorKind(Suffix, VectorKind).has_value();
2705}
2706
2708 return StringSwitch<unsigned>(Name.lower())
2709 .Case("z0", AArch64::Z0)
2710 .Case("z1", AArch64::Z1)
2711 .Case("z2", AArch64::Z2)
2712 .Case("z3", AArch64::Z3)
2713 .Case("z4", AArch64::Z4)
2714 .Case("z5", AArch64::Z5)
2715 .Case("z6", AArch64::Z6)
2716 .Case("z7", AArch64::Z7)
2717 .Case("z8", AArch64::Z8)
2718 .Case("z9", AArch64::Z9)
2719 .Case("z10", AArch64::Z10)
2720 .Case("z11", AArch64::Z11)
2721 .Case("z12", AArch64::Z12)
2722 .Case("z13", AArch64::Z13)
2723 .Case("z14", AArch64::Z14)
2724 .Case("z15", AArch64::Z15)
2725 .Case("z16", AArch64::Z16)
2726 .Case("z17", AArch64::Z17)
2727 .Case("z18", AArch64::Z18)
2728 .Case("z19", AArch64::Z19)
2729 .Case("z20", AArch64::Z20)
2730 .Case("z21", AArch64::Z21)
2731 .Case("z22", AArch64::Z22)
2732 .Case("z23", AArch64::Z23)
2733 .Case("z24", AArch64::Z24)
2734 .Case("z25", AArch64::Z25)
2735 .Case("z26", AArch64::Z26)
2736 .Case("z27", AArch64::Z27)
2737 .Case("z28", AArch64::Z28)
2738 .Case("z29", AArch64::Z29)
2739 .Case("z30", AArch64::Z30)
2740 .Case("z31", AArch64::Z31)
2741 .Default(0);
2742}
2743
2745 return StringSwitch<unsigned>(Name.lower())
2746 .Case("p0", AArch64::P0)
2747 .Case("p1", AArch64::P1)
2748 .Case("p2", AArch64::P2)
2749 .Case("p3", AArch64::P3)
2750 .Case("p4", AArch64::P4)
2751 .Case("p5", AArch64::P5)
2752 .Case("p6", AArch64::P6)
2753 .Case("p7", AArch64::P7)
2754 .Case("p8", AArch64::P8)
2755 .Case("p9", AArch64::P9)
2756 .Case("p10", AArch64::P10)
2757 .Case("p11", AArch64::P11)
2758 .Case("p12", AArch64::P12)
2759 .Case("p13", AArch64::P13)
2760 .Case("p14", AArch64::P14)
2761 .Case("p15", AArch64::P15)
2762 .Default(0);
2763}
2764
2766 return StringSwitch<unsigned>(Name.lower())
2767 .Case("pn0", AArch64::PN0)
2768 .Case("pn1", AArch64::PN1)
2769 .Case("pn2", AArch64::PN2)
2770 .Case("pn3", AArch64::PN3)
2771 .Case("pn4", AArch64::PN4)
2772 .Case("pn5", AArch64::PN5)
2773 .Case("pn6", AArch64::PN6)
2774 .Case("pn7", AArch64::PN7)
2775 .Case("pn8", AArch64::PN8)
2776 .Case("pn9", AArch64::PN9)
2777 .Case("pn10", AArch64::PN10)
2778 .Case("pn11", AArch64::PN11)
2779 .Case("pn12", AArch64::PN12)
2780 .Case("pn13", AArch64::PN13)
2781 .Case("pn14", AArch64::PN14)
2782 .Case("pn15", AArch64::PN15)
2783 .Default(0);
2784}
2785
2787 return StringSwitch<unsigned>(Name.lower())
2788 .Case("za0.d", AArch64::ZAD0)
2789 .Case("za1.d", AArch64::ZAD1)
2790 .Case("za2.d", AArch64::ZAD2)
2791 .Case("za3.d", AArch64::ZAD3)
2792 .Case("za4.d", AArch64::ZAD4)
2793 .Case("za5.d", AArch64::ZAD5)
2794 .Case("za6.d", AArch64::ZAD6)
2795 .Case("za7.d", AArch64::ZAD7)
2796 .Case("za0.s", AArch64::ZAS0)
2797 .Case("za1.s", AArch64::ZAS1)
2798 .Case("za2.s", AArch64::ZAS2)
2799 .Case("za3.s", AArch64::ZAS3)
2800 .Case("za0.h", AArch64::ZAH0)
2801 .Case("za1.h", AArch64::ZAH1)
2802 .Case("za0.b", AArch64::ZAB0)
2803 .Default(0);
2804}
2805
2807 return StringSwitch<unsigned>(Name.lower())
2808 .Case("za", AArch64::ZA)
2809 .Case("za0.q", AArch64::ZAQ0)
2810 .Case("za1.q", AArch64::ZAQ1)
2811 .Case("za2.q", AArch64::ZAQ2)
2812 .Case("za3.q", AArch64::ZAQ3)
2813 .Case("za4.q", AArch64::ZAQ4)
2814 .Case("za5.q", AArch64::ZAQ5)
2815 .Case("za6.q", AArch64::ZAQ6)
2816 .Case("za7.q", AArch64::ZAQ7)
2817 .Case("za8.q", AArch64::ZAQ8)
2818 .Case("za9.q", AArch64::ZAQ9)
2819 .Case("za10.q", AArch64::ZAQ10)
2820 .Case("za11.q", AArch64::ZAQ11)
2821 .Case("za12.q", AArch64::ZAQ12)
2822 .Case("za13.q", AArch64::ZAQ13)
2823 .Case("za14.q", AArch64::ZAQ14)
2824 .Case("za15.q", AArch64::ZAQ15)
2825 .Case("za0.d", AArch64::ZAD0)
2826 .Case("za1.d", AArch64::ZAD1)
2827 .Case("za2.d", AArch64::ZAD2)
2828 .Case("za3.d", AArch64::ZAD3)
2829 .Case("za4.d", AArch64::ZAD4)
2830 .Case("za5.d", AArch64::ZAD5)
2831 .Case("za6.d", AArch64::ZAD6)
2832 .Case("za7.d", AArch64::ZAD7)
2833 .Case("za0.s", AArch64::ZAS0)
2834 .Case("za1.s", AArch64::ZAS1)
2835 .Case("za2.s", AArch64::ZAS2)
2836 .Case("za3.s", AArch64::ZAS3)
2837 .Case("za0.h", AArch64::ZAH0)
2838 .Case("za1.h", AArch64::ZAH1)
2839 .Case("za0.b", AArch64::ZAB0)
2840 .Case("za0h.q", AArch64::ZAQ0)
2841 .Case("za1h.q", AArch64::ZAQ1)
2842 .Case("za2h.q", AArch64::ZAQ2)
2843 .Case("za3h.q", AArch64::ZAQ3)
2844 .Case("za4h.q", AArch64::ZAQ4)
2845 .Case("za5h.q", AArch64::ZAQ5)
2846 .Case("za6h.q", AArch64::ZAQ6)
2847 .Case("za7h.q", AArch64::ZAQ7)
2848 .Case("za8h.q", AArch64::ZAQ8)
2849 .Case("za9h.q", AArch64::ZAQ9)
2850 .Case("za10h.q", AArch64::ZAQ10)
2851 .Case("za11h.q", AArch64::ZAQ11)
2852 .Case("za12h.q", AArch64::ZAQ12)
2853 .Case("za13h.q", AArch64::ZAQ13)
2854 .Case("za14h.q", AArch64::ZAQ14)
2855 .Case("za15h.q", AArch64::ZAQ15)
2856 .Case("za0h.d", AArch64::ZAD0)
2857 .Case("za1h.d", AArch64::ZAD1)
2858 .Case("za2h.d", AArch64::ZAD2)
2859 .Case("za3h.d", AArch64::ZAD3)
2860 .Case("za4h.d", AArch64::ZAD4)
2861 .Case("za5h.d", AArch64::ZAD5)
2862 .Case("za6h.d", AArch64::ZAD6)
2863 .Case("za7h.d", AArch64::ZAD7)
2864 .Case("za0h.s", AArch64::ZAS0)
2865 .Case("za1h.s", AArch64::ZAS1)
2866 .Case("za2h.s", AArch64::ZAS2)
2867 .Case("za3h.s", AArch64::ZAS3)
2868 .Case("za0h.h", AArch64::ZAH0)
2869 .Case("za1h.h", AArch64::ZAH1)
2870 .Case("za0h.b", AArch64::ZAB0)
2871 .Case("za0v.q", AArch64::ZAQ0)
2872 .Case("za1v.q", AArch64::ZAQ1)
2873 .Case("za2v.q", AArch64::ZAQ2)
2874 .Case("za3v.q", AArch64::ZAQ3)
2875 .Case("za4v.q", AArch64::ZAQ4)
2876 .Case("za5v.q", AArch64::ZAQ5)
2877 .Case("za6v.q", AArch64::ZAQ6)
2878 .Case("za7v.q", AArch64::ZAQ7)
2879 .Case("za8v.q", AArch64::ZAQ8)
2880 .Case("za9v.q", AArch64::ZAQ9)
2881 .Case("za10v.q", AArch64::ZAQ10)
2882 .Case("za11v.q", AArch64::ZAQ11)
2883 .Case("za12v.q", AArch64::ZAQ12)
2884 .Case("za13v.q", AArch64::ZAQ13)
2885 .Case("za14v.q", AArch64::ZAQ14)
2886 .Case("za15v.q", AArch64::ZAQ15)
2887 .Case("za0v.d", AArch64::ZAD0)
2888 .Case("za1v.d", AArch64::ZAD1)
2889 .Case("za2v.d", AArch64::ZAD2)
2890 .Case("za3v.d", AArch64::ZAD3)
2891 .Case("za4v.d", AArch64::ZAD4)
2892 .Case("za5v.d", AArch64::ZAD5)
2893 .Case("za6v.d", AArch64::ZAD6)
2894 .Case("za7v.d", AArch64::ZAD7)
2895 .Case("za0v.s", AArch64::ZAS0)
2896 .Case("za1v.s", AArch64::ZAS1)
2897 .Case("za2v.s", AArch64::ZAS2)
2898 .Case("za3v.s", AArch64::ZAS3)
2899 .Case("za0v.h", AArch64::ZAH0)
2900 .Case("za1v.h", AArch64::ZAH1)
2901 .Case("za0v.b", AArch64::ZAB0)
2902 .Default(0);
2903}
2904
2905bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2906 SMLoc &EndLoc) {
2907 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2908}
2909
2910ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2911 SMLoc &EndLoc) {
2912 StartLoc = getLoc();
2913 ParseStatus Res = tryParseScalarRegister(Reg);
2914 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2915 return Res;
2916}
2917
2918// Matches a register name or register alias previously defined by '.req'
2919unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2920 RegKind Kind) {
2921 unsigned RegNum = 0;
2922 if ((RegNum = matchSVEDataVectorRegName(Name)))
2923 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2924
2925 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2926 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2927
2929 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2930
2931 if ((RegNum = MatchNeonVectorRegName(Name)))
2932 return Kind == RegKind::NeonVector ? RegNum : 0;
2933
2934 if ((RegNum = matchMatrixRegName(Name)))
2935 return Kind == RegKind::Matrix ? RegNum : 0;
2936
2937 if (Name.equals_insensitive("zt0"))
2938 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2939
2940 // The parsed register must be of RegKind Scalar
2941 if ((RegNum = MatchRegisterName(Name)))
2942 return (Kind == RegKind::Scalar) ? RegNum : 0;
2943
2944 if (!RegNum) {
2945 // Handle a few common aliases of registers.
2946 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2947 .Case("fp", AArch64::FP)
2948 .Case("lr", AArch64::LR)
2949 .Case("x31", AArch64::XZR)
2950 .Case("w31", AArch64::WZR)
2951 .Default(0))
2952 return Kind == RegKind::Scalar ? RegNum : 0;
2953
2954 // Check for aliases registered via .req. Canonicalize to lower case.
2955 // That's more consistent since register names are case insensitive, and
2956 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2957 auto Entry = RegisterReqs.find(Name.lower());
2958 if (Entry == RegisterReqs.end())
2959 return 0;
2960
2961 // set RegNum if the match is the right kind of register
2962 if (Kind == Entry->getValue().first)
2963 RegNum = Entry->getValue().second;
2964 }
2965 return RegNum;
2966}
2967
// Number of architecturally-addressable registers in each register bank
// (32 scalar/vector, 16 predicate/tile, 1 lookup table).
unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
  switch (K) {
  case RegKind::Scalar:
  case RegKind::NeonVector:
  case RegKind::SVEDataVector:
    return 32;
  case RegKind::Matrix:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEPredicateAsCounter:
    return 16;
  case RegKind::LookupTable:
    return 1;
  }
  llvm_unreachable("Unsupported RegKind");
}
2983
2984/// tryParseScalarRegister - Try to parse a register name. The token must be an
2985/// Identifier when called, and if it is a register name the token is eaten and
2986/// the register is added to the operand list.
2987ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2988 const AsmToken &Tok = getTok();
2989 if (Tok.isNot(AsmToken::Identifier))
2990 return ParseStatus::NoMatch;
2991
2992 std::string lowerCase = Tok.getString().lower();
2993 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2994 if (Reg == 0)
2995 return ParseStatus::NoMatch;
2996
2997 RegNum = Reg;
2998 Lex(); // Eat identifier token.
2999 return ParseStatus::Success;
3000}
3001
3002/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3003ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3004 SMLoc S = getLoc();
3005
3006 if (getTok().isNot(AsmToken::Identifier))
3007 return Error(S, "Expected cN operand where 0 <= N <= 15");
3008
3009 StringRef Tok = getTok().getIdentifier();
3010 if (Tok[0] != 'c' && Tok[0] != 'C')
3011 return Error(S, "Expected cN operand where 0 <= N <= 15");
3012
3013 uint32_t CRNum;
3014 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3015 if (BadNum || CRNum > 15)
3016 return Error(S, "Expected cN operand where 0 <= N <= 15");
3017
3018 Lex(); // Eat identifier token.
3019 Operands.push_back(
3020 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3021 return ParseStatus::Success;
3022}
3023
3024// Either an identifier for named values or a 6-bit immediate.
3025ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3026 SMLoc S = getLoc();
3027 const AsmToken &Tok = getTok();
3028
3029 unsigned MaxVal = 63;
3030
3031 // Immediate case, with optional leading hash:
3032 if (parseOptionalToken(AsmToken::Hash) ||
3033 Tok.is(AsmToken::Integer)) {
3034 const MCExpr *ImmVal;
3035 if (getParser().parseExpression(ImmVal))
3036 return ParseStatus::Failure;
3037
3038 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3039 if (!MCE)
3040 return TokError("immediate value expected for prefetch operand");
3041 unsigned prfop = MCE->getValue();
3042 if (prfop > MaxVal)
3043 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3044 "] expected");
3045
3046 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3047 Operands.push_back(AArch64Operand::CreatePrefetch(
3048 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3049 return ParseStatus::Success;
3050 }
3051
3052 if (Tok.isNot(AsmToken::Identifier))
3053 return TokError("prefetch hint expected");
3054
3055 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3056 if (!RPRFM)
3057 return TokError("prefetch hint expected");
3058
3059 Operands.push_back(AArch64Operand::CreatePrefetch(
3060 RPRFM->Encoding, Tok.getString(), S, getContext()));
3061 Lex(); // Eat identifier token.
3062 return ParseStatus::Success;
3063}
3064
3065/// tryParsePrefetch - Try to parse a prefetch operand.
3066template <bool IsSVEPrefetch>
3067ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3068 SMLoc S = getLoc();
3069 const AsmToken &Tok = getTok();
3070
3071 auto LookupByName = [](StringRef N) {
3072 if (IsSVEPrefetch) {
3073 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3074 return std::optional<unsigned>(Res->Encoding);
3075 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3076 return std::optional<unsigned>(Res->Encoding);
3077 return std::optional<unsigned>();
3078 };
3079
3080 auto LookupByEncoding = [](unsigned E) {
3081 if (IsSVEPrefetch) {
3082 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3083 return std::optional<StringRef>(Res->Name);
3084 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3085 return std::optional<StringRef>(Res->Name);
3086 return std::optional<StringRef>();
3087 };
3088 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3089
3090 // Either an identifier for named values or a 5-bit immediate.
3091 // Eat optional hash.
3092 if (parseOptionalToken(AsmToken::Hash) ||
3093 Tok.is(AsmToken::Integer)) {
3094 const MCExpr *ImmVal;
3095 if (getParser().parseExpression(ImmVal))
3096 return ParseStatus::Failure;
3097
3098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3099 if (!MCE)
3100 return TokError("immediate value expected for prefetch operand");
3101 unsigned prfop = MCE->getValue();
3102 if (prfop > MaxVal)
3103 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3104 "] expected");
3105
3106 auto PRFM = LookupByEncoding(MCE->getValue());
3107 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3108 S, getContext()));
3109 return ParseStatus::Success;
3110 }
3111
3112 if (Tok.isNot(AsmToken::Identifier))
3113 return TokError("prefetch hint expected");
3114
3115 auto PRFM = LookupByName(Tok.getString());
3116 if (!PRFM)
3117 return TokError("prefetch hint expected");
3118
3119 Operands.push_back(AArch64Operand::CreatePrefetch(
3120 *PRFM, Tok.getString(), S, getContext()));
3121 Lex(); // Eat identifier token.
3122 return ParseStatus::Success;
3123}
3124
3125/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3126ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3127 SMLoc S = getLoc();
3128 const AsmToken &Tok = getTok();
3129 if (Tok.isNot(AsmToken::Identifier))
3130 return TokError("invalid operand for instruction");
3131
3132 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3133 if (!PSB)
3134 return TokError("invalid operand for instruction");
3135
3136 Operands.push_back(AArch64Operand::CreatePSBHint(
3137 PSB->Encoding, Tok.getString(), S, getContext()));
3138 Lex(); // Eat identifier token.
3139 return ParseStatus::Success;
3140}
3141
3142ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3143 SMLoc StartLoc = getLoc();
3144
3145 MCRegister RegNum;
3146
3147 // The case where xzr, xzr is not present is handled by an InstAlias.
3148
3149 auto RegTok = getTok(); // in case we need to backtrack
3150 if (!tryParseScalarRegister(RegNum).isSuccess())
3151 return ParseStatus::NoMatch;
3152
3153 if (RegNum != AArch64::XZR) {
3154 getLexer().UnLex(RegTok);
3155 return ParseStatus::NoMatch;
3156 }
3157
3158 if (parseComma())
3159 return ParseStatus::Failure;
3160
3161 if (!tryParseScalarRegister(RegNum).isSuccess())
3162 return TokError("expected register operand");
3163
3164 if (RegNum != AArch64::XZR)
3165 return TokError("xzr must be followed by xzr");
3166
3167 // We need to push something, since we claim this is an operand in .td.
3168 // See also AArch64AsmParser::parseKeywordOperand.
3169 Operands.push_back(AArch64Operand::CreateReg(
3170 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3171
3172 return ParseStatus::Success;
3173}
3174
3175/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3176ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3177 SMLoc S = getLoc();
3178 const AsmToken &Tok = getTok();
3179 if (Tok.isNot(AsmToken::Identifier))
3180 return TokError("invalid operand for instruction");
3181
3182 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3183 if (!BTI)
3184 return TokError("invalid operand for instruction");
3185
3186 Operands.push_back(AArch64Operand::CreateBTIHint(
3187 BTI->Encoding, Tok.getString(), S, getContext()));
3188 Lex(); // Eat identifier token.
3189 return ParseStatus::Success;
3190}
3191
3192/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3193/// instruction.
3194ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3195 SMLoc S = getLoc();
3196 const MCExpr *Expr = nullptr;
3197
3198 if (getTok().is(AsmToken::Hash)) {
3199 Lex(); // Eat hash token.
3200 }
3201
3202 if (parseSymbolicImmVal(Expr))
3203 return ParseStatus::Failure;
3204
3205 AArch64MCExpr::VariantKind ELFRefKind;
3206 MCSymbolRefExpr::VariantKind DarwinRefKind;
3207 int64_t Addend;
3208 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3209 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3210 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3211 // No modifier was specified at all; this is the syntax for an ELF basic
3212 // ADRP relocation (unfortunately).
3213 Expr =
3215 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3216 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3217 Addend != 0) {
3218 return Error(S, "gotpage label reference not allowed an addend");
3219 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3220 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3221 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3222 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3223 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3224 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3225 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3226 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
3227 // The operand must be an @page or @gotpage qualified symbolref.
3228 return Error(S, "page or gotpage label reference expected");
3229 }
3230 }
3231
3232 // We have either a label reference possibly with addend or an immediate. The
3233 // addend is a raw value here. The linker will adjust it to only reference the
3234 // page.
3235 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3236 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3237
3238 return ParseStatus::Success;
3239}
3240
3241/// tryParseAdrLabel - Parse and validate a source label for the ADR
3242/// instruction.
3243ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3244 SMLoc S = getLoc();
3245 const MCExpr *Expr = nullptr;
3246
3247 // Leave anything with a bracket to the default for SVE
3248 if (getTok().is(AsmToken::LBrac))
3249 return ParseStatus::NoMatch;
3250
3251 if (getTok().is(AsmToken::Hash))
3252 Lex(); // Eat hash token.
3253
3254 if (parseSymbolicImmVal(Expr))
3255 return ParseStatus::Failure;
3256
3257 AArch64MCExpr::VariantKind ELFRefKind;
3258 MCSymbolRefExpr::VariantKind DarwinRefKind;
3259 int64_t Addend;
3260 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3261 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3262 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3263 // No modifier was specified at all; this is the syntax for an ELF basic
3264 // ADR relocation (unfortunately).
3265 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3266 } else {
3267 return Error(S, "unexpected adr label");
3268 }
3269 }
3270
3271 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3272 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3273 return ParseStatus::Success;
3274}
3275
3276/// tryParseFPImm - A floating point immediate expression operand.
3277template <bool AddFPZeroAsLiteral>
3278ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3279 SMLoc S = getLoc();
3280
3281 bool Hash = parseOptionalToken(AsmToken::Hash);
3282
3283 // Handle negation, as that still comes through as a separate token.
3284 bool isNegative = parseOptionalToken(AsmToken::Minus);
3285
3286 const AsmToken &Tok = getTok();
3287 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3288 if (!Hash)
3289 return ParseStatus::NoMatch;
3290 return TokError("invalid floating point immediate");
3291 }
3292
3293 // Parse hexadecimal representation.
3294 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3295 if (Tok.getIntVal() > 255 || isNegative)
3296 return TokError("encoded floating point value out of range");
3297
3299 Operands.push_back(
3300 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3301 } else {
3302 // Parse FP representation.
3303 APFloat RealVal(APFloat::IEEEdouble());
3304 auto StatusOrErr =
3305 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3306 if (errorToBool(StatusOrErr.takeError()))
3307 return TokError("invalid floating point representation");
3308
3309 if (isNegative)
3310 RealVal.changeSign();
3311
3312 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3313 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3314 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3315 } else
3316 Operands.push_back(AArch64Operand::CreateFPImm(
3317 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3318 }
3319
3320 Lex(); // Eat the token.
3321
3322 return ParseStatus::Success;
3323}
3324
3325/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3326/// a shift suffix, for example '#1, lsl #12'.
3328AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3329 SMLoc S = getLoc();
3330
3331 if (getTok().is(AsmToken::Hash))
3332 Lex(); // Eat '#'
3333 else if (getTok().isNot(AsmToken::Integer))
3334 // Operand should start from # or should be integer, emit error otherwise.
3335 return ParseStatus::NoMatch;
3336
3337 if (getTok().is(AsmToken::Integer) &&
3338 getLexer().peekTok().is(AsmToken::Colon))
3339 return tryParseImmRange(Operands);
3340
3341 const MCExpr *Imm = nullptr;
3342 if (parseSymbolicImmVal(Imm))
3343 return ParseStatus::Failure;
3344 else if (getTok().isNot(AsmToken::Comma)) {
3345 Operands.push_back(
3346 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3347 return ParseStatus::Success;
3348 }
3349
3350 // Eat ','
3351 Lex();
3352 StringRef VecGroup;
3353 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3354 Operands.push_back(
3355 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3356 Operands.push_back(
3357 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3358 return ParseStatus::Success;
3359 }
3360
3361 // The optional operand must be "lsl #N" where N is non-negative.
3362 if (!getTok().is(AsmToken::Identifier) ||
3363 !getTok().getIdentifier().equals_insensitive("lsl"))
3364 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3365
3366 // Eat 'lsl'
3367 Lex();
3368
3369 parseOptionalToken(AsmToken::Hash);
3370
3371 if (getTok().isNot(AsmToken::Integer))
3372 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3373
3374 int64_t ShiftAmount = getTok().getIntVal();
3375
3376 if (ShiftAmount < 0)
3377 return Error(getLoc(), "positive shift amount required");
3378 Lex(); // Eat the number
3379
3380 // Just in case the optional lsl #0 is used for immediates other than zero.
3381 if (ShiftAmount == 0 && Imm != nullptr) {
3382 Operands.push_back(
3383 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3384 return ParseStatus::Success;
3385 }
3386
3387 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3388 getLoc(), getContext()));
3389 return ParseStatus::Success;
3390}
3391
3392/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3393/// suggestion to help common typos.
3395AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3397 .Case("eq", AArch64CC::EQ)
3398 .Case("ne", AArch64CC::NE)
3399 .Case("cs", AArch64CC::HS)
3400 .Case("hs", AArch64CC::HS)
3401 .Case("cc", AArch64CC::LO)
3402 .Case("lo", AArch64CC::LO)
3403 .Case("mi", AArch64CC::MI)
3404 .Case("pl", AArch64CC::PL)
3405 .Case("vs", AArch64CC::VS)
3406 .Case("vc", AArch64CC::VC)
3407 .Case("hi", AArch64CC::HI)
3408 .Case("ls", AArch64CC::LS)
3409 .Case("ge", AArch64CC::GE)
3410 .Case("lt", AArch64CC::LT)
3411 .Case("gt", AArch64CC::GT)
3412 .Case("le", AArch64CC::LE)
3413 .Case("al", AArch64CC::AL)
3414 .Case("nv", AArch64CC::NV)
3416
3417 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3419 .Case("none", AArch64CC::EQ)
3420 .Case("any", AArch64CC::NE)
3421 .Case("nlast", AArch64CC::HS)
3422 .Case("last", AArch64CC::LO)
3423 .Case("first", AArch64CC::MI)
3424 .Case("nfrst", AArch64CC::PL)
3425 .Case("pmore", AArch64CC::HI)
3426 .Case("plast", AArch64CC::LS)
3427 .Case("tcont", AArch64CC::GE)
3428 .Case("tstop", AArch64CC::LT)
3430
3431 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3432 Suggestion = "nfrst";
3433 }
3434 return CC;
3435}
3436
3437/// parseCondCode - Parse a Condition Code operand.
3438bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3439 bool invertCondCode) {
3440 SMLoc S = getLoc();
3441 const AsmToken &Tok = getTok();
3442 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3443
3444 StringRef Cond = Tok.getString();
3445 std::string Suggestion;
3446 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3447 if (CC == AArch64CC::Invalid) {
3448 std::string Msg = "invalid condition code";
3449 if (!Suggestion.empty())
3450 Msg += ", did you mean " + Suggestion + "?";
3451 return TokError(Msg);
3452 }
3453 Lex(); // Eat identifier token.
3454
3455 if (invertCondCode) {
3456 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3457 return TokError("condition codes AL and NV are invalid for this instruction");
3459 }
3460
3461 Operands.push_back(
3462 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3463 return false;
3464}
3465
3466ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3467 const AsmToken &Tok = getTok();
3468 SMLoc S = getLoc();
3469
3470 if (Tok.isNot(AsmToken::Identifier))
3471 return TokError("invalid operand for instruction");
3472
3473 unsigned PStateImm = -1;
3474 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3475 if (!SVCR)
3476 return ParseStatus::NoMatch;
3477 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3478 PStateImm = SVCR->Encoding;
3479
3480 Operands.push_back(
3481 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3482 Lex(); // Eat identifier token.
3483 return ParseStatus::Success;
3484}
3485
/// Parse an SME matrix register operand: either the full array "za" (with an
/// optional ".b/.h/.s/.d" element-width suffix) or a tile/row/column register
/// such as "za0h.s". A following '[' triggers immediate parsing of the index
/// operand, since no comma separates it from the register.
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Whole-array form: "za" or "za.<suffix>".
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    // ElementWidth stays 0 when no suffix is given.
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Matrix register names always carry a '.' element-width suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // Last character before the dot distinguishes row ('h') / column ('v')
  // accessors from plain tiles.
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3553
3554/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3555/// them if present.
3557AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3558 const AsmToken &Tok = getTok();
3559 std::string LowerID = Tok.getString().lower();
3562 .Case("lsl", AArch64_AM::LSL)
3563 .Case("lsr", AArch64_AM::LSR)
3564 .Case("asr", AArch64_AM::ASR)
3565 .Case("ror", AArch64_AM::ROR)
3566 .Case("msl", AArch64_AM::MSL)
3567 .Case("uxtb", AArch64_AM::UXTB)
3568 .Case("uxth", AArch64_AM::UXTH)
3569 .Case("uxtw", AArch64_AM::UXTW)
3570 .Case("uxtx", AArch64_AM::UXTX)
3571 .Case("sxtb", AArch64_AM::SXTB)
3572 .Case("sxth", AArch64_AM::SXTH)
3573 .Case("sxtw", AArch64_AM::SXTW)
3574 .Case("sxtx", AArch64_AM::SXTX)
3576
3578 return ParseStatus::NoMatch;
3579
3580 SMLoc S = Tok.getLoc();
3581 Lex();
3582
3583 bool Hash = parseOptionalToken(AsmToken::Hash);
3584
3585 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3586 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3587 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3588 ShOp == AArch64_AM::MSL) {
3589 // We expect a number here.
3590 return TokError("expected #imm after shift specifier");
3591 }
3592
3593 // "extend" type operations don't need an immediate, #0 is implicit.
3594 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3595 Operands.push_back(
3596 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3597 return ParseStatus::Success;
3598 }
3599
3600 // Make sure we do actually have a number, identifier or a parenthesized
3601 // expression.
3602 SMLoc E = getLoc();
3603 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3604 !getTok().is(AsmToken::Identifier))
3605 return Error(E, "expected integer shift amount");
3606
3607 const MCExpr *ImmVal;
3608 if (getParser().parseExpression(ImmVal))
3609 return ParseStatus::Failure;
3610
3611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3612 if (!MCE)
3613 return Error(E, "expected constant '#imm' after shift specifier");
3614
3615 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3616 Operands.push_back(AArch64Operand::CreateShiftExtend(
3617 ShOp, MCE->getValue(), true, S, E, getContext()));
3618 return ParseStatus::Success;
3619}
3620
3621static const struct Extension {
3622 const char *Name;
3624} ExtensionMap[] = {
3625 {"crc", {AArch64::FeatureCRC}},
3626 {"sm4", {AArch64::FeatureSM4}},
3627 {"sha3", {AArch64::FeatureSHA3}},
3628 {"sha2", {AArch64::FeatureSHA2}},
3629 {"aes", {AArch64::FeatureAES}},
3630 {"crypto", {AArch64::FeatureCrypto}},
3631 {"fp", {AArch64::FeatureFPARMv8}},
3632 {"simd", {AArch64::FeatureNEON}},
3633 {"ras", {AArch64::FeatureRAS}},
3634 {"rasv2", {AArch64::FeatureRASv2}},
3635 {"lse", {AArch64::FeatureLSE}},
3636 {"predres", {AArch64::FeaturePredRes}},
3637 {"predres2", {AArch64::FeatureSPECRES2}},
3638 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3639 {"mte", {AArch64::FeatureMTE}},
3640 {"memtag", {AArch64::FeatureMTE}},
3641 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3642 {"pan", {AArch64::FeaturePAN}},
3643 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3644 {"ccpp", {AArch64::FeatureCCPP}},
3645 {"rcpc", {AArch64::FeatureRCPC}},
3646 {"rng", {AArch64::FeatureRandGen}},
3647 {"sve", {AArch64::FeatureSVE}},
3648 {"sve2", {AArch64::FeatureSVE2}},
3649 {"sve2-aes", {AArch64::FeatureSVE2AES}},
3650 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3651 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3652 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3653 {"sve2p1", {AArch64::FeatureSVE2p1}},
3654 {"b16b16", {AArch64::FeatureB16B16}},
3655 {"ls64", {AArch64::FeatureLS64}},
3656 {"xs", {AArch64::FeatureXS}},
3657 {"pauth", {AArch64::FeaturePAuth}},
3658 {"flagm", {AArch64::FeatureFlagM}},
3659 {"rme", {AArch64::FeatureRME}},
3660 {"sme", {AArch64::FeatureSME}},
3661 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3662 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3663 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3664 {"sme2", {AArch64::FeatureSME2}},
3665 {"sme2p1", {AArch64::FeatureSME2p1}},
3666 {"hbc", {AArch64::FeatureHBC}},
3667 {"mops", {AArch64::FeatureMOPS}},
3668 {"mec", {AArch64::FeatureMEC}},
3669 {"the", {AArch64::FeatureTHE}},
3670 {"d128", {AArch64::FeatureD128}},
3671 {"lse128", {AArch64::FeatureLSE128}},
3672 {"ite", {AArch64::FeatureITE}},
3673 {"cssc", {AArch64::FeatureCSSC}},
3674 {"rcpc3", {AArch64::FeatureRCPC3}},
3675 {"gcs", {AArch64::FeatureGCS}},
3676 {"bf16", {AArch64::FeatureBF16}},
3677 {"compnum", {AArch64::FeatureComplxNum}},
3678 {"dotprod", {AArch64::FeatureDotProd}},
3679 {"f32mm", {AArch64::FeatureMatMulFP32}},
3680 {"f64mm", {AArch64::FeatureMatMulFP64}},
3681 {"fp16", {AArch64::FeatureFullFP16}},
3682 {"fp16fml", {AArch64::FeatureFP16FML}},
3683 {"i8mm", {AArch64::FeatureMatMulInt8}},
3684 {"lor", {AArch64::FeatureLOR}},
3685 {"profile", {AArch64::FeatureSPE}},
3686 // "rdma" is the name documented by binutils for the feature, but
3687 // binutils also accepts incomplete prefixes of features, so "rdm"
3688 // works too. Support both spellings here.
3689 {"rdm", {AArch64::FeatureRDM}},
3690 {"rdma", {AArch64::FeatureRDM}},
3691 {"sb", {AArch64::FeatureSB}},
3692 {"ssbs", {AArch64::FeatureSSBS}},
3693 {"tme", {AArch64::FeatureTME}},
3694 {"fpmr", {AArch64::FeatureFPMR}},
3695 {"fp8", {AArch64::FeatureFP8}},
3696 {"faminmax", {AArch64::FeatureFAMINMAX}},
3697 {"fp8fma", {AArch64::FeatureFP8FMA}},
3698 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3699 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3700 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3701 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3702 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3703 {"lut", {AArch64::FeatureLUT}},
3704 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3705 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3706 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3707 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3708 {"cpa", {AArch64::FeatureCPA}},
3709 {"tlbiw", {AArch64::FeatureTLBIW}},
3711
/// Append a human-readable description of the features in \p FBS to \p Str,
/// for use in "requires: ..." diagnostics. Architecture versions take
/// precedence over individual extensions; only when no versioned
/// architecture bit is set are the matching extension names listed.
/// Note: the v8.0a check is a separate `if`, so "ARMv8a" can be emitted in
/// addition to the string chosen by the else-if chain below it.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV9_5aOps])
    Str += "ARMv9.5a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // No architecture version matched: list every extension whose feature
    // set intersects FBS.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}
3757
3758void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3759 SMLoc S) {
3760 const uint16_t Op2 = Encoding & 7;
3761 const uint16_t Cm = (Encoding & 0x78) >> 3;
3762 const uint16_t Cn = (Encoding & 0x780) >> 7;
3763 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3764
3765 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3766
3767 Operands.push_back(
3768 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3769 Operands.push_back(
3770 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3771 Operands.push_back(
3772 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3773 Expr = MCConstantExpr::create(Op2, getContext());
3774 Operands.push_back(
3775 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3776}
3777
3778/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3779/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3780bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3782 if (Name.contains('.'))
3783 return TokError("invalid operand");
3784
3785 Mnemonic = Name;
3786 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3787
3788 const AsmToken &Tok = getTok();
3789 StringRef Op = Tok.getString();
3790 SMLoc S = Tok.getLoc();
3791
3792 if (Mnemonic == "ic") {
3793 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3794 if (!IC)
3795 return TokError("invalid operand for IC instruction");
3796 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3797 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3799 return TokError(Str);
3800 }
3801 createSysAlias(IC->Encoding, Operands, S);
3802 } else if (Mnemonic == "dc") {
3803 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3804 if (!DC)
3805 return TokError("invalid operand for DC instruction");
3806 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3807 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3809 return TokError(Str);
3810 }
3811 createSysAlias(DC->Encoding, Operands, S);
3812 } else if (Mnemonic == "at") {
3813 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3814 if (!AT)
3815 return TokError("invalid operand for AT instruction");
3816 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3817 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3819 return TokError(Str);
3820 }
3821 createSysAlias(AT->Encoding, Operands, S);
3822 } else if (Mnemonic == "tlbi") {
3823 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3824 if (!TLBI)
3825 return TokError("invalid operand for TLBI instruction");
3826 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3827 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3829 return TokError(Str);
3830 }
3831 createSysAlias(TLBI->Encoding, Operands, S);
3832 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3833
3834 if (Op.lower() != "rctx")
3835 return TokError("invalid operand for prediction restriction instruction");
3836
3837 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3838 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3839 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3840
3841 if (Mnemonic == "cosp" && !hasSpecres2)
3842 return TokError("COSP requires: predres2");
3843 if (!hasPredres)
3844 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3845
3846 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3847 : Mnemonic == "dvp" ? 0b101
3848 : Mnemonic == "cosp" ? 0b110
3849 : Mnemonic == "cpp" ? 0b111
3850 : 0;
3851 assert(PRCTX_Op2 &&
3852 "Invalid mnemonic for prediction restriction instruction");
3853 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3854 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3855
3856 createSysAlias(Encoding, Operands, S);
3857 }
3858
3859 Lex(); // Eat operand.
3860
3861 bool ExpectRegister = !Op.contains_insensitive("all");
3862 bool HasRegister = false;
3863
3864 // Check for the optional register operand.
3865 if (parseOptionalToken(AsmToken::Comma)) {
3866 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3867 return TokError("expected register operand");
3868 HasRegister = true;
3869 }
3870
3871 if (ExpectRegister && !HasRegister)
3872 return TokError("specified " + Mnemonic + " op requires a register");
3873 else if (!ExpectRegister && HasRegister)
3874 return TokError("specified " + Mnemonic + " op does not use a register");
3875
3876 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3877 return true;
3878
3879 return false;
3880}
3881
3882/// parseSyspAlias - The TLBIP instructions are simple aliases for
3883/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3884bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3886 if (Name.contains('.'))
3887 return TokError("invalid operand");
3888
3889 Mnemonic = Name;
3890 Operands.push_back(
3891 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3892
3893 const AsmToken &Tok = getTok();
3894 StringRef Op = Tok.getString();
3895 SMLoc S = Tok.getLoc();
3896
3897 if (Mnemonic == "tlbip") {
3898 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3899 if (HasnXSQualifier) {
3900 Op = Op.drop_back(3);
3901 }
3902 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3903 if (!TLBIorig)
3904 return TokError("invalid operand for TLBIP instruction");
3905 const AArch64TLBI::TLBI TLBI(
3906 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3907 TLBIorig->NeedsReg,
3908 HasnXSQualifier
3909 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3910 : TLBIorig->FeaturesRequired);
3911 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3912 std::string Name =
3913 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3914 std::string Str("TLBIP " + Name + " requires: ");
3916 return TokError(Str);
3917 }
3918 createSysAlias(TLBI.Encoding, Operands, S);
3919 }
3920
3921 Lex(); // Eat operand.
3922
3923 if (parseComma())
3924 return true;
3925
3926 if (Tok.isNot(AsmToken::Identifier))
3927 return TokError("expected register identifier");
3928 auto Result = tryParseSyspXzrPair(Operands);
3929 if (Result.isNoMatch())
3930 Result = tryParseGPRSeqPair(Operands);
3931 if (!Result.isSuccess())
3932 return TokError("specified " + Mnemonic +
3933 " op requires a pair of registers");
3934
3935 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3936 return true;
3937
3938 return false;
3939}
3940
3941ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
3942 MCAsmParser &Parser = getParser();
3943 const AsmToken &Tok = getTok();
3944
3945 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
3946 return TokError("'csync' operand expected");
3947 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3948 // Immediate operand.
3949 const MCExpr *ImmVal;
3950 SMLoc ExprLoc = getLoc();
3951 AsmToken IntTok = Tok;
3952 if (getParser().parseExpression(ImmVal))
3953 return ParseStatus::Failure;
3954 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3955 if (!MCE)
3956 return Error(ExprLoc, "immediate value expected for barrier operand");
3957 int64_t Value = MCE->getValue();
3958 if (Mnemonic == "dsb" && Value > 15) {
3959 // This case is a no match here, but it might be matched by the nXS
3960 // variant. Deliberately not unlex the optional '#' as it is not necessary
3961 // to characterize an integer immediate.
3962 Parser.getLexer().UnLex(IntTok);
3963 return ParseStatus::NoMatch;
3964 }
3965 if (Value < 0 || Value > 15)
3966 return Error(ExprLoc, "barrier operand out of range");
3967 auto DB = AArch64DB::lookupDBByEncoding(Value);
3968 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
3969 ExprLoc, getContext(),
3970 false /*hasnXSModifier*/));
3971 return ParseStatus::Success;
3972 }
3973
3974 if (Tok.isNot(AsmToken::Identifier))
3975 return TokError("invalid operand for instruction");
3976
3977 StringRef Operand = Tok.getString();
3978 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3979 auto DB = AArch64DB::lookupDBByName(Operand);
3980 // The only valid named option for ISB is 'sy'
3981 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
3982 return TokError("'sy' or #imm operand expected");
3983 // The only valid named option for TSB is 'csync'
3984 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
3985 return TokError("'csync' operand expected");
3986 if (!DB && !TSB) {
3987 if (Mnemonic == "dsb") {
3988 // This case is a no match here, but it might be matched by the nXS
3989 // variant.
3990 return ParseStatus::NoMatch;
3991 }
3992 return TokError("invalid barrier option name");
3993 }
3994
3995 Operands.push_back(AArch64Operand::CreateBarrier(
3996 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
3997 getContext(), false /*hasnXSModifier*/));
3998 Lex(); // Consume the option
3999
4000 return ParseStatus::Success;
4001}
4002
4004AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4005 const AsmToken &Tok = getTok();
4006
4007 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4008 if (Mnemonic != "dsb")
4009 return ParseStatus::Failure;
4010
4011 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4012 // Immediate operand.
4013 const MCExpr *ImmVal;
4014 SMLoc ExprLoc = getLoc();
4015 if (getParser().parseExpression(ImmVal))
4016 return ParseStatus::Failure;
4017 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4018 if (!MCE)
4019 return Error(ExprLoc, "immediate value expected for barrier operand");
4020 int64_t Value = MCE->getValue();
4021 // v8.7-A DSB in the nXS variant accepts only the following immediate
4022 // values: 16, 20, 24, 28.
4023 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4024 return Error(ExprLoc, "barrier operand out of range");
4025 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4026 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4027 ExprLoc, getContext(),
4028 true /*hasnXSModifier*/));
4029 return ParseStatus::Success;
4030 }
4031
4032 if (Tok.isNot(AsmToken::Identifier))
4033 return TokError("invalid operand for instruction");
4034
4035 StringRef Operand = Tok.getString();
4036 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4037
4038 if (!DB)
4039 return TokError("invalid barrier option name");
4040
4041 Operands.push_back(
4042 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4043 getContext(), true /*hasnXSModifier*/));
4044 Lex(); // Consume the option
4045
4046 return ParseStatus::Success;
4047}
4048
4049ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4050 const AsmToken &Tok = getTok();
4051
4052 if (Tok.isNot(AsmToken::Identifier))
4053 return ParseStatus::NoMatch;
4054
4055 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4056 return ParseStatus::NoMatch;
4057
4058 int MRSReg, MSRReg;
4059 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4060 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4061 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4062 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4063 } else
4064 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4065
4066 unsigned PStateImm = -1;
4067 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4068 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4069 PStateImm = PState15->Encoding;
4070 if (!PState15) {
4071 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4072 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4073 PStateImm = PState1->Encoding;
4074 }
4075
4076 Operands.push_back(
4077 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4078 PStateImm, getContext()));
4079 Lex(); // Eat identifier
4080
4081 return ParseStatus::Success;
4082}
4083
4084/// tryParseNeonVectorRegister - Parse a vector register operand.
4085bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4086 if (getTok().isNot(AsmToken::Identifier))
4087 return true;
4088
4089 SMLoc S = getLoc();
4090 // Check for a vector register specifier first.
4093 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4094 if (!Res.isSuccess())
4095 return true;
4096
4097 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4098 if (!KindRes)
4099 return true;
4100
4101 unsigned ElementWidth = KindRes->second;
4102 Operands.push_back(
4103 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4104 S, getLoc(), getContext()));
4105
4106 // If there was an explicit qualifier, that goes on as a literal text
4107 // operand.
4108 if (!Kind.empty())
4109 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4110
4111 return tryParseVectorIndex(Operands).isFailure();
4112}
4113
4114ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4115 SMLoc SIdx = getLoc();
4116 if (parseOptionalToken(AsmToken::LBrac)) {
4117 const MCExpr *ImmVal;
4118 if (getParser().parseExpression(ImmVal))
4119 return ParseStatus::NoMatch;
4120 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4121 if (!MCE)
4122 return TokError("immediate value expected for vector index");
4123
4124 SMLoc E = getLoc();
4125
4126 if (parseToken(AsmToken::RBrac, "']' expected"))
4127 return ParseStatus::Failure;
4128
4129 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4130 E, getContext()));
4131 return ParseStatus::Success;
4132 }
4133
4134 return ParseStatus::NoMatch;
4135}
4136
4137// tryParseVectorRegister - Try to parse a vector register name with
4138// optional kind specifier. If it is a register specifier, eat the token
4139// and return it.
4140ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4141 StringRef &Kind,
4142 RegKind MatchKind) {
4143 const AsmToken &Tok = getTok();
4144
4145 if (Tok.isNot(AsmToken::Identifier))
4146 return ParseStatus::NoMatch;
4147
4148 StringRef Name = Tok.getString();
4149 // If there is a kind specifier, it's separated from the register name by
4150 // a '.'.
4151 size_t Start = 0, Next = Name.find('.');
4152 StringRef Head = Name.slice(Start, Next);
4153 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4154
4155 if (RegNum) {
4156 if (Next != StringRef::npos) {
4157 Kind = Name.slice(Next, StringRef::npos);
4158 if (!isValidVectorKind(Kind, MatchKind))
4159 return TokError("invalid vector kind qualifier");
4160 }
4161 Lex(); // Eat the register token.
4162
4163 Reg = RegNum;
4164 return ParseStatus::Success;
4165 }
4166
4167 return ParseStatus::NoMatch;
4168}
4169
4170/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4171template <RegKind RK>
4173AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4174 // Check for a SVE predicate register specifier first.
4175 const SMLoc S = getLoc();
4177 MCRegister RegNum;
4178 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4179 if (!Res.isSuccess())
4180 return Res;
4181
4182 const auto &KindRes = parseVectorKind(Kind, RK);
4183 if (!KindRes)
4184 return ParseStatus::NoMatch;
4185
4186 unsigned ElementWidth = KindRes->second;
4187 Operands.push_back(AArch64Operand::CreateVectorReg(
4188 RegNum, RK, ElementWidth, S,
4189 getLoc(), getContext()));
4190
4191 if (getLexer().is(AsmToken::LBrac)) {
4192 if (RK == RegKind::SVEPredicateAsCounter) {
4193 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4194 if (ResIndex.isSuccess())
4195 return ParseStatus::Success;
4196 } else {
4197 // Indexed predicate, there's no comma so try parse the next operand
4198 // immediately.
4199 if (parseOperand(Operands, false, false))
4200 return ParseStatus::NoMatch;
4201 }
4202 }
4203
4204 // Not all predicates are followed by a '/m' or '/z'.
4205 if (getTok().isNot(AsmToken::Slash))
4206 return ParseStatus::Success;
4207
4208 // But when they do they shouldn't have an element type suffix.
4209 if (!Kind.empty())
4210 return Error(S, "not expecting size suffix");
4211
4212 // Add a literal slash as operand
4213 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4214
4215 Lex(); // Eat the slash.
4216
4217 // Zeroing or merging?
4218 auto Pred = getTok().getString().lower();
4219 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4220 return Error(getLoc(), "expecting 'z' predication");
4221
4222 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4223 return Error(getLoc(), "expecting 'm' or 'z' predication");
4224
4225 // Add zero/merge token.
4226 const char *ZM = Pred == "z" ? "z" : "m";
4227 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4228
4229 Lex(); // Eat zero/merge token.
4230 return ParseStatus::Success;
4231}
4232
4233/// parseRegister - Parse a register operand.
4234bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4235 // Try for a Neon vector register.
4236 if (!tryParseNeonVectorRegister(Operands))
4237 return false;
4238
4239 if (tryParseZTOperand(Operands).isSuccess())
4240 return false;
4241
4242 // Otherwise try for a scalar register.
4243 if (tryParseGPROperand<false>(Operands).isSuccess())
4244 return false;
4245
4246 return true;
4247}
4248
4249bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4250 bool HasELFModifier = false;
4252
4253 if (parseOptionalToken(AsmToken::Colon)) {
4254 HasELFModifier = true;
4255
4256 if (getTok().isNot(AsmToken::Identifier))
4257 return TokError("expect relocation specifier in operand after ':'");
4258
4259 std::string LowerCase = getTok().getIdentifier().lower();
4260 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4262 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4263 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4264 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4265 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4266 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4267 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4268 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4269 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4270 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4271 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4272 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4273 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4274 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4275 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4276 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4277 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4278 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4279 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4280 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4281 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4282 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4283 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4284 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4285 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4286 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4287 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4288 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4289 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4290 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4291 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4292 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4293 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4294 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4295 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4296 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4298 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4299 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4301 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4302 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4303 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4305 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4306 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4308
4309 if (RefKind == AArch64MCExpr::VK_INVALID)
4310 return TokError("expect relocation specifier in operand after ':'");
4311
4312 Lex(); // Eat identifier
4313
4314 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4315 return true;
4316 }
4317
4318 if (getParser().parseExpression(ImmVal))
4319 return true;
4320
4321 if (HasELFModifier)
4322 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4323
4324 return false;
4325}
4326
4327ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4328 if (getTok().isNot(AsmToken::LCurly))
4329 return ParseStatus::NoMatch;
4330
4331 auto ParseMatrixTile = [this](unsigned &Reg,
4332 unsigned &ElementWidth) -> ParseStatus {
4333 StringRef Name = getTok().getString();
4334 size_t DotPosition = Name.find('.');
4335 if (DotPosition == StringRef::npos)
4336 return ParseStatus::NoMatch;
4337
4338 unsigned RegNum = matchMatrixTileListRegName(Name);
4339 if (!RegNum)
4340 return ParseStatus::NoMatch;
4341
4342 StringRef Tail = Name.drop_front(DotPosition);
4343 const std::optional<std::pair<int, int>> &KindRes =
4344 parseVectorKind(Tail, RegKind::Matrix);
4345 if (!KindRes)
4346 return TokError(
4347 "Expected the register to be followed by element width suffix");
4348 ElementWidth = KindRes->second;
4349 Reg = RegNum;
4350 Lex(); // Eat the register.
4351 return ParseStatus::Success;
4352 };
4353
4354 SMLoc S = getLoc();
4355 auto LCurly = getTok();
4356 Lex(); // Eat left bracket token.
4357
4358 // Empty matrix list
4359 if (parseOptionalToken(AsmToken::RCurly)) {
4360 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4361 /*RegMask=*/0, S, getLoc(), getContext()));
4362 return ParseStatus::Success;
4363 }
4364
4365 // Try parse {za} alias early
4366 if (getTok().getString().equals_insensitive("za")) {
4367 Lex(); // Eat 'za'
4368
4369 if (parseToken(AsmToken::RCurly, "'}' expected"))
4370 return ParseStatus::Failure;
4371
4372 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4373 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4374 return ParseStatus::Success;
4375 }
4376
4377 SMLoc TileLoc = getLoc();
4378
4379 unsigned FirstReg, ElementWidth;
4380 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4381 if (!ParseRes.isSuccess()) {
4382 getLexer().UnLex(LCurly);
4383 return ParseRes;
4384 }
4385
4386 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4387
4388 unsigned PrevReg = FirstReg;
4389
4391 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4392
4393 SmallSet<unsigned, 8> SeenRegs;
4394 SeenRegs.insert(FirstReg);
4395
4396 while (parseOptionalToken(AsmToken::Comma)) {
4397 TileLoc = getLoc();
4398 unsigned Reg, NextElementWidth;
4399 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4400 if (!ParseRes.isSuccess())
4401 return ParseRes;
4402
4403 // Element size must match on all regs in the list.
4404 if (ElementWidth != NextElementWidth)
4405 return Error(TileLoc, "mismatched register size suffix");
4406
4407 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4408 Warning(TileLoc, "tile list not in ascending order");
4409
4410 if (SeenRegs.contains(Reg))
4411 Warning(TileLoc, "duplicate tile in list");
4412 else {
4413 SeenRegs.insert(Reg);
4414 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4415 }
4416
4417 PrevReg = Reg;
4418 }
4419
4420 if (parseToken(AsmToken::RCurly, "'}' expected"))
4421 return ParseStatus::Failure;
4422
4423 unsigned RegMask = 0;
4424 for (auto Reg : DRegs)
4425 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4426 RI->getEncodingValue(AArch64::ZAD0));
4427 Operands.push_back(
4428 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4429
4430 return ParseStatus::Success;
4431}
4432
4433template <RegKind VectorKind>
4434ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4435 bool ExpectMatch) {
4436 MCAsmParser &Parser = getParser();
4437 if (!getTok().is(AsmToken::LCurly))
4438 return ParseStatus::NoMatch;
4439
4440 // Wrapper around parse function
4441 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4442 bool NoMatchIsError) -> ParseStatus {
4443 auto RegTok = getTok();
4444 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4445 if (ParseRes.isSuccess()) {
4446 if (parseVectorKind(Kind, VectorKind))
4447 return ParseRes;
4448 llvm_unreachable("Expected a valid vector kind");
4449 }
4450
4451 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4452 RegTok.getString().equals_insensitive("zt0"))
4453 return ParseStatus::NoMatch;
4454
4455 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4456 (ParseRes.isNoMatch() && NoMatchIsError &&
4457 !RegTok.getString().starts_with_insensitive("za")))
4458 return Error(Loc, "vector register expected");
4459
4460 return ParseStatus::NoMatch;
4461 };
4462
4463 int NumRegs = getNumRegsForRegKind(VectorKind);
4464 SMLoc S = getLoc();
4465 auto LCurly = getTok();
4466 Lex(); // Eat left bracket token.
4467
4469 MCRegister FirstReg;
4470 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4471
4472 // Put back the original left bracket if there was no match, so that
4473 // different types of list-operands can be matched (e.g. SVE, Neon).
4474 if (ParseRes.isNoMatch())
4475 Parser.getLexer().UnLex(LCurly);
4476
4477 if (!ParseRes.isSuccess())
4478 return ParseRes;
4479
4480 int64_t PrevReg = FirstReg;
4481 unsigned Count = 1;
4482
4483 int Stride = 1;
4484 if (parseOptionalToken(AsmToken::Minus)) {
4485 SMLoc Loc = getLoc();
4486 StringRef NextKind;
4487
4489 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4490 if (!ParseRes.isSuccess())
4491 return ParseRes;
4492
4493 // Any Kind suffices must match on all regs in the list.
4494 if (Kind != NextKind)
4495 return Error(Loc, "mismatched register size suffix");
4496
4497 unsigned Space =
4498 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4499
4500 if (Space == 0 || Space > 3)
4501 return Error(Loc, "invalid number of vectors");
4502
4503 Count += Space;
4504 }
4505 else {
4506 bool HasCalculatedStride = false;
4507 while (parseOptionalToken(AsmToken::Comma)) {
4508 SMLoc Loc = getLoc();
4509 StringRef NextKind;
4511 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4512 if (!ParseRes.isSuccess())
4513 return ParseRes;
4514
4515 // Any Kind suffices must match on all regs in the list.
4516 if (Kind != NextKind)
4517 return Error(Loc, "mismatched register size suffix");
4518
4519 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4520 unsigned PrevRegVal =
4521 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4522 if (!HasCalculatedStride) {
4523 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4524 : (RegVal + NumRegs - PrevRegVal);
4525 HasCalculatedStride = true;
4526 }
4527
4528 // Register must be incremental (with a wraparound at last register).
4529 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4530 return Error(Loc, "registers must have the same sequential stride");
4531
4532 PrevReg = Reg;
4533 ++Count;
4534 }
4535 }
4536
4537 if (parseToken(AsmToken::RCurly, "'}' expected"))
4538 return ParseStatus::Failure;
4539
4540 if (Count > 4)
4541 return Error(S, "invalid number of vectors");
4542
4543 unsigned NumElements = 0;
4544 unsigned ElementWidth = 0;
4545 if (!Kind.empty()) {
4546 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4547 std::tie(NumElements, ElementWidth) = *VK;
4548 }
4549
4550 Operands.push_back(AArch64Operand::CreateVectorList(
4551 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4552 getLoc(), getContext()));
4553
4554 return ParseStatus::Success;
4555}
4556
4557/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4558bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4559 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4560 if (!ParseRes.isSuccess())
4561 return true;
4562
4563 return tryParseVectorIndex(Operands).isFailure();
4564}
4565
4566ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4567 SMLoc StartLoc = getLoc();
4568
4569 MCRegister RegNum;
4570 ParseStatus Res = tryParseScalarRegister(RegNum);
4571 if (!Res.isSuccess())
4572 return Res;
4573
4574 if (!parseOptionalToken(AsmToken::Comma)) {
4575 Operands.push_back(AArch64Operand::CreateReg(
4576 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4577 return ParseStatus::Success;
4578 }
4579
4580 parseOptionalToken(AsmToken::Hash);
4581
4582 if (getTok().isNot(AsmToken::Integer))
4583 return Error(getLoc(), "index must be absent or #0");
4584
4585 const MCExpr *ImmVal;
4586 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4587 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4588 return Error(getLoc(), "index must be absent or #0");
4589
4590 Operands.push_back(AArch64Operand::CreateReg(
4591 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4592 return ParseStatus::Success;
4593}
4594
4595ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4596 SMLoc StartLoc = getLoc();
4597 const AsmToken &Tok = getTok();
4598 std::string Name = Tok.getString().lower();
4599
4600 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4601
4602 if (RegNum == 0)
4603 return ParseStatus::NoMatch;
4604
4605 Operands.push_back(AArch64Operand::CreateReg(
4606 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4607 Lex(); // Eat register.
4608
4609 // Check if register is followed by an index
4610 if (parseOptionalToken(AsmToken::LBrac)) {
4611 Operands.push_back(
4612 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4613 const MCExpr *ImmVal;
4614 if (getParser().parseExpression(ImmVal))
4615 return ParseStatus::NoMatch;
4616 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4617 if (!MCE)
4618 return TokError("immediate value expected for vector index");
4619 Operands.push_back(AArch64Operand::CreateImm(
4620 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4621 getLoc(), getContext()));
4622 if (parseOptionalToken(AsmToken::Comma))
4623 if (parseOptionalMulOperand(Operands))
4624 return ParseStatus::Failure;
4625 if (parseToken(AsmToken::RBrac, "']' expected"))
4626 return ParseStatus::Failure;
4627 Operands.push_back(
4628 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4629 }
4630 return ParseStatus::Success;
4631}
4632
4633template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4634ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4635 SMLoc StartLoc = getLoc();
4636
4637 MCRegister RegNum;
4638 ParseStatus Res = tryParseScalarRegister(RegNum);
4639 if (!Res.isSuccess())
4640 return Res;
4641
4642 // No shift/extend is the default.
4643 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4644 Operands.push_back(AArch64Operand::CreateReg(
4645 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4646 return ParseStatus::Success;
4647 }
4648
4649 // Eat the comma
4650 Lex();
4651
4652 // Match the shift
4654 Res = tryParseOptionalShiftExtend(ExtOpnd);
4655 if (!Res.isSuccess())
4656 return Res;
4657
4658 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4659 Operands.push_back(AArch64Operand::CreateReg(
4660 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4661 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4662 Ext->hasShiftExtendAmount()));
4663
4664 return ParseStatus::Success;
4665}
4666
4667bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4668 MCAsmParser &Parser = getParser();
4669
4670 // Some SVE instructions have a decoration after the immediate, i.e.
4671 // "mul vl". We parse them here and add tokens, which must be present in the
4672 // asm string in the tablegen instruction.
4673 bool NextIsVL =
4674 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4675 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4676 if (!getTok().getString().equals_insensitive("mul") ||
4677 !(NextIsVL || NextIsHash))
4678 return true;
4679
4680 Operands.push_back(
4681 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4682 Lex(); // Eat the "mul"
4683
4684 if (NextIsVL) {
4685 Operands.push_back(
4686 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4687 Lex(); // Eat the "vl"
4688 return false;
4689 }
4690
4691 if (NextIsHash) {
4692 Lex(); // Eat the #
4693 SMLoc S = getLoc();
4694
4695 // Parse immediate operand.
4696 const MCExpr *ImmVal;
4697 if (!Parser.parseExpression(ImmVal))
4698 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4699 Operands.push_back(AArch64Operand::CreateImm(
4700 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4701 getContext()));
4702 return false;
4703 }
4704 }
4705
4706 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4707}
4708
4709bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4710 StringRef &VecGroup) {
4711 MCAsmParser &Parser = getParser();
4712 auto Tok = Parser.getTok();
4713 if (Tok.isNot(AsmToken::Identifier))
4714 return true;
4715
4717 .Case("vgx2", "vgx2")
4718 .Case("vgx4", "vgx4")
4719 .Default("");
4720
4721 if (VG.empty())
4722 return true;
4723
4724 VecGroup = VG;
4725 Parser.Lex(); // Eat vgx[2|4]
4726 return false;
4727}
4728
4729bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4730 auto Tok = getTok();
4731 if (Tok.isNot(AsmToken::Identifier))
4732 return true;
4733
4734 auto Keyword = Tok.getString();
4736 .Case("sm", "sm")
4737 .Case("za", "za")
4738 .Default(Keyword);
4739 Operands.push_back(
4740 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4741
4742 Lex();
4743 return false;
4744}
4745
4746/// parseOperand - Parse a arm instruction operand. For now this parses the
4747/// operand regardless of the mnemonic.
4748bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4749 bool invertCondCode) {
4750 MCAsmParser &Parser = getParser();
4751
4752 ParseStatus ResTy =
4753 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4754
4755 // Check if the current operand has a custom associated parser, if so, try to
4756 // custom parse the operand, or fallback to the general approach.
4757 if (ResTy.isSuccess())
4758 return false;
4759 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4760 // there was a match, but an error occurred, in which case, just return that
4761 // the operand parsing failed.
4762 if (ResTy.isFailure())
4763 return true;
4764
4765 // Nothing custom, so do general case parsing.
4766 SMLoc S, E;
4767 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4768 if (parseOptionalToken(AsmToken::Comma)) {
4769 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4770 if (!Res.isNoMatch())
4771 return Res.isFailure();
4772 getLexer().UnLex(SavedTok);
4773 }
4774 return false;
4775 };
4776 switch (getLexer().getKind()) {
4777 default: {
4778 SMLoc S = getLoc();
4779 const MCExpr *Expr;
4780 if (parseSymbolicImmVal(Expr))
4781 return Error(S, "invalid operand");
4782
4783 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4784 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4785 return parseOptionalShiftExtend(getTok());
4786 }
4787 case AsmToken::LBrac: {
4788 Operands.push_back(
4789 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4790 Lex(); // Eat '['
4791
4792 // There's no comma after a '[', so we can parse the next operand
4793 // immediately.
4794 return parseOperand(Operands, false, false);
4795 }
4796 case AsmToken::LCurly: {
4797 if (!parseNeonVectorList(Operands))
4798 return false;
4799
4800 Operands.push_back(
4801 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4802 Lex(); // Eat '{'
4803
4804 // There's no comma after a '{', so we can parse the next operand
4805 // immediately.
4806 return parseOperand(Operands, false, false);
4807 }
4808 case AsmToken::Identifier: {
4809 // See if this is a "VG" decoration used by SME instructions.
4810 StringRef VecGroup;
4811 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4812 Operands.push_back(
4813 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4814 return false;
4815 }
4816 // If we're expecting a Condition Code operand, then just parse that.
4817 if (isCondCode)
4818 return parseCondCode(Operands, invertCondCode);
4819
4820 // If it's a register name, parse it.
4821 if (!parseRegister(Operands)) {
4822 // Parse an optional shift/extend modifier.
4823 AsmToken SavedTok = getTok();
4824 if (parseOptionalToken(AsmToken::Comma)) {
4825 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4826 // such cases and don't report an error when <label> happens to match a
4827 // shift/extend modifier.
4828 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4829 /*ParseForAllFeatures=*/true);
4830 if (!Res.isNoMatch())
4831 return Res.isFailure();
4832 Res = tryParseOptionalShiftExtend(Operands);
4833 if (!Res.isNoMatch())
4834 return Res.isFailure();
4835 getLexer().UnLex(SavedTok);
4836 }
4837 return false;
4838 }
4839
4840 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4841 // by SVE instructions.
4842 if (!parseOptionalMulOperand(Operands))
4843 return false;
4844
4845 // If this is a two-word mnemonic, parse its special keyword
4846 // operand as an identifier.
4847 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4848 Mnemonic == "gcsb")
4849 return parseKeywordOperand(Operands);
4850
4851 // This was not a register so parse other operands that start with an
4852 // identifier (like labels) as expressions and create them as immediates.
4853 const MCExpr *IdVal;
4854 S = getLoc();
4855 if (getParser().parseExpression(IdVal))
4856 return true;
4857 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4858 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4859 return false;
4860 }
4861 case AsmToken::Integer:
4862 case AsmToken::Real:
4863 case AsmToken::Hash: {
4864 // #42 -> immediate.
4865 S = getLoc();
4866
4867 parseOptionalToken(AsmToken::Hash);
4868
4869 // Parse a negative sign
4870 bool isNegative = false;
4871 if (getTok().is(AsmToken::Minus)) {
4872 isNegative = true;
4873 // We need to consume this token only when we have a Real, otherwise
4874 // we let parseSymbolicImmVal take care of it
4875 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4876 Lex();
4877 }
4878
4879 // The only Real that should come through here is a literal #0.0 for
4880 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4881 // so convert the value.
4882 const AsmToken &Tok = getTok();
4883 if (Tok.is(AsmToken::Real)) {
4884 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4885 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4886 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4887 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4888 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4889 return TokError("unexpected floating point literal");
4890 else if (IntVal != 0 || isNegative)
4891 return TokError("expected floating-point constant #0.0");
4892 Lex(); // Eat the token.
4893
4894 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4895 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4896 return false;
4897 }
4898
4899 const MCExpr *ImmVal;
4900 if (parseSymbolicImmVal(ImmVal))
4901 return true;
4902
4903 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4904 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4905
4906 // Parse an optional shift/extend modifier.
4907 return parseOptionalShiftExtend(Tok);
4908 }
4909 case AsmToken::Equal: {
4910 SMLoc Loc = getLoc();
4911 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4912 return TokError("unexpected token in operand");
4913 Lex(); // Eat '='
4914 const MCExpr *SubExprVal;
4915 if (getParser().parseExpression(SubExprVal))
4916 return true;
4917
4918 if (Operands.size() < 2 ||
4919 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4920 return Error(Loc, "Only valid when first operand is register");
4921
4922 bool IsXReg =
4923 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4924 Operands[1]->getReg());
4925
4926 MCContext& Ctx = getContext();
4927 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4928 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4929 if (isa<MCConstantExpr>(SubExprVal)) {
4930 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4931 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4932 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
4933 ShiftAmt += 16;
4934 Imm >>= 16;
4935 }
4936 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4937 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4938 Operands.push_back(AArch64Operand::CreateImm(
4939 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4940 if (ShiftAmt)
4941 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4942 ShiftAmt, true, S, E, Ctx));
4943 return false;
4944 }
4945 APInt Simm = APInt(64, Imm << ShiftAmt);
4946 // check if the immediate is an unsigned or signed 32-bit int for W regs
4947 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4948 return Error(Loc, "Immediate too large for register");
4949 }
4950 // If it is a label or an imm that cannot fit in a movz, put it into CP.
4951 const MCExpr *CPLoc =
4952 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4953 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4954 return false;
4955 }
4956 }
4957}
4958
4959bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4960 const MCExpr *Expr = nullptr;
4961 SMLoc L = getLoc();
4962 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4963 return true;
4964 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4965 if (check(!Value, L, "expected constant expression"))
4966 return true;
4967 Out = Value->getValue();
4968 return false;
4969}
4970
4971bool AArch64AsmParser::parseComma() {
4972 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4973 return true;
4974 // Eat the comma
4975 Lex();
4976 return false;
4977}
4978
4979bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4980 unsigned First, unsigned Last) {
4982 SMLoc Start, End;
4983 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
4984 return true;
4985
4986 // Special handling for FP and LR; they aren't linearly after x28 in
4987 // the registers enum.
4988 unsigned RangeEnd = Last;
4989 if (Base == AArch64::X0) {
4990 if (Last == AArch64::FP) {
4991 RangeEnd = AArch64::X28;
4992 if (Reg == AArch64::FP) {
4993 Out = 29;
4994 return false;
4995 }
4996 }
4997 if (Last == AArch64::LR) {
4998 RangeEnd = AArch64::X28;
4999 if (Reg == AArch64::FP) {
5000 Out = 29;
5001 return false;
5002 } else if (Reg == AArch64::LR) {
5003 Out = 30;
5004 return false;
5005 }
5006 }
5007 }
5008
5009 if (check(Reg < First || Reg > RangeEnd, Start,
5010 Twine("expected register in range ") +
5013 return true;
5014 Out = Reg - Base;
5015 return false;
5016}
5017
5018bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5019 const MCParsedAsmOperand &Op2) const {
5020 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5021 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5022
5023 if (AOp1.isVectorList() && AOp2.isVectorList())
5024 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5025 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5026 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5027
5028 if (!AOp1.isReg() || !AOp2.isReg())
5029 return false;
5030
5031 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5032 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5033 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5034
5035 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5036 "Testing equality of non-scalar registers not supported");
5037
5038 // Check if a registers match their sub/super register classes.
5039 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5040 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5041 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5042 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5043 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5044 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5045 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5046 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5047
5048 return false;
5049}
5050
5051/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
5052/// operands.
5053bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
5054 StringRef Name, SMLoc NameLoc,
5057 .Case("beq", "b.eq")
5058 .Case("bne", "b.ne")
5059 .Case("bhs", "b.hs")
5060 .Case("bcs", "b.cs")
5061 .Case("blo", "b.lo")
5062 .Case("bcc", "b.cc")
5063 .Case("bmi", "b.mi")
5064 .Case("bpl", "b.pl")
5065 .Case("bvs", "b.vs")
5066 .Case("bvc", "b.vc")
5067 .Case("bhi", "b.hi")
5068 .Case("bls", "b.ls")
5069 .Case("bge", "b.ge")
5070 .Case("blt", "b.lt")
5071 .Case("bgt", "b.gt")
5072 .Case("ble", "b.le")
5073 .Case("bal", "b.al")
5074 .Case("bnv", "b.nv")
5075 .Default(Name);
5076
5077 // First check for the AArch64-specific .req directive.
5078 if (getTok().is(AsmToken::Identifier) &&
5079 getTok().getIdentifier().lower() == ".req") {
5080 parseDirectiveReq(Name, NameLoc);
5081 // We always return 'error' for this, as we're done with this
5082 // statement and don't need to match the 'instruction."
5083 return true;
5084 }
5085
5086 // Create the leading tokens for the mnemonic, split by '.' characters.
5087 size_t Start = 0, Next = Name.find('.');
5088 StringRef Head = Name.slice(Start, Next);
5089
5090 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5091 // the SYS instruction.
5092 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5093 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5094 return parseSysAlias(Head, NameLoc, Operands);
5095
5096 // TLBIP instructions are aliases for the SYSP instruction.
5097 if (Head == "tlbip")
5098 return parseSyspAlias(Head, NameLoc, Operands);
5099
5100 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5101 Mnemonic = Head;
5102
5103 // Handle condition codes for a branch mnemonic
5104 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5105 Start = Next;
5106 Next = Name.find('.', Start + 1);
5107 Head = Name.slice(Start + 1, Next);
5108
5109 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5110 (Head.data() - Name.data()));
5111 std::string Suggestion;
5112 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5113 if (CC == AArch64CC::Invalid) {
5114 std::string Msg = "invalid condition code";
5115 if (!Suggestion.empty())
5116 Msg += ", did you mean " + Suggestion + "?";
5117 return Error(SuffixLoc, Msg);
5118 }
5119 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5120 /*IsSuffix=*/true));
5121 Operands.push_back(
5122 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5123 }
5124
5125 // Add the remaining tokens in the mnemonic.
5126 while (Next != StringRef::npos) {
5127 Start = Next;
5128 Next = Name.find('.', Start + 1);
5129 Head = Name.slice(Start, Next);
5130 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5131 (Head.data() - Name.data()) + 1);
5132 Operands.push_back(AArch64Operand::CreateToken(
5133 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5134 }
5135
5136 // Conditional compare instructions have a Condition Code operand, which needs
5137 // to be parsed and an immediate operand created.
5138 bool condCodeFourthOperand =
5139 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5140 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5141 Head == "csinc" || Head == "csinv" || Head == "csneg");
5142
5143 // These instructions are aliases to some of the conditional select
5144 // instructions. However, the condition code is inverted in the aliased
5145 // instruction.
5146 //
5147 // FIXME: Is this the correct way to handle these? Or should the parser
5148 // generate the aliased instructions directly?
5149 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5150 bool condCodeThirdOperand =
5151 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5152
5153 // Read the remaining operands.
5154 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5155
5156 unsigned N = 1;
5157 do {
5158 // Parse and remember the operand.
5159 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5160 (N == 3 && condCodeThirdOperand) ||
5161 (N == 2 && condCodeSecondOperand),
5162 condCodeSecondOperand || condCodeThirdOperand)) {
5163 return true;
5164 }
5165
5166 // After successfully parsing some operands there are three special cases
5167 // to consider (i.e. notional operands not separated by commas). Two are
5168 // due to memory specifiers:
5169 // + An RBrac will end an address for load/store/prefetch
5170 // + An '!' will indicate a pre-indexed operation.
5171 //
5172 // And a further case is '}', which ends a group of tokens specifying the
5173 // SME accumulator array 'ZA' or tile vector, i.e.
5174 //
5175 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5176 //
5177 // It's someone else's responsibility to make sure these tokens are sane
5178 // in the given context!
5179
5180 if (parseOptionalToken(AsmToken::RBrac))
5181 Operands.push_back(
5182 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5183 if (parseOptionalToken(AsmToken::Exclaim))
5184 Operands.push_back(
5185 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5186 if (parseOptionalToken(AsmToken::RCurly))
5187 Operands.push_back(
5188 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5189
5190 ++N;
5191 } while (parseOptionalToken(AsmToken::Comma));
5192 }
5193
5194 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5195 return true;
5196
5197 return false;
5198}
5199
5200static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5201 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5202 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5203 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5204 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5205 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5206 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5207 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5208}
5209
5210// FIXME: This entire function is a giant hack to provide us with decent
5211// operand range validation/diagnostics until TableGen/MC can be extended
5212// to support autogeneration of this kind of validation.
5213bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5215 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5216 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5217
5218 // A prefix only applies to the instruction following it. Here we extract
5219 // prefix information for the next instruction before validating the current
5220 // one so that in the case of failure we don't erronously continue using the
5221 // current prefix.
5222 PrefixInfo Prefix = NextPrefix;
5223 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5224
5225 // Before validating the instruction in isolation we run through the rules
5226 // applicable when it follows a prefix instruction.
5227 // NOTE: brk & hlt can be prefixed but require no additional validation.
5228 if (Prefix.isActive() &&
5229 (Inst.getOpcode() != AArch64::BRK) &&
5230 (Inst.getOpcode() != AArch64::HLT)) {
5231
5232 // Prefixed intructions must have a destructive operand.
5235 return Error(IDLoc, "instruction is unpredictable when following a"
5236 " movprfx, suggest replacing movprfx with mov");
5237
5238 // Destination operands must match.
5239 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5240 return Error(Loc[0], "instruction is unpredictable when following a"
5241 " movprfx writing to a different destination");
5242
5243 // Destination operand must not be used in any other location.
5244 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5245 if (Inst.getOperand(i).isReg() &&
5246 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5247 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5248 return Error(Loc[0], "instruction is unpredictable when following a"
5249 " movprfx and destination also used as non-destructive"
5250 " source");
5251 }
5252
5253 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5254 if (Prefix.isPredicated()) {
5255 int PgIdx = -1;
5256
5257 // Find the instructions general predicate.
5258 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5259 if (Inst.getOperand(i).isReg() &&
5260 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5261 PgIdx = i;
5262 break;
5263 }
5264
5265 // Instruction must be predicated if the movprfx is predicated.
5266 if (PgIdx == -1 ||
5268 return Error(IDLoc, "instruction is unpredictable when following a"
5269 " predicated movprfx, suggest using unpredicated movprfx");
5270
5271 // Instruction must use same general predicate as the movprfx.
5272 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5273 return Error(IDLoc, "instruction is unpredictable when following a"
5274 " predicated movprfx using a different general predicate");
5275
5276 // Instruction element type must match the movprfx.
5277 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5278 return Error(IDLoc, "instruction is unpredictable when following a"
5279 " predicated movprfx with a different element size");
5280 }
5281 }
5282
5283 // Check for indexed addressing modes w/ the base register being the
5284 // same as a destination/source register or pair load where
5285 // the Rt == Rt2. All of those are undefined behaviour.
5286 switch (Inst.getOpcode()) {
5287 case AArch64::LDPSWpre:
5288 case AArch64::LDPWpost:
5289 case AArch64::LDPWpre:
5290 case AArch64::LDPXpost:
5291 case AArch64::LDPXpre: {
5292 unsigned Rt = Inst.getOperand(1).getReg();
5293 unsigned Rt2 = Inst.getOperand(2).getReg();
5294 unsigned Rn = Inst.getOperand(3).getReg();
5295 if (RI->isSubRegisterEq(Rn, Rt))
5296 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5297 "is also a destination");
5298 if (RI->isSubRegisterEq(Rn, Rt2))
5299 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5300 "is also a destination");
5301 [[fallthrough]];
5302 }
5303 case AArch64::LDR_ZA:
5304 case AArch64::STR_ZA: {
5305 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5306 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5307 return Error(Loc[1],
5308 "unpredictable instruction, immediate and offset mismatch.");
5309 break;
5310 }
5311 case AArch64::LDPDi:
5312 case AArch64::LDPQi:
5313 case AArch64::LDPSi:
5314 case AArch64::LDPSWi:
5315 case AArch64::LDPWi:
5316 case AArch64::LDPXi: {
5317 unsigned Rt = Inst.getOperand(0).getReg();
5318 unsigned Rt2 = Inst.getOperand(1).getReg();
5319 if (Rt == Rt2)
5320 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5321 break;
5322 }
5323 case AArch64::LDPDpost:
5324 case AArch64::LDPDpre:
5325 case AArch64::LDPQpost:
5326 case AArch64::LDPQpre:
5327 case AArch64::LDPSpost:
5328 case AArch64::LDPSpre:
5329 case AArch64::LDPSWpost: {
5330 unsigned Rt = Inst.getOperand(1).getReg();
5331 unsigned Rt2 = Inst.getOperand(2).getReg();
5332 if (Rt == Rt2)
5333 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5334 break;
5335 }
5336 case AArch64::STPDpost:
5337 case AArch64::STPDpre:
5338 case AArch64::STPQpost:
5339 case AArch64::STPQpre:
5340 case AArch64::STPSpost:
5341 case AArch64::STPSpre:
5342 case AArch64::STPWpost:
5343 case AArch64::STPWpre:
5344 case AArch64::STPXpost:
5345 case AArch64::STPXpre: {
5346 unsigned Rt = Inst.getOperand(1).getReg();
5347 unsigned Rt2 = Inst.getOperand(2).getReg();
5348 unsigned Rn = Inst.getOperand(3).getReg();
5349 if (RI->isSubRegisterEq(Rn, Rt))
5350 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5351 "is also a source");
5352 if (RI->isSubRegisterEq(Rn, Rt2))
5353 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5354 "is also a source");
5355 break;
5356 }
5357 case AArch64::LDRBBpre:
5358 case AArch64::LDRBpre:
5359 case AArch64::LDRHHpre:
5360 case AArch64::LDRHpre:
5361 case AArch64::LDRSBWpre:
5362 case AArch64::LDRSBXpre:
5363 case AArch64::LDRSHWpre:
5364 case AArch64::LDRSHXpre:
5365 case AArch64::LDRSWpre:
5366 case AArch64::LDRWpre:
5367 case AArch64::LDRXpre:
5368 case AArch64::LDRBBpost:
5369 case AArch64::LDRBpost:
5370 case AArch64::LDRHHpost:
5371 case AArch64::LDRHpost:
5372 case AArch64::LDRSBWpost:
5373 case AArch64::LDRSBXpost:
5374 case AArch64::LDRSHWpost:
5375 case AArch64::LDRSHXpost:
5376 case AArch64::LDRSWpost:
5377 case AArch64::LDRWpost:
5378 case AArch64::LDRXpost: {
5379 unsigned Rt = Inst.getOperand(1).getReg();
5380 unsigned Rn = Inst.getOperand(2).getReg();
5381 if (RI->isSubRegisterEq(Rn, Rt))
5382 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5383 "is also a source");
5384 break;
5385 }
5386 case AArch64::STRBBpost:
5387 case AArch64::STRBpost:
5388 case AArch64::STRHHpost:
5389 case AArch64::STRHpost:
5390 case AArch64::STRWpost:
5391 case AArch64::STRXpost:
5392 case AArch64::STRBBpre:
5393 case AArch64::STRBpre:
5394 case AArch64::STRHHpre:
5395 case AArch64::STRHpre:
5396 case AArch64::STRWpre:
5397 case AArch64::STRXpre: {
5398 unsigned Rt = Inst.getOperand(1).getReg();
5399 unsigned Rn = Inst.getOperand(2).getReg();
5400 if (RI->isSubRegisterEq(Rn, Rt))
5401 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5402 "is also a source");
5403 break;
5404 }
5405 case AArch64::STXRB:
5406 case AArch64::STXRH:
5407 case AArch64::STXRW:
5408 case AArch64::STXRX:
5409 case AArch64::STLXRB:
5410 case AArch64::STLXRH:
5411 case AArch64::STLXRW:
5412 case AArch64::STLXRX: {
5413 unsigned Rs = Inst.getOperand(0).getReg();
5414 unsigned Rt = Inst.getOperand(1).getReg();
5415 unsigned Rn = Inst.getOperand(2).getReg();
5416 if (RI->isSubRegisterEq(Rt, Rs) ||
5417 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5418 return Error(Loc[0],
5419 "unpredictable STXR instruction, status is also a source");
5420 break;
5421 }
5422 case AArch64::STXPW:
5423 case AArch64::STXPX:
5424 case AArch64::STLXPW:
5425 case AArch64::STLXPX: {
5426 unsigned Rs = Inst.getOperand(0).getReg();
5427 unsigned Rt1 = Inst.getOperand(1).getReg();
5428 unsigned Rt2 = Inst.getOperand(2).getReg();
5429 unsigned Rn = Inst.getOperand(3).getReg();
5430 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5431 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5432 return Error(Loc[0],
5433 "unpredictable STXP instruction, status is also a source");
5434 break;
5435 }
5436 case AArch64::LDRABwriteback:
5437 case AArch64::LDRAAwriteback: {
5438 unsigned Xt = Inst.getOperand(0).getReg();
5439 unsigned Xn = Inst.getOperand(1).getReg();
5440 if (Xt == Xn)
5441 return Error(Loc[0],
5442 "unpredictable LDRA instruction, writeback base"
5443 " is also a destination");
5444 break;
5445 }
5446 }
5447
5448 // Check v8.8-A memops instructions.
5449 switch (Inst.getOpcode()) {
5450 case AArch64::CPYFP:
5451 case AArch64::CPYFPWN:
5452 case AArch64::CPYFPRN:
5453 case AArch64::CPYFPN:
5454 case AArch64::CPYFPWT:
5455 case AArch64::CPYFPWTWN:
5456 case AArch64::CPYFPWTRN:
5457 case AArch64::CPYFPWTN:
5458 case AArch64::CPYFPRT:
5459 case AArch64::CPYFPRTWN:
5460 case AArch64::CPYFPRTRN:
5461 case AArch64::CPYFPRTN:
5462 case AArch64::CPYFPT:
5463 case AArch64::CPYFPTWN:
5464 case AArch64::CPYFPTRN:
5465 case AArch64::CPYFPTN:
5466 case AArch64::CPYFM:
5467 case AArch64::CPYFMWN:
5468 case AArch64::CPYFMRN:
5469 case AArch64::CPYFMN:
5470 case AArch64::CPYFMWT:
5471 case AArch64::CPYFMWTWN:
5472 case AArch64::CPYFMWTRN:
5473 case AArch64::CPYFMWTN:
5474 case AArch64::CPYFMRT:
5475 case AArch64::CPYFMRTWN:
5476 case AArch64::CPYFMRTRN:
5477 case AArch64::CPYFMRTN:
5478 case AArch64::CPYFMT:
5479 case AArch64::CPYFMTWN:
5480 case AArch64::CPYFMTRN:
5481 case AArch64::CPYFMTN:
5482 case AArch64::CPYFE:
5483 case AArch64::CPYFEWN:
5484 case AArch64::CPYFERN:
5485 case AArch64::CPYFEN:
5486 case AArch64::CPYFEWT:
5487 case AArch64::CPYFEWTWN:
5488 case AArch64::CPYFEWTRN:
5489 case AArch64::CPYFEWTN:
5490 case AArch64::CPYFERT:
5491 case AArch64::CPYFERTWN:
5492 case AArch64::CPYFERTRN:
5493 case AArch64::CPYFERTN:
5494 case AArch64::CPYFET:
5495 case AArch64::CPYFETWN:
5496 case AArch64::CPYFETRN:
5497 case AArch64::CPYFETN:
5498 case AArch64::CPYP:
5499 case AArch64::CPYPWN:
5500 case AArch64::CPYPRN:
5501 case AArch64::CPYPN:
5502 case AArch64::CPYPWT:
5503 case AArch64::CPYPWTWN:
5504 case AArch64::CPYPWTRN:
5505 case AArch64::CPYPWTN:
5506 case AArch64::CPYPRT:
5507 case AArch64::CPYPRTWN:
5508 case AArch64::CPYPRTRN:
5509 case AArch64::CPYPRTN:
5510 case AArch64::CPYPT:
5511 case AArch64::CPYPTWN:
5512 case AArch64::CPYPTRN:
5513 case AArch64::CPYPTN:
5514 case AArch64::CPYM:
5515 case AArch64::CPYMWN:
5516 case AArch64::CPYMRN:
5517 case AArch64::CPYMN:
5518 case AArch64::CPYMWT:
5519 case AArch64::CPYMWTWN:
5520 case AArch64::CPYMWTRN:
5521 case AArch64::CPYMWTN:
5522 case AArch64::CPYMRT:
5523 case AArch64::CPYMRTWN:
5524 case AArch64::CPYMRTRN:
5525 case AArch64::CPYMRTN:
5526 case AArch64::CPYMT:
5527 case AArch64::CPYMTWN:
5528 case AArch64::CPYMTRN:
5529 case AArch64::CPYMTN:
5530 case AArch64::CPYE:
5531 case AArch64::CPYEWN:
5532 case AArch64::CPYERN:
5533 case AArch64::CPYEN:
5534 case AArch64::CPYEWT:
5535 case AArch64::CPYEWTWN:
5536 case AArch64::CPYEWTRN:
5537 case AArch64::CPYEWTN:
5538 case AArch64::CPYERT:
5539 case AArch64::CPYERTWN:
5540 case AArch64::CPYERTRN:
5541 case AArch64::CPYERTN:
5542 case AArch64::CPYET:
5543 case AArch64::CPYETWN:
5544 case AArch64::CPYETRN:
5545 case AArch64::CPYETN: {
5546 unsigned Xd_wb = Inst.getOperand(0).getReg();
5547 unsigned Xs_wb = Inst.getOperand(1).getReg();
5548 unsigned Xn_wb = Inst.getOperand(2).getReg();
5549 unsigned Xd = Inst.getOperand(3).getReg();
5550 unsigned Xs = Inst.getOperand(4).getReg();
5551 unsigned Xn = Inst.getOperand(5).getReg();
5552 if (Xd_wb != Xd)
5553 return Error(Loc[0],
5554 "invalid CPY instruction, Xd_wb and Xd do not match");
5555 if (Xs_wb != Xs)
5556 return Error(Loc[0],
5557 "invalid CPY instruction, Xs_wb and Xs do not match");
5558 if (Xn_wb != Xn)
5559 return Error(Loc[0],
5560 "invalid CPY instruction, Xn_wb and Xn do not match");
5561 if (Xd == Xs)
5562 return Error(Loc[0], "invalid CPY instruction, destination and source"
5563 " registers are the same");
5564 if (Xd == Xn)
5565 return Error(Loc[0], "invalid CPY instruction, destination and size"
5566 " registers are the same");
5567 if (Xs == Xn)
5568 return Error(Loc[0], "invalid CPY instruction, source and size"
5569 " registers are the same");
5570 break;
5571 }
5572 case AArch64::SETP:
5573 case AArch64::SETPT:
5574 case AArch64::SETPN:
5575 case AArch64::SETPTN:
5576 case AArch64::SETM:
5577 case AArch64::SETMT:
5578 case AArch64::SETMN:
5579 case AArch64::SETMTN:
5580 case AArch64::SETE:
5581 case AArch64::SETET:
5582 case AArch64::SETEN:
5583 case AArch64::SETETN:
5584 case AArch64::SETGP:
5585 case AArch64::SETGPT:
5586 case AArch64::SETGPN:
5587 case AArch64::SETGPTN:
5588 case AArch64::SETGM:
5589 case AArch64::SETGMT:
5590 case AArch64::SETGMN:
5591 case AArch64::SETGMTN:
5592 case AArch64::MOPSSETGE:
5593 case AArch64::MOPSSETGET:
5594 case AArch64::MOPSSETGEN:
5595 case AArch64::MOPSSETGETN: {
5596 unsigned Xd_wb = Inst.getOperand(0).getReg();
5597 unsigned Xn_wb = Inst.getOperand(1).getReg();
5598 unsigned Xd = Inst.getOperand(2).getReg();
5599 unsigned Xn = Inst.getOperand(3).getReg();
5600 unsigned Xm = Inst.getOperand(4).getReg();
5601 if (Xd_wb != Xd)
5602 return Error(Loc[0],
5603 "invalid SET instruction, Xd_wb and Xd do not match");
5604 if (Xn_wb != Xn)
5605 return Error(Loc[0],
5606 "invalid SET instruction, Xn_wb and Xn do not match");
5607 if (Xd == Xn)
5608 return Error(Loc[0], "invalid SET instruction, destination and size"
5609 " registers are the same");
5610 if (Xd == Xm)
5611 return Error(Loc[0], "invalid SET instruction, destination and source"
5612 " registers are the same");
5613 if (Xn == Xm)
5614 return Error(Loc[0], "invalid SET instruction, source and size"
5615 " registers are the same");
5616 break;
5617 }
5618 }
5619
5620 // Now check immediate ranges. Separate from the above as there is overlap
5621 // in the instructions being checked and this keeps the nested conditionals
5622 // to a minimum.
5623 switch (Inst.getOpcode()) {
5624 case AArch64::ADDSWri:
5625 case AArch64::ADDSXri:
5626 case AArch64::ADDWri:
5627 case AArch64::ADDXri:
5628 case AArch64::SUBSWri:
5629 case AArch64::SUBSXri:
5630 case AArch64::SUBWri:
5631 case AArch64::SUBXri: {
5632 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5633 // some slight duplication here.
5634 if (Inst.getOperand(2).isExpr()) {
5635 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5636 AArch64MCExpr::VariantKind ELFRefKind;
5637 MCSymbolRefExpr::VariantKind DarwinRefKind;
5638 int64_t Addend;
5639 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5640
5641 // Only allow these with ADDXri.
5642 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5643 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5644 Inst.getOpcode() == AArch64::ADDXri)
5645 return false;
5646
5647 // Only allow these with ADDXri/ADDWri
5648 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5649 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5650 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5651 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5652 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5653 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5654 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5655 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5656 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5657 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5658 (Inst.getOpcode() == AArch64::ADDXri ||
5659 Inst.getOpcode() == AArch64::ADDWri))
5660 return false;
5661
5662 // Don't allow symbol refs in the immediate field otherwise
5663 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5664 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5665 // 'cmp w0, 'borked')
5666 return Error(Loc.back(), "invalid immediate expression");
5667 }
5668 // We don't validate more complex expressions here
5669 }
5670 return false;
5671 }
5672 default:
5673 return false;
5674 }
5675}
5676
5678 const FeatureBitset &FBS,
5679 unsigned VariantID = 0);
5680
5681bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5684 switch (ErrCode) {
5685 case Match_InvalidTiedOperand: {
5686 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5687 if (Op.isVectorList())
5688 return Error(Loc, "operand must match destination register list");
5689
5690 assert(Op.isReg() && "Unexpected operand type");
5691 switch (Op.getRegEqualityTy()) {
5692 case RegConstraintEqualityTy::EqualsSubReg:
5693 return Error(Loc, "operand must be 64-bit form of destination register");
5694 case RegConstraintEqualityTy::EqualsSuperReg:
5695 return Error(Loc, "operand must be 32-bit form of destination register");
5696 case RegConstraintEqualityTy::EqualsReg:
5697 return Error(Loc, "operand must match destination register");
5698 }
5699 llvm_unreachable("Unknown RegConstraintEqualityTy");
5700 }
5701 case Match_MissingFeature:
5702 return Error(Loc,
5703 "instruction requires a CPU feature not currently enabled");
5704 case Match_InvalidOperand:
5705 return Error(Loc, "invalid operand for instruction");
5706 case Match_InvalidSuffix:
5707 return Error(Loc, "invalid type suffix for instruction");
5708 case Match_InvalidCondCode:
5709 return Error(Loc, "expected AArch64 condition code");
5710 case Match_AddSubRegExtendSmall:
5711 return Error(Loc,
5712 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5713 case Match_AddSubRegExtendLarge:
5714 return Error(Loc,
5715 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5716 case Match_AddSubSecondSource:
5717 return Error(Loc,
5718 "expected compatible register, symbol or integer in range [0, 4095]");
5719 case Match_LogicalSecondSource:
5720 return Error(Loc, "expected compatible register or logical immediate");
5721 case Match_InvalidMovImm32Shift:
5722 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5723 case Match_InvalidMovImm64Shift:
5724 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5725 case Match_AddSubRegShift32:
5726 return Error(Loc,
5727 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5728 case Match_AddSubRegShift64:
5729 return Error(Loc,
5730 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5731 case Match_InvalidFPImm:
5732 return Error(Loc,
5733 "expected compatible register or floating-point constant");
5734 case Match_InvalidMemoryIndexedSImm6:
5735 return Error(Loc, "index must be an integer in range [-32, 31].");
5736 case Match_InvalidMemoryIndexedSImm5:
5737 return Error(Loc, "index must be an integer in range [-16, 15].");
5738 case Match_InvalidMemoryIndexed1SImm4:
5739 return Error(Loc, "index must be an integer in range [-8, 7].");
5740 case Match_InvalidMemoryIndexed2SImm4:
5741 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5742 case Match_InvalidMemoryIndexed3SImm4:
5743 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5744 case Match_InvalidMemoryIndexed4SImm4:
5745 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5746 case Match_InvalidMemoryIndexed16SImm4:
5747 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5748 case Match_InvalidMemoryIndexed32SImm4:
5749 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5750 case Match_InvalidMemoryIndexed1SImm6:
5751 return Error(Loc, "index must be an integer in range [-32, 31].");
5752 case Match_InvalidMemoryIndexedSImm8:
5753 return Error(Loc, "index must be an integer in range [-128, 127].");
5754 case Match_InvalidMemoryIndexedSImm9:
5755 return Error(Loc, "index must be an integer in range [-256, 255].");
5756 case Match_InvalidMemoryIndexed16SImm9:
5757 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5758 case Match_InvalidMemoryIndexed8SImm10:
5759 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5760 case Match_InvalidMemoryIndexed4SImm7:
5761 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5762 case Match_InvalidMemoryIndexed8SImm7:
5763 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5764 case Match_InvalidMemoryIndexed16SImm7:
5765 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5766 case Match_InvalidMemoryIndexed8UImm5:
5767 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5768 case Match_InvalidMemoryIndexed8UImm3:
5769 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5770 case Match_InvalidMemoryIndexed4UImm5:
5771 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5772 case Match_InvalidMemoryIndexed2UImm5:
5773 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5774 case Match_InvalidMemoryIndexed8UImm6:
5775 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5776 case Match_InvalidMemoryIndexed16UImm6:
5777 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5778 case Match_InvalidMemoryIndexed4UImm6:
5779 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5780 case Match_InvalidMemoryIndexed2UImm6:
5781 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5782 case Match_InvalidMemoryIndexed1UImm6:
5783 return Error(Loc, "index must be in range [0, 63].");
5784 case Match_InvalidMemoryWExtend8:
5785 return Error(Loc,
5786 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5787 case Match_InvalidMemoryWExtend16:
5788 return Error(Loc,
5789 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5790 case Match_InvalidMemoryWExtend32:
5791 return Error(Loc,
5792 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5793 case Match_InvalidMemoryWExtend64:
5794 return Error(Loc,
5795 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5796 case Match_InvalidMemoryWExtend128:
5797 return Error(Loc,
5798 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5799 case Match_InvalidMemoryXExtend8:
5800 return Error(Loc,
5801 "expected 'lsl' or 'sxtx' with optional shift of #0");
5802 case Match_InvalidMemoryXExtend16:
5803 return Error(Loc,
5804 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5805 case Match_InvalidMemoryXExtend32:
5806 return Error(Loc,
5807 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5808 case Match_InvalidMemoryXExtend64:
5809 return Error(Loc,
5810 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5811 case Match_InvalidMemoryXExtend128:
5812 return Error(Loc,
5813 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5814 case Match_InvalidMemoryIndexed1:
5815 return Error(Loc, "index must be an integer in range [0, 4095].");
5816 case Match_InvalidMemoryIndexed2:
5817 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5818 case Match_InvalidMemoryIndexed4:
5819 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5820 case Match_InvalidMemoryIndexed8:
5821 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5822 case Match_InvalidMemoryIndexed16:
5823 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5824 case Match_InvalidImm0_0:
5825 return Error(Loc, "immediate must be 0.");
5826 case Match_InvalidImm0_1:
5827 return Error(Loc, "immediate must be an integer in range [0, 1].");
5828 case Match_InvalidImm0_3:
5829 return Error(Loc, "immediate must be an integer in range [0, 3].");
5830 case Match_InvalidImm0_7:
5831 return Error(Loc, "immediate must be an integer in range [0, 7].");
5832 case Match_InvalidImm0_15:
5833 return Error(Loc, "immediate must be an integer in range [0, 15].");
5834 case Match_InvalidImm0_31:
5835 return Error(Loc, "immediate must be an integer in range [0, 31].");
5836 case Match_InvalidImm0_63:
5837 return Error(Loc, "immediate must be an integer in range [0, 63].");
5838 case Match_InvalidImm0_127:
5839 return Error(Loc, "immediate must be an integer in range [0, 127].");
5840 case Match_InvalidImm0_255:
5841 return Error(Loc, "immediate must be an integer in range [0, 255].");
5842 case Match_InvalidImm0_65535:
5843 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5844 case Match_InvalidImm1_8:
5845 return Error(Loc, "immediate must be an integer in range [1, 8].");
5846 case Match_InvalidImm1_16:
5847 return Error(Loc, "immediate must be an integer in range [1, 16].");
5848 case Match_InvalidImm1_32:
5849 return Error(Loc, "immediate must be an integer in range [1, 32].");
5850 case Match_InvalidImm1_64:
5851 return Error(Loc, "immediate must be an integer in range [1, 64].");
5852 case Match_InvalidMemoryIndexedRange2UImm0:
5853 return Error(Loc, "vector select offset must be the immediate range 0:1.");
5854 case Match_InvalidMemoryIndexedRange2UImm1:
5855 return Error(Loc, "vector select offset must be an immediate range of the "
5856 "form <immf>:<imml>, where the first "
5857 "immediate is a multiple of 2 in the range [0, 2], and "
5858 "the second immediate is immf + 1.");
5859 case Match_InvalidMemoryIndexedRange2UImm2:
5860 case Match_InvalidMemoryIndexedRange2UImm3:
5861 return Error(
5862 Loc,
5863 "vector select offset must be an immediate range of the form "
5864 "<immf>:<imml>, "
5865 "where the first immediate is a multiple of 2 in the range [0, 6] or "
5866 "[0, 14] "
5867 "depending on the instruction, and the second immediate is immf + 1.");
5868 case Match_InvalidMemoryIndexedRange4UImm0:
5869 return Error(Loc, "vector select offset must be the immediate range 0:3.");
5870 case Match_InvalidMemoryIndexedRange4UImm1:
5871 case Match_InvalidMemoryIndexedRange4UImm2:
5872 return Error(
5873 Loc,
5874 "vector select offset must be an immediate range of the form "
5875 "<immf>:<imml>, "
5876 "where the first immediate is a multiple of 4 in the range [0, 4] or "
5877 "[0, 12] "
5878 "depending on the instruction, and the second immediate is immf + 3.");
5879 case Match_InvalidSVEAddSubImm8:
5880 return Error(Loc, "immediate must be an integer in range [0, 255]"
5881 " with a shift amount of 0");
5882 case Match_InvalidSVEAddSubImm16:
5883 case Match_InvalidSVEAddSubImm32:
5884 case Match_InvalidSVEAddSubImm64:
5885 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5886 "multiple of 256 in range [256, 65280]");
5887 case Match_InvalidSVECpyImm8:
5888 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5889 " with a shift amount of 0");
5890 case Match_InvalidSVECpyImm16:
5891 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5892 "multiple of 256 in range [-32768, 65280]");
5893 case Match_InvalidSVECpyImm32:
5894 case Match_InvalidSVECpyImm64:
5895 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5896 "multiple of 256 in range [-32768, 32512]");
5897 case Match_InvalidIndexRange0_0:
5898 return Error(Loc, "expected lane specifier '[0]'");
5899 case Match_InvalidIndexRange1_1:
5900 return Error(Loc, "expected lane specifier '[1]'");
5901 case Match_InvalidIndexRange0_15:
5902 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5903 case Match_InvalidIndexRange0_7:
5904 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5905 case Match_InvalidIndexRange0_3:
5906 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5907 case Match_InvalidIndexRange0_1:
5908 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5909 case Match_InvalidSVEIndexRange0_63:
5910 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5911 case Match_InvalidSVEIndexRange0_31:
5912 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5913 case Match_InvalidSVEIndexRange0_15:
5914 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5915 case Match_InvalidSVEIndexRange0_7:
5916 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5917 case Match_InvalidSVEIndexRange0_3:
5918 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5919 case Match_InvalidLabel:
5920 return Error(Loc, "expected label or encodable integer pc offset");
5921 case Match_MRS:
5922 return Error(Loc, "expected readable system register");
5923 case Match_MSR:
5924 case Match_InvalidSVCR:
5925 return Error(Loc, "expected writable system register or pstate");
5926 case Match_InvalidComplexRotationEven:
5927 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5928 case Match_InvalidComplexRotationOdd:
5929 return Error(Loc, "complex rotation must be 90 or 270.");
5930 case Match_MnemonicFail: {
5931 std::string Suggestion = AArch64MnemonicSpellCheck(
5932 ((AArch64Operand &)*Operands[0]).getToken(),
5933 ComputeAvailableFeatures(STI->getFeatureBits()));
5934 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5935 }
5936 case Match_InvalidGPR64shifted8:
5937 return Error(Loc, "register must be x0..x30 or xzr, without shift");
5938 case Match_InvalidGPR64shifted16:
5939 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5940 case Match_InvalidGPR64shifted32:
5941 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5942 case Match_InvalidGPR64shifted64:
5943 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5944 case Match_InvalidGPR64shifted128:
5945 return Error(
5946 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5947 case Match_InvalidGPR64NoXZRshifted8:
5948 return Error(Loc, "register must be x0..x30 without shift");
5949 case Match_InvalidGPR64NoXZRshifted16:
5950 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5951 case Match_InvalidGPR64NoXZRshifted32:
5952 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5953 case Match_InvalidGPR64NoXZRshifted64:
5954 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5955 case Match_InvalidGPR64NoXZRshifted128:
5956 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5957 case Match_InvalidZPR32UXTW8:
5958 case Match_InvalidZPR32SXTW8:
5959 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5960 case Match_InvalidZPR32UXTW16:
5961 case Match_InvalidZPR32SXTW16:
5962 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5963 case Match_InvalidZPR32UXTW32:
5964 case Match_InvalidZPR32SXTW32:
5965 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5966 case Match_InvalidZPR32UXTW64:
5967 case Match_InvalidZPR32SXTW64:
5968 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5969 case Match_InvalidZPR64UXTW8:
5970 case Match_InvalidZPR64SXTW8:
5971 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5972 case Match_InvalidZPR64UXTW16:
5973 case Match_InvalidZPR64SXTW16:
5974 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5975 case Match_InvalidZPR64UXTW32:
5976 case Match_InvalidZPR64SXTW32:
5977 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5978 case Match_InvalidZPR64UXTW64:
5979 case Match_InvalidZPR64SXTW64:
5980 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5981 case Match_InvalidZPR32LSL8:
5982 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5983 case Match_InvalidZPR32LSL16:
5984 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5985 case Match_InvalidZPR32LSL32:
5986 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5987 case Match_InvalidZPR32LSL64:
5988 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5989 case Match_InvalidZPR64LSL8:
5990 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5991 case Match_InvalidZPR64LSL16:
5992 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5993 case Match_InvalidZPR64LSL32:
5994 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5995 case Match_InvalidZPR64LSL64:
5996 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5997 case Match_InvalidZPR0:
5998 return Error(Loc, "expected register without element width suffix");
5999 case Match_InvalidZPR8:
6000 case Match_InvalidZPR16:
6001 case Match_InvalidZPR32:
6002 case Match_InvalidZPR64:
6003 case Match_InvalidZPR128:
6004 return Error(Loc, "invalid element width");
6005 case Match_InvalidZPR_3b8:
6006 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6007 case Match_InvalidZPR_3b16:
6008 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6009 case Match_InvalidZPR_3b32:
6010 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6011 case Match_InvalidZPR_4b8:
6012 return Error(Loc,
6013 "Invalid restricted vector register, expected z0.b..z15.b");
6014 case Match_InvalidZPR_4b16:
6015 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6016 case Match_InvalidZPR_4b32:
6017 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6018 case Match_InvalidZPR_4b64:
6019 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6020 case Match_InvalidSVEPattern:
6021 return Error(Loc, "invalid predicate pattern");
6022 case Match_InvalidSVEPredicateAnyReg:
6023 case Match_InvalidSVEPredicateBReg:
6024 case Match_InvalidSVEPredicateHReg:
6025 case Match_InvalidSVEPredicateSReg:
6026 case Match_InvalidSVEPredicateDReg:
6027 return Error(Loc, "invalid predicate register.");
6028 case Match_InvalidSVEPredicate3bAnyReg:
6029 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6030 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6031 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6032 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6033 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6034 return Error(Loc, "Invalid predicate register, expected PN in range "
6035 "pn8..pn15 with element suffix.");
6036 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6037 return Error(Loc, "invalid restricted predicate-as-counter register "
6038 "expected pn8..pn15");
6039 case Match_InvalidSVEPNPredicateBReg:
6040 case Match_InvalidSVEPNPredicateHReg:
6041 case Match_InvalidSVEPNPredicateSReg:
6042 case Match_InvalidSVEPNPredicateDReg:
6043 return Error(Loc, "Invalid predicate register, expected PN in range "
6044 "pn0..pn15 with element suffix.");
6045 case Match_InvalidSVEVecLenSpecifier:
6046 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6047 case Match_InvalidSVEPredicateListMul2x8:
6048 case Match_InvalidSVEPredicateListMul2x16:
6049 case Match_InvalidSVEPredicateListMul2x32:
6050 case Match_InvalidSVEPredicateListMul2x64:
6051 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6052 "predicate registers, where the first vector is a multiple of 2 "
6053 "and with correct element type");
6054 case Match_InvalidSVEExactFPImmOperandHalfOne:
6055 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6056 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6057 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6058 case Match_InvalidSVEExactFPImmOperandZeroOne:
6059 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6060 case Match_InvalidMatrixTileVectorH8:
6061 case Match_InvalidMatrixTileVectorV8:
6062 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6063 case Match_InvalidMatrixTileVectorH16:
6064 case Match_InvalidMatrixTileVectorV16:
6065 return Error(Loc,
6066 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6067 case Match_InvalidMatrixTileVectorH32:
6068 case Match_InvalidMatrixTileVectorV32:
6069 return Error(Loc,
6070 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6071 case Match_InvalidMatrixTileVectorH64:
6072 case Match_InvalidMatrixTileVectorV64:
6073 return Error(Loc,
6074 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6075 case Match_InvalidMatrixTileVectorH128:
6076 case Match_InvalidMatrixTileVectorV128:
6077 return Error(Loc,
6078 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6079 case Match_InvalidMatrixTile32:
6080 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6081 case Match_InvalidMatrixTile64:
6082 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6083 case Match_InvalidMatrix:
6084 return Error(Loc, "invalid matrix operand, expected za");
6085 case Match_InvalidMatrix8:
6086 return Error(Loc, "invalid matrix operand, expected suffix .b");
6087 case Match_InvalidMatrix16:
6088 return Error(Loc, "invalid matrix operand, expected suffix .h");
6089 case Match_InvalidMatrix32:
6090 return Error(Loc, "invalid matrix operand, expected suffix .s");
6091 case Match_InvalidMatrix64:
6092 return Error(Loc, "invalid matrix operand, expected suffix .d");
6093 case Match_InvalidMatrixIndexGPR32_12_15:
6094 return Error(Loc, "operand must be a register in range [w12, w15]");
6095 case Match_InvalidMatrixIndexGPR32_8_11:
6096 return Error(Loc, "operand must be a register in range [w8, w11]");
6097 case Match_InvalidSVEVectorListMul2x8:
6098 case Match_InvalidSVEVectorListMul2x16:
6099 case Match_InvalidSVEVectorListMul2x32:
6100 case Match_InvalidSVEVectorListMul2x64:
6101 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6102 "SVE vectors, where the first vector is a multiple of 2 "
6103 "and with matching element types");
6104 case Match_InvalidSVEVectorListMul4x8:
6105 case Match_InvalidSVEVectorListMul4x16:
6106 case Match_InvalidSVEVectorListMul4x32:
6107 case Match_InvalidSVEVectorListMul4x64:
6108 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6109 "SVE vectors, where the first vector is a multiple of 4 "
6110 "and with matching element types");
6111 case Match_InvalidLookupTable:
6112 return Error(Loc, "Invalid lookup table, expected zt0");
6113 case Match_InvalidSVEVectorListStrided2x8:
6114 case Match_InvalidSVEVectorListStrided2x16:
6115 case Match_InvalidSVEVectorListStrided2x32:
6116 case Match_InvalidSVEVectorListStrided2x64:
6117 return Error(
6118 Loc,
6119 "Invalid vector list, expected list with each SVE vector in the list "
6120 "8 registers apart, and the first register in the range [z0, z7] or "
6121 "[z16, z23] and with correct element type");
6122 case Match_InvalidSVEVectorListStrided4x8:
6123 case Match_InvalidSVEVectorListStrided4x16:
6124 case Match_InvalidSVEVectorListStrided4x32:
6125 case Match_InvalidSVEVectorListStrided4x64:
6126 return Error(
6127 Loc,
6128 "Invalid vector list, expected list with each SVE vector in the list "
6129 "4 registers apart, and the first register in the range [z0, z3] or "
6130 "[z16, z19] and with correct element type");
6131 case Match_AddSubLSLImm3ShiftLarge:
6132 return Error(Loc,
6133 "expected 'lsl' with optional integer in range [0, 7]");
6134 case Match_InvalidSVEPNRasPPRPredicateBReg:
6135 return Error(Loc,
6136 "Expected predicate-as-counter register name with .B suffix");
6137 default:
6138 llvm_unreachable("unexpected error code!");
6139 }
6140}
6141
6142static const char *getSubtargetFeatureName(uint64_t Val);
6143
6144bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6146 MCStreamer &Out,
6148 bool MatchingInlineAsm) {
6149 assert(!Operands.empty() && "Unexpect empty operand list!");
6150 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6151 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6152
6153 StringRef Tok = Op.getToken();
6154 unsigned NumOperands = Operands.size();
6155
6156 if (NumOperands == 4 && Tok == "lsl") {
6157 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6158 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6159 if (Op2.isScalarReg() && Op3.isImm()) {
6160 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6161 if (Op3CE) {
6162 uint64_t Op3Val = Op3CE->getValue();
6163 uint64_t NewOp3Val = 0;
6164 uint64_t NewOp4Val = 0;
6165 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6166 Op2.getReg())) {
6167 NewOp3Val = (32 - Op3Val) & 0x1f;
6168 NewOp4Val = 31 - Op3Val;
6169 } else {
6170 NewOp3Val = (64 - Op3Val) & 0x3f;
6171 NewOp4Val = 63 - Op3Val;
6172 }
6173
6174 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6175 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6176
6177 Operands[0] =
6178 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6179 Operands.push_back(AArch64Operand::CreateImm(
6180 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6181 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6182 Op3.getEndLoc(), getContext());
6183 }
6184 }
6185 } else if (NumOperands == 4 && Tok == "bfc") {
6186 // FIXME: Horrible hack to handle BFC->BFM alias.
6187 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6188 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6189 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6190
6191 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6192 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6193 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6194
6195 if (LSBCE && WidthCE) {
6196 uint64_t LSB = LSBCE->getValue();
6197 uint64_t Width = WidthCE->getValue();
6198
6199 uint64_t RegWidth = 0;
6200 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6201 Op1.getReg()))
6202 RegWidth = 64;
6203 else
6204 RegWidth = 32;
6205
6206 if (LSB >= RegWidth)
6207 return Error(LSBOp.getStartLoc(),
6208 "expected integer in range [0, 31]");
6209 if (Width < 1 || Width > RegWidth)
6210 return Error(WidthOp.getStartLoc(),
6211 "expected integer in range [1, 32]");
6212
6213 uint64_t ImmR = 0;
6214 if (RegWidth == 32)
6215 ImmR = (32 - LSB) & 0x1f;
6216 else
6217 ImmR = (64 - LSB) & 0x3f;
6218
6219 uint64_t ImmS = Width - 1;
6220
6221 if (ImmR != 0 && ImmS >= ImmR)
6222 return Error(WidthOp.getStartLoc(),
6223 "requested insert overflows register");
6224
6225 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6226 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6227 Operands[0] =
6228 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6229 Operands[2] = AArch64Operand::CreateReg(
6230 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6231 SMLoc(), SMLoc(), getContext());
6232 Operands[3] = AArch64Operand::CreateImm(
6233 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6234 Operands.emplace_back(
6235 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6236 WidthOp.getEndLoc(), getContext()));
6237 }
6238 }
6239 } else if (NumOperands == 5) {
6240 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6241 // UBFIZ -> UBFM aliases.
6242 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6243 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6244 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6245 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6246
6247 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6248 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6249 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6250
6251 if (Op3CE && Op4CE) {
6252 uint64_t Op3Val = Op3CE->getValue();
6253 uint64_t Op4Val = Op4CE->getValue();
6254
6255 uint64_t RegWidth = 0;
6256 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6257 Op1.getReg()))
6258 RegWidth = 64;
6259 else
6260 RegWidth = 32;
6261
6262 if (Op3Val >= RegWidth)
6263 return Error(Op3.getStartLoc(),
6264 "expected integer in range [0, 31]");
6265 if (Op4Val < 1 || Op4Val > RegWidth)
6266 return Error(Op4.getStartLoc(),
6267 "expected integer in range [1, 32]");
6268
6269 uint64_t NewOp3Val = 0;
6270 if (RegWidth == 32)
6271 NewOp3Val = (32 - Op3Val) & 0x1f;
6272 else
6273 NewOp3Val = (64 - Op3Val) & 0x3f;
6274
6275 uint64_t NewOp4Val = Op4Val - 1;
6276
6277 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6278 return Error(Op4.getStartLoc(),
6279 "requested insert overflows register");
6280
6281 const MCExpr *NewOp3 =
6282 MCConstantExpr::create(NewOp3Val, getContext());
6283 const MCExpr *NewOp4 =
6284 MCConstantExpr::create(NewOp4Val, getContext());
6285 Operands[3] = AArch64Operand::CreateImm(
6286 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6287 Operands[4] = AArch64Operand::CreateImm(
6288 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6289 if (Tok == "bfi")
6290 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6291 getContext());
6292 else if (Tok == "sbfiz")
6293 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6294 getContext());
6295 else if (Tok == "ubfiz")
6296 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6297 getContext());
6298 else
6299 llvm_unreachable("No valid mnemonic for alias?");
6300 }
6301 }
6302
6303 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6304 // UBFX -> UBFM aliases.
6305 } else if (NumOperands == 5 &&
6306 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6307 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6308 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6309 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6310
6311 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6312 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6313 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6314
6315 if (Op3CE && Op4CE) {
6316 uint64_t Op3Val = Op3CE->getValue();
6317 uint64_t Op4Val = Op4CE->getValue();
6318
6319 uint64_t RegWidth = 0;
6320 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6321 Op1.getReg()))
6322 RegWidth = 64;
6323 else
6324 RegWidth = 32;
6325
6326 if (Op3Val >= RegWidth)
6327 return Error(Op3.getStartLoc(),
6328 "expected integer in range [0, 31]");
6329 if (Op4Val < 1 || Op4Val > RegWidth)
6330 return Error(Op4.getStartLoc(),
6331 "expected integer in range [1, 32]");
6332
6333 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6334
6335 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6336 return Error(Op4.getStartLoc(),
6337 "requested extract overflows register");
6338
6339 const MCExpr *NewOp4 =
6340 MCConstantExpr::create(NewOp4Val, getContext());
6341 Operands[4] = AArch64Operand::CreateImm(
6342 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6343 if (Tok == "bfxil")
6344 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6345 getContext());
6346 else if (Tok == "sbfx")
6347 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6348 getContext());
6349 else if (Tok == "ubfx")
6350 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6351 getContext());
6352 else
6353 llvm_unreachable("No valid mnemonic for alias?");
6354 }
6355 }
6356 }
6357 }
6358
6359 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6360 // instruction for FP registers correctly in some rare circumstances. Convert
6361 // it to a safe instruction and warn (because silently changing someone's
6362 // assembly is rude).
6363 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6364 NumOperands == 4 && Tok == "movi") {
6365 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6366 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6367 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6368 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6369 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6370 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6371 if (Suffix.lower() == ".2d" &&
6372 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6373 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6374 " correctly on this CPU, converting to equivalent movi.16b");
6375 // Switch the suffix to .16b.
6376 unsigned Idx = Op1.isToken() ? 1 : 2;
6377 Operands[Idx] =
6378 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6379 }
6380 }
6381 }
6382
6383 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6384 // InstAlias can't quite handle this since the reg classes aren't
6385 // subclasses.
6386 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6387 // The source register can be Wn here, but the matcher expects a
6388 // GPR64. Twiddle it here if necessary.
6389 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6390 if (Op.isScalarReg()) {
6391 unsigned Reg = getXRegFromWReg(Op.getReg());
6392 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6393 Op.getStartLoc(), Op.getEndLoc(),
6394 getContext());
6395 }
6396 }
6397 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6398 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6399 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6400 if (Op.isScalarReg() &&
6401 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6402 Op.getReg())) {
6403 // The source register can be Wn here, but the matcher expects a
6404 // GPR64. Twiddle it here if necessary.
6405 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6406 if (Op.isScalarReg()) {
6407 unsigned Reg = getXRegFromWReg(Op.getReg());
6408 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6409 Op.getStartLoc(),
6410 Op.getEndLoc(), getContext());
6411 }
6412 }
6413 }
6414 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6415 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6416 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6417 if (Op.isScalarReg() &&
6418 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6419 Op.getReg())) {
6420 // The source register can be Wn here, but the matcher expects a
6421 // GPR32. Twiddle it here if necessary.
6422 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6423 if (Op.isScalarReg()) {
6424 unsigned Reg = getWRegFromXReg(Op.getReg());
6425 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6426 Op.getStartLoc(),
6427 Op.getEndLoc(), getContext());
6428 }
6429 }
6430 }
6431
6432 MCInst Inst;
6433 FeatureBitset MissingFeatures;
6434 // First try to match against the secondary set of tables containing the
6435 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6436 unsigned MatchResult =
6437 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6438 MatchingInlineAsm, 1);
6439
6440 // If that fails, try against the alternate table containing long-form NEON:
6441 // "fadd v0.2s, v1.2s, v2.2s"
6442 if (MatchResult != Match_Success) {
6443 // But first, save the short-form match result: we can use it in case the
6444 // long-form match also fails.
6445 auto ShortFormNEONErrorInfo = ErrorInfo;
6446 auto ShortFormNEONMatchResult = MatchResult;
6447 auto ShortFormNEONMissingFeatures = MissingFeatures;
6448
6449 MatchResult =
6450 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6451 MatchingInlineAsm, 0);
6452
6453 // Now, both matches failed, and the long-form match failed on the mnemonic
6454 // suffix token operand. The short-form match failure is probably more
6455 // relevant: use it instead.
6456 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6457 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6458 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6459 MatchResult = ShortFormNEONMatchResult;
6460 ErrorInfo = ShortFormNEONErrorInfo;
6461 MissingFeatures = ShortFormNEONMissingFeatures;
6462 }
6463 }
6464
6465 switch (MatchResult) {
6466 case Match_Success: {
6467 // Perform range checking and other semantic validations
6468 SmallVector<SMLoc, 8> OperandLocs;
6469 NumOperands = Operands.size();
6470 for (unsigned i = 1; i < NumOperands; ++i)
6471 OperandLocs.push_back(Operands[i]->getStartLoc());
6472 if (validateInstruction(Inst, IDLoc, OperandLocs))
6473 return true;
6474
6475 Inst.setLoc(IDLoc);
6476 Out.emitInstruction(Inst, getSTI());
6477 return false;
6478 }
6479 case Match_MissingFeature: {
6480 assert(MissingFeatures.any() && "Unknown missing feature!");
6481 // Special case the error message for the very common case where only
6482 // a single subtarget feature is missing (neon, e.g.).
6483 std::string Msg = "instruction requires:";
6484 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6485 if (MissingFeatures[i]) {
6486 Msg += " ";
6488 }
6489 }
6490 return Error(IDLoc, Msg);
6491 }
6492 case Match_MnemonicFail:
6493 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6494 case Match_InvalidOperand: {
6495 SMLoc ErrorLoc = IDLoc;
6496
6497 if (ErrorInfo != ~0ULL) {
6498 if (ErrorInfo >= Operands.size())
6499 return Error(IDLoc, "too few operands for instruction",
6500 SMRange(IDLoc, getTok().getLoc()));
6501
6502 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6503 if (ErrorLoc == SMLoc())
6504 ErrorLoc = IDLoc;
6505 }
6506 // If the match failed on a suffix token operand, tweak the diagnostic
6507 // accordingly.
6508 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6509 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6510 MatchResult = Match_InvalidSuffix;
6511
6512 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6513 }
6514 case Match_InvalidTiedOperand:
6515 case Match_InvalidMemoryIndexed1:
6516 case Match_InvalidMemoryIndexed2:
6517 case Match_InvalidMemoryIndexed4:
6518 case Match_InvalidMemoryIndexed8:
6519 case Match_InvalidMemoryIndexed16:
6520 case Match_InvalidCondCode:
6521 case Match_AddSubLSLImm3ShiftLarge:
6522 case Match_AddSubRegExtendSmall:
6523 case Match_AddSubRegExtendLarge:
6524 case Match_AddSubSecondSource:
6525 case Match_LogicalSecondSource:
6526 case Match_AddSubRegShift32:
6527 case Match_AddSubRegShift64:
6528 case Match_InvalidMovImm32Shift:
6529 case Match_InvalidMovImm64Shift:
6530 case Match_InvalidFPImm:
6531 case Match_InvalidMemoryWExtend8:
6532 case Match_InvalidMemoryWExtend16:
6533 case Match_InvalidMemoryWExtend32:
6534 case Match_InvalidMemoryWExtend64:
6535 case Match_InvalidMemoryWExtend128:
6536 case Match_InvalidMemoryXExtend8:
6537 case Match_InvalidMemoryXExtend16:
6538 case Match_InvalidMemoryXExtend32:
6539 case Match_InvalidMemoryXExtend64:
6540 case Match_InvalidMemoryXExtend128:
6541 case Match_InvalidMemoryIndexed1SImm4:
6542 case Match_InvalidMemoryIndexed2SImm4:
6543 case Match_InvalidMemoryIndexed3SImm4:
6544 case Match_InvalidMemoryIndexed4SImm4:
6545 case Match_InvalidMemoryIndexed1SImm6:
6546 case Match_InvalidMemoryIndexed16SImm4:
6547 case Match_InvalidMemoryIndexed32SImm4:
6548 case Match_InvalidMemoryIndexed4SImm7:
6549 case Match_InvalidMemoryIndexed8SImm7:
6550 case Match_InvalidMemoryIndexed16SImm7:
6551 case Match_InvalidMemoryIndexed8UImm5:
6552 case Match_InvalidMemoryIndexed8UImm3:
6553 case Match_InvalidMemoryIndexed4UImm5:
6554 case Match_InvalidMemoryIndexed2UImm5:
6555 case Match_InvalidMemoryIndexed1UImm6:
6556 case Match_InvalidMemoryIndexed2UImm6:
6557 case Match_InvalidMemoryIndexed4UImm6:
6558 case Match_InvalidMemoryIndexed8UImm6:
6559 case Match_InvalidMemoryIndexed16UImm6:
6560 case Match_InvalidMemoryIndexedSImm6:
6561 case Match_InvalidMemoryIndexedSImm5:
6562 case Match_InvalidMemoryIndexedSImm8:
6563 case Match_InvalidMemoryIndexedSImm9:
6564 case Match_InvalidMemoryIndexed16SImm9:
6565 case Match_InvalidMemoryIndexed8SImm10:
6566 case Match_InvalidImm0_0:
6567 case Match_InvalidImm0_1:
6568 case Match_InvalidImm0_3:
6569 case Match_InvalidImm0_7:
6570 case Match_InvalidImm0_15:
6571 case Match_InvalidImm0_31:
6572 case Match_InvalidImm0_63:
6573 case Match_InvalidImm0_127:
6574 case Match_InvalidImm0_255:
6575 case Match_InvalidImm0_65535:
6576 case Match_InvalidImm1_8:
6577 case Match_InvalidImm1_16:
6578 case Match_InvalidImm1_32:
6579 case Match_InvalidImm1_64:
6580 case Match_InvalidMemoryIndexedRange2UImm0:
6581 case Match_InvalidMemoryIndexedRange2UImm1:
6582 case Match_InvalidMemoryIndexedRange2UImm2:
6583 case Match_InvalidMemoryIndexedRange2UImm3:
6584 case Match_InvalidMemoryIndexedRange4UImm0:
6585 case Match_InvalidMemoryIndexedRange4UImm1:
6586 case Match_InvalidMemoryIndexedRange4UImm2:
6587 case Match_InvalidSVEAddSubImm8:
6588 case Match_InvalidSVEAddSubImm16:
6589 case Match_InvalidSVEAddSubImm32:
6590 case Match_InvalidSVEAddSubImm64:
6591 case Match_InvalidSVECpyImm8:
6592 case Match_InvalidSVECpyImm16:
6593 case Match_InvalidSVECpyImm32:
6594 case Match_InvalidSVECpyImm64:
6595 case Match_InvalidIndexRange0_0:
6596 case Match_InvalidIndexRange1_1:
6597 case Match_InvalidIndexRange0_15:
6598 case Match_InvalidIndexRange0_7:
6599 case Match_InvalidIndexRange0_3:
6600 case Match_InvalidIndexRange0_1:
6601 case Match_InvalidSVEIndexRange0_63:
6602 case Match_InvalidSVEIndexRange0_31:
6603 case Match_InvalidSVEIndexRange0_15:
6604 case Match_InvalidSVEIndexRange0_7:
6605 case Match_InvalidSVEIndexRange0_3:
6606 case Match_InvalidLabel:
6607 case Match_InvalidComplexRotationEven:
6608 case Match_InvalidComplexRotationOdd:
6609 case Match_InvalidGPR64shifted8:
6610 case Match_InvalidGPR64shifted16:
6611 case Match_InvalidGPR64shifted32:
6612 case Match_InvalidGPR64shifted64:
6613 case Match_InvalidGPR64shifted128:
6614 case Match_InvalidGPR64NoXZRshifted8:
6615 case Match_InvalidGPR64NoXZRshifted16:
6616 case Match_InvalidGPR64NoXZRshifted32:
6617 case Match_InvalidGPR64NoXZRshifted64:
6618 case Match_InvalidGPR64NoXZRshifted128:
6619 case Match_InvalidZPR32UXTW8:
6620 case Match_InvalidZPR32UXTW16:
6621 case Match_InvalidZPR32UXTW32:
6622 case Match_InvalidZPR32UXTW64:
6623 case Match_InvalidZPR32SXTW8:
6624 case Match_InvalidZPR32SXTW16:
6625 case Match_InvalidZPR32SXTW32:
6626 case Match_InvalidZPR32SXTW64:
6627 case Match_InvalidZPR64UXTW8:
6628 case Match_InvalidZPR64SXTW8:
6629 case Match_InvalidZPR64UXTW16:
6630 case Match_InvalidZPR64SXTW16:
6631 case Match_InvalidZPR64UXTW32:
6632 case Match_InvalidZPR64SXTW32:
6633 case Match_InvalidZPR64UXTW64:
6634 case Match_InvalidZPR64SXTW64:
6635 case Match_InvalidZPR32LSL8:
6636 case Match_InvalidZPR32LSL16:
6637 case Match_InvalidZPR32LSL32:
6638 case Match_InvalidZPR32LSL64:
6639 case Match_InvalidZPR64LSL8:
6640 case Match_InvalidZPR64LSL16:
6641 case Match_InvalidZPR64LSL32:
6642 case Match_InvalidZPR64LSL64:
6643 case Match_InvalidZPR0:
6644 case Match_InvalidZPR8:
6645 case Match_InvalidZPR16:
6646 case Match_InvalidZPR32:
6647 case Match_InvalidZPR64:
6648 case Match_InvalidZPR128:
6649 case Match_InvalidZPR_3b8:
6650 case Match_InvalidZPR_3b16:
6651 case Match_InvalidZPR_3b32:
6652 case Match_InvalidZPR_4b8:
6653 case Match_InvalidZPR_4b16:
6654 case Match_InvalidZPR_4b32:
6655 case Match_InvalidZPR_4b64:
6656 case Match_InvalidSVEPredicateAnyReg:
6657 case Match_InvalidSVEPattern:
6658 case Match_InvalidSVEVecLenSpecifier:
6659 case Match_InvalidSVEPredicateBReg:
6660 case Match_InvalidSVEPredicateHReg:
6661 case Match_InvalidSVEPredicateSReg:
6662 case Match_InvalidSVEPredicateDReg:
6663 case Match_InvalidSVEPredicate3bAnyReg:
6664 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6665 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6666 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6667 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6668 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6669 case Match_InvalidSVEPNPredicateBReg:
6670 case Match_InvalidSVEPNPredicateHReg:
6671 case Match_InvalidSVEPNPredicateSReg:
6672 case Match_InvalidSVEPNPredicateDReg:
6673 case Match_InvalidSVEPredicateListMul2x8:
6674 case Match_InvalidSVEPredicateListMul2x16:
6675 case Match_InvalidSVEPredicateListMul2x32:
6676 case Match_InvalidSVEPredicateListMul2x64:
6677 case Match_InvalidSVEExactFPImmOperandHalfOne:
6678 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6679 case Match_InvalidSVEExactFPImmOperandZeroOne:
6680 case Match_InvalidMatrixTile32:
6681 case Match_InvalidMatrixTile64:
6682 case Match_InvalidMatrix:
6683 case Match_InvalidMatrix8:
6684 case Match_InvalidMatrix16:
6685 case Match_InvalidMatrix32:
6686 case Match_InvalidMatrix64:
6687 case Match_InvalidMatrixTileVectorH8:
6688 case Match_InvalidMatrixTileVectorH16:
6689 case Match_InvalidMatrixTileVectorH32:
6690 case Match_InvalidMatrixTileVectorH64:
6691 case Match_InvalidMatrixTileVectorH128:
6692 case Match_InvalidMatrixTileVectorV8:
6693 case Match_InvalidMatrixTileVectorV16:
6694 case Match_InvalidMatrixTileVectorV32:
6695 case Match_InvalidMatrixTileVectorV64:
6696 case Match_InvalidMatrixTileVectorV128:
6697 case Match_InvalidSVCR:
6698 case Match_InvalidMatrixIndexGPR32_12_15:
6699 case Match_InvalidMatrixIndexGPR32_8_11:
6700 case Match_InvalidLookupTable:
6701 case Match_InvalidSVEVectorListMul2x8:
6702 case Match_InvalidSVEVectorListMul2x16:
6703 case Match_InvalidSVEVectorListMul2x32:
6704 case Match_InvalidSVEVectorListMul2x64:
6705 case Match_InvalidSVEVectorListMul4x8:
6706 case Match_InvalidSVEVectorListMul4x16:
6707 case Match_InvalidSVEVectorListMul4x32:
6708 case Match_InvalidSVEVectorListMul4x64:
6709 case Match_InvalidSVEVectorListStrided2x8:
6710 case Match_InvalidSVEVectorListStrided2x16:
6711 case Match_InvalidSVEVectorListStrided2x32:
6712 case Match_InvalidSVEVectorListStrided2x64:
6713 case Match_InvalidSVEVectorListStrided4x8:
6714 case Match_InvalidSVEVectorListStrided4x16:
6715 case Match_InvalidSVEVectorListStrided4x32:
6716 case Match_InvalidSVEVectorListStrided4x64:
6717 case Match_InvalidSVEPNRasPPRPredicateBReg:
6718 case Match_MSR:
6719 case Match_MRS: {
6720 if (ErrorInfo >= Operands.size())
6721 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6722 // Any time we get here, there's nothing fancy to do. Just get the
6723 // operand SMLoc and display the diagnostic.
6724 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6725 if (ErrorLoc == SMLoc())
6726 ErrorLoc = IDLoc;
6727 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6728 }
6729 }
6730
6731 llvm_unreachable("Implement any new match types added!");
6732}
6733
6734/// ParseDirective parses the arm specific directives
6735bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6736 const MCContext::Environment Format = getContext().getObjectFileType();
6737 bool IsMachO = Format == MCContext::IsMachO;
6738 bool IsCOFF = Format == MCContext::IsCOFF;
6739
6740 auto IDVal = DirectiveID.getIdentifier().lower();
6741 SMLoc Loc = DirectiveID.getLoc();
6742 if (IDVal == ".arch")
6743 parseDirectiveArch(Loc);
6744 else if (IDVal == ".cpu")
6745 parseDirectiveCPU(Loc);
6746 else if (IDVal == ".tlsdesccall")
6747 parseDirectiveTLSDescCall(Loc);
6748 else if (IDVal == ".ltorg" || IDVal == ".pool")
6749 parseDirectiveLtorg(Loc);
6750 else if (IDVal == ".unreq")
6751 parseDirectiveUnreq(Loc);
6752 else if (IDVal == ".inst")
6753 parseDirectiveInst(Loc);
6754 else if (IDVal == ".cfi_negate_ra_state")
6755 parseDirectiveCFINegateRAState();
6756 else if (IDVal == ".cfi_b_key_frame")
6757 parseDirectiveCFIBKeyFrame();
6758 else if (IDVal == ".cfi_mte_tagged_frame")
6759 parseDirectiveCFIMTETaggedFrame();
6760 else if (IDVal == ".arch_extension")
6761 parseDirectiveArchExtension(Loc);
6762 else if (IDVal == ".variant_pcs")
6763 parseDirectiveVariantPCS(Loc);
6764 else if (IsMachO) {
6765 if (IDVal == MCLOHDirectiveName())
6766 parseDirectiveLOH(IDVal, Loc);
6767 else
6768 return true;
6769 } else if (IsCOFF) {
6770 if (IDVal == ".seh_stackalloc")
6771 parseDirectiveSEHAllocStack(Loc);
6772 else if (IDVal == ".seh_endprologue")
6773 parseDirectiveSEHPrologEnd(Loc);
6774 else if (IDVal == ".seh_save_r19r20_x")
6775 parseDirectiveSEHSaveR19R20X(Loc);
6776 else if (IDVal == ".seh_save_fplr")
6777 parseDirectiveSEHSaveFPLR(Loc);
6778 else if (IDVal == ".seh_save_fplr_x")
6779 parseDirectiveSEHSaveFPLRX(Loc);
6780 else if (IDVal == ".seh_save_reg")
6781 parseDirectiveSEHSaveReg(Loc);
6782 else if (IDVal == ".seh_save_reg_x")
6783 parseDirectiveSEHSaveRegX(Loc);
6784 else if (IDVal == ".seh_save_regp")
6785 parseDirectiveSEHSaveRegP(Loc);
6786 else if (IDVal == ".seh_save_regp_x")
6787 parseDirectiveSEHSaveRegPX(Loc);
6788 else if (IDVal == ".seh_save_lrpair")
6789 parseDirectiveSEHSaveLRPair(Loc);
6790 else if (IDVal == ".seh_save_freg")
6791 parseDirectiveSEHSaveFReg(Loc);
6792 else if (IDVal == ".seh_save_freg_x")
6793 parseDirectiveSEHSaveFRegX(Loc);
6794 else if (IDVal == ".seh_save_fregp")
6795 parseDirectiveSEHSaveFRegP(Loc);
6796 else if (IDVal == ".seh_save_fregp_x")
6797 parseDirectiveSEHSaveFRegPX(Loc);
6798 else if (IDVal == ".seh_set_fp")
6799 parseDirectiveSEHSetFP(Loc);
6800 else if (IDVal == ".seh_add_fp")
6801 parseDirectiveSEHAddFP(Loc);
6802 else if (IDVal == ".seh_nop")
6803 parseDirectiveSEHNop(Loc);
6804 else if (IDVal == ".seh_save_next")
6805 parseDirectiveSEHSaveNext(Loc);
6806 else if (IDVal == ".seh_startepilogue")
6807 parseDirectiveSEHEpilogStart(Loc);
6808 else if (IDVal == ".seh_endepilogue")
6809 parseDirectiveSEHEpilogEnd(Loc);
6810 else if (IDVal == ".seh_trap_frame")
6811 parseDirectiveSEHTrapFrame(Loc);
6812 else if (IDVal == ".seh_pushframe")
6813 parseDirectiveSEHMachineFrame(Loc);
6814 else if (IDVal == ".seh_context")
6815 parseDirectiveSEHContext(Loc);
6816 else if (IDVal == ".seh_ec_context")
6817 parseDirectiveSEHECContext(Loc);
6818 else if (IDVal == ".seh_clear_unwound_to_call")
6819 parseDirectiveSEHClearUnwoundToCall(Loc);
6820 else if (IDVal == ".seh_pac_sign_lr")
6821 parseDirectiveSEHPACSignLR(Loc);
6822 else if (IDVal == ".seh_save_any_reg")
6823 parseDirectiveSEHSaveAnyReg(Loc, false, false);
6824 else if (IDVal == ".seh_save_any_reg_p")
6825 parseDirectiveSEHSaveAnyReg(Loc, true, false);
6826 else if (IDVal == ".seh_save_any_reg_x")
6827 parseDirectiveSEHSaveAnyReg(Loc, false, true);
6828 else if (IDVal == ".seh_save_any_reg_px")
6829 parseDirectiveSEHSaveAnyReg(Loc, true, true);
6830 else
6831 return true;
6832 } else
6833 return true;
6834 return false;
6835}
6836
6837static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6838 SmallVector<StringRef, 4> &RequestedExtensions) {
6839 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6840 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6841
6842 if (!NoCrypto && Crypto) {
6843 // Map 'generic' (and others) to sha2 and aes, because
6844 // that was the traditional meaning of crypto.
6845 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6846 ArchInfo == AArch64::ARMV8_3A) {
6847 RequestedExtensions.push_back("sha2");
6848 RequestedExtensions.push_back("aes");
6849 }
6850 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6851 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6852 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6853 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6854 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6855 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6856 RequestedExtensions.push_back("sm4");
6857 RequestedExtensions.push_back("sha3");
6858 RequestedExtensions.push_back("sha2");
6859 RequestedExtensions.push_back("aes");
6860 }
6861 } else if (NoCrypto) {
6862 // Map 'generic' (and others) to sha2 and aes, because
6863 // that was the traditional meaning of crypto.
6864 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6865 ArchInfo == AArch64::ARMV8_3A) {
6866 RequestedExtensions.push_back("nosha2");
6867 RequestedExtensions.push_back("noaes");
6868 }
6869 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6870 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6871 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6872 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6873 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6874 ArchInfo == AArch64::ARMV9_4A) {
6875 RequestedExtensions.push_back("nosm4");
6876 RequestedExtensions.push_back("nosha3");
6877 RequestedExtensions.push_back("nosha2");
6878 RequestedExtensions.push_back("noaes");
6879 }
6880 }
6881}
6882
6883/// parseDirectiveArch
6884/// ::= .arch token
6885bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6886 SMLoc ArchLoc = getLoc();
6887
6888 StringRef Arch, ExtensionString;
6889 std::tie(Arch, ExtensionString) =
6890 getParser().parseStringToEndOfStatement().trim().split('+');
6891
6892 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
6893 if (!ArchInfo)
6894 return Error(ArchLoc, "unknown arch name");
6895
6896 if (parseToken(AsmToken::EndOfStatement))
6897 return true;
6898
6899 // Get the architecture and extension features.
6900 std::vector<StringRef> AArch64Features;
6901 AArch64Features.push_back(ArchInfo->ArchFeature);
6902 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
6903
6904 MCSubtargetInfo &STI = copySTI();
6905 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6906 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6907 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6908
6909 SmallVector<StringRef, 4> RequestedExtensions;
6910 if (!ExtensionString.empty())
6911 ExtensionString.split(RequestedExtensions, '+');
6912
6913 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
6914
6915 FeatureBitset Features = STI.getFeatureBits();
6916 setAvailableFeatures(ComputeAvailableFeatures(Features));
6917 for (auto Name : RequestedExtensions) {
6918 bool EnableFeature = !Name.consume_front_insensitive("no");
6919
6920 for (const auto &Extension : ExtensionMap) {
6921 if (Extension.Name != Name)
6922 continue;
6923
6924 if (Extension.Features.none())
6925 report_fatal_error("unsupported architectural extension: " + Name);
6926
6927 FeatureBitset ToggleFeatures =
6928 EnableFeature
6930 : STI.ToggleFeature(Features & Extension.Features);
6931 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6932 break;
6933 }
6934 }
6935 return false;
6936}
6937
6938/// parseDirectiveArchExtension
6939/// ::= .arch_extension [no]feature
6940bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6941 SMLoc ExtLoc = getLoc();
6942
6943 StringRef Name = getParser().parseStringToEndOfStatement().trim();
6944
6945 if (parseEOL())
6946 return true;
6947
6948 bool EnableFeature = true;
6949 if (Name.starts_with_insensitive("no")) {
6950 EnableFeature = false;
6951 Name = Name.substr(2);
6952 }
6953
6954 MCSubtargetInfo &STI = copySTI();
6955 FeatureBitset Features = STI.getFeatureBits();
6956 for (const auto &Extension : ExtensionMap) {
6957 if (Extension.Name != Name)
6958 continue;
6959
6960 if (Extension.Features.none())
6961 return Error(ExtLoc, "unsupported architectural extension: " + Name);
6962
6963 FeatureBitset ToggleFeatures =
6964 EnableFeature
6966 : STI.ToggleFeature(Features & Extension.Features);
6967 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6968 return false;
6969 }
6970
6971 return Error(ExtLoc, "unknown architectural extension: " + Name);
6972}
6973
6975 return SMLoc::getFromPointer(L.getPointer() + Offset);
6976}
6977
6978/// parseDirectiveCPU
6979/// ::= .cpu id
6980bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
6981 SMLoc CurLoc = getLoc();
6982
6983 StringRef CPU, ExtensionString;
6984 std::tie(CPU, ExtensionString) =
6985 getParser().parseStringToEndOfStatement().trim().split('+');
6986
6987 if (parseToken(AsmToken::EndOfStatement))
6988 return true;
6989
6990 SmallVector<StringRef, 4> RequestedExtensions;
6991 if (!ExtensionString.empty())
6992 ExtensionString.split(RequestedExtensions, '+');
6993
6995 if (!CpuArch) {
6996 Error(CurLoc, "unknown CPU name");
6997 return false;
6998 }
6999 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7000
7001 MCSubtargetInfo &STI = copySTI();
7002 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7003 CurLoc = incrementLoc(CurLoc, CPU.size());
7004
7005 for (auto Name : RequestedExtensions) {
7006 // Advance source location past '+'.
7007 CurLoc = incrementLoc(CurLoc, 1);
7008
7009 bool EnableFeature = !Name.consume_front_insensitive("no");
7010
7011 bool FoundExtension = false;
7012 for (const auto &Extension : ExtensionMap) {
7013 if (Extension.Name != Name)
7014 continue;
7015
7016 if (Extension.Features.none())
7017 report_fatal_error("unsupported architectural extension: " + Name);
7018
7019 FeatureBitset Features = STI.getFeatureBits();
7020 FeatureBitset ToggleFeatures =
7021 EnableFeature
7023 : STI.ToggleFeature(Features & Extension.Features);
7024 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7025 FoundExtension = true;
7026
7027 break;
7028 }
7029
7030 if (!FoundExtension)
7031 Error(CurLoc, "unsupported architectural extension");
7032
7033 CurLoc = incrementLoc(CurLoc, Name.size());
7034 }
7035 return false;
7036}
7037
7038/// parseDirectiveInst
7039/// ::= .inst opcode [, ...]
7040bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7041 if (getLexer().is(AsmToken::EndOfStatement))
7042 return Error(Loc, "expected expression following '.inst' directive");
7043
7044 auto parseOp = [&]() -> bool {
7045 SMLoc L = getLoc();
7046 const MCExpr *Expr = nullptr;
7047 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7048 return true;
7049 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7050 if (check(!Value, L, "expected constant expression"))
7051 return true;
7052 getTargetStreamer().emitInst(Value->getValue());
7053 return false;
7054 };
7055
7056 return parseMany(parseOp);
7057}
7058
7059// parseDirectiveTLSDescCall:
7060// ::= .tlsdesccall symbol
7061bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7063 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7064 parseToken(AsmToken::EndOfStatement))
7065 return true;
7066
7067 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7068 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7069 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7070
7071 MCInst Inst;
7072 Inst.setOpcode(AArch64::TLSDESCCALL);
7074
7075 getParser().getStreamer().emitInstruction(Inst, getSTI());
7076 return false;
7077}
7078
7079/// ::= .loh <lohName | lohId> label1, ..., labelN
7080/// The number of arguments depends on the loh identifier.
7081bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7083 if (getTok().isNot(AsmToken::Identifier)) {
7084 if (getTok().isNot(AsmToken::Integer))
7085 return TokError("expected an identifier or a number in directive");
7086 // We successfully get a numeric value for the identifier.
7087 // Check if it is valid.
7088 int64_t Id = getTok().getIntVal();
7089 if (Id <= -1U && !isValidMCLOHType(Id))
7090 return TokError("invalid numeric identifier in directive");
7091 Kind = (MCLOHType)Id;
7092 } else {
7093 StringRef Name = getTok().getIdentifier();
7094 // We successfully parse an identifier.
7095 // Check if it is a recognized one.
7096 int Id = MCLOHNameToId(Name);
7097
7098 if (Id == -1)
7099 return TokError("invalid identifier in directive");
7100 Kind = (MCLOHType)Id;
7101 }
7102 // Consume the identifier.
7103 Lex();
7104 // Get the number of arguments of this LOH.
7105 int NbArgs = MCLOHIdToNbArgs(Kind);
7106
7107 assert(NbArgs != -1 && "Invalid number of arguments");
7108
7110 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7112 if (getParser().parseIdentifier(Name))
7113 return TokError("expected identifier in directive");
7114 Args.push_back(getContext().getOrCreateSymbol(Name));
7115
7116 if (Idx + 1 == NbArgs)
7117 break;
7118 if (parseComma())
7119 return true;
7120 }
7121 if (parseEOL())
7122 return true;
7123
7124 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7125 return false;
7126}
7127
7128/// parseDirectiveLtorg
7129/// ::= .ltorg | .pool
7130bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7131 if (parseEOL())
7132 return true;
7133 getTargetStreamer().emitCurrentConstantPool();
7134 return false;
7135}
7136
7137/// parseDirectiveReq
7138/// ::= name .req registername
7139bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7140 Lex(); // Eat the '.req' token.
7141 SMLoc SRegLoc = getLoc();
7142 RegKind RegisterKind = RegKind::Scalar;
7143 MCRegister RegNum;
7144 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7145
7146 if (!ParseRes.isSuccess()) {
7148 RegisterKind = RegKind::NeonVector;
7149 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7150
7151 if (ParseRes.isFailure())
7152 return true;
7153
7154 if (ParseRes.isSuccess() && !Kind.empty())
7155 return Error(SRegLoc, "vector register without type specifier expected");
7156 }
7157
7158 if (!ParseRes.isSuccess()) {
7160 RegisterKind = RegKind::SVEDataVector;
7161 ParseRes =
7162 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7163
7164 if (ParseRes.isFailure())
7165 return true;
7166
7167 if (ParseRes.isSuccess() && !Kind.empty())
7168 return Error(SRegLoc,
7169 "sve vector register without type specifier expected");
7170 }
7171
7172 if (!ParseRes.isSuccess()) {
7174 RegisterKind = RegKind::SVEPredicateVector;
7175 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7176
7177 if (ParseRes.isFailure())
7178 return true;
7179
7180 if (ParseRes.isSuccess() && !Kind.empty())
7181 return Error(SRegLoc,
7182 "sve predicate register without type specifier expected");
7183 }
7184
7185 if (!ParseRes.isSuccess())
7186 return Error(SRegLoc, "register name or alias expected");
7187
7188 // Shouldn't be anything else.
7189 if (parseEOL())
7190 return true;
7191
7192 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7193 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7194 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7195
7196 return false;
7197}
7198
7199/// parseDirectiveUneq
7200/// ::= .unreq registername
7201bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7202 if (getTok().isNot(AsmToken::Identifier))
7203 return TokError("unexpected input in .unreq directive.");
7204 RegisterReqs.erase(getTok().getIdentifier().lower());
7205 Lex(); // Eat the identifier.
7206 return parseToken(AsmToken::EndOfStatement);
7207}
7208
7209bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7210 if (parseEOL())
7211 return true;
7212 getStreamer().emitCFINegateRAState();
7213 return false;
7214}
7215
7216/// parseDirectiveCFIBKeyFrame
7217/// ::= .cfi_b_key
7218bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7219 if (parseEOL())
7220 return true;
7221 getStreamer().emitCFIBKeyFrame();
7222 return false;
7223}
7224
7225/// parseDirectiveCFIMTETaggedFrame
7226/// ::= .cfi_mte_tagged_frame
7227bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7228 if (parseEOL())
7229 return true;
7230 getStreamer().emitCFIMTETaggedFrame();
7231 return false;
7232}
7233
7234/// parseDirectiveVariantPCS
7235/// ::= .variant_pcs symbolname
7236bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7238 if (getParser().parseIdentifier(Name))
7239 return TokError("expected symbol name");
7240 if (parseEOL())
7241 return true;
7242 getTargetStreamer().emitDirectiveVariantPCS(
7243 getContext().getOrCreateSymbol(Name));
7244 return false;
7245}
7246
7247/// parseDirectiveSEHAllocStack
7248/// ::= .seh_stackalloc
7249bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7250 int64_t Size;
7251 if (parseImmExpr(Size))
7252 return true;
7253 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7254 return false;
7255}
7256
7257/// parseDirectiveSEHPrologEnd
7258/// ::= .seh_endprologue
7259bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7260 getTargetStreamer().emitARM64WinCFIPrologEnd();
7261 return false;
7262}
7263
7264/// parseDirectiveSEHSaveR19R20X
7265/// ::= .seh_save_r19r20_x
7266bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7267 int64_t Offset;
7268 if (parseImmExpr(Offset))
7269 return true;
7270 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7271 return false;
7272}
7273
7274/// parseDirectiveSEHSaveFPLR
7275/// ::= .seh_save_fplr
7276bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7277 int64_t Offset;
7278 if (parseImmExpr(Offset))
7279 return true;
7280 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7281 return false;
7282}
7283
7284/// parseDirectiveSEHSaveFPLRX
7285/// ::= .seh_save_fplr_x
7286bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7287 int64_t Offset;
7288 if (parseImmExpr(Offset))
7289 return true;
7290 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7291 return false;
7292}
7293
7294/// parseDirectiveSEHSaveReg
7295/// ::= .seh_save_reg
7296bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7297 unsigned Reg;
7298 int64_t Offset;
7299 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7300 parseComma() || parseImmExpr(Offset))
7301 return true;
7302 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7303 return false;
7304}
7305
7306/// parseDirectiveSEHSaveRegX
7307/// ::= .seh_save_reg_x
7308bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7309 unsigned Reg;
7310 int64_t Offset;
7311 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7312 parseComma() || parseImmExpr(Offset))
7313 return true;
7314 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7315 return false;
7316}
7317
7318/// parseDirectiveSEHSaveRegP
7319/// ::= .seh_save_regp
7320bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7321 unsigned Reg;
7322 int64_t Offset;
7323 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7324 parseComma() || parseImmExpr(Offset))
7325 return true;
7326 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7327 return false;
7328}
7329
7330/// parseDirectiveSEHSaveRegPX
7331/// ::= .seh_save_regp_x
7332bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7333 unsigned Reg;
7334 int64_t Offset;
7335 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7336 parseComma() || parseImmExpr(Offset))
7337 return true;
7338 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7339 return false;
7340}
7341
/// parseDirectiveSEHSaveLRPair
/// ::= .seh_save_lrpair
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Remember where the operands start so the parity diagnostic below points
  // at the register operand rather than the directive.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  // NOTE(review): Reg appears to be a register index here (the diagnostic
  // talks about an "offset from x19"); only even distances from x19 are
  // accepted — confirm against parseRegisterInRange's encoding.
  if (check(((Reg - 19) % 2 != 0), L,
        "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7357
7358/// parseDirectiveSEHSaveFReg
7359/// ::= .seh_save_freg
7360bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7361 unsigned Reg;
7362 int64_t Offset;
7363 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7364 parseComma() || parseImmExpr(Offset))
7365 return true;
7366 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7367 return false;
7368}
7369
7370/// parseDirectiveSEHSaveFRegX
7371/// ::= .seh_save_freg_x
7372bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7373 unsigned Reg;
7374 int64_t Offset;
7375 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7376 parseComma() || parseImmExpr(Offset))
7377 return true;
7378 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7379 return false;
7380}
7381
7382/// parseDirectiveSEHSaveFRegP
7383/// ::= .seh_save_fregp
7384bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7385 unsigned Reg;
7386 int64_t Offset;
7387 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7388 parseComma() || parseImmExpr(Offset))
7389 return true;
7390 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7391 return false;
7392}
7393
7394/// parseDirectiveSEHSaveFRegPX
7395/// ::= .seh_save_fregp_x
7396bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7397 unsigned Reg;
7398 int64_t Offset;
7399 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7400 parseComma() || parseImmExpr(Offset))
7401 return true;
7402 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7403 return false;
7404}
7405
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7412
7413/// parseDirectiveSEHAddFP
7414/// ::= .seh_add_fp
7415bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7416 int64_t Size;
7417 if (parseImmExpr(Size))
7418 return true;
7419 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7420 return false;
7421}
7422
/// parseDirectiveSEHNop
/// ::= .seh_nop
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7429
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7436
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7443
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
7450
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
7457
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
7464
/// parseDirectiveSEHContext
/// ::= .seh_context
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
7471
/// parseDirectiveSEHECContext
/// ::= .seh_ec_context
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
7478
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
7485
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
///
/// Takes no operands; forwards directly to the target streamer and never
/// fails.
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
7492
7493/// parseDirectiveSEHSaveAnyReg
7494/// ::= .seh_save_any_reg
7495/// ::= .seh_save_any_reg_p
7496/// ::= .seh_save_any_reg_x
7497/// ::= .seh_save_any_reg_px
7498bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7499 bool Writeback) {
7501 SMLoc Start, End;
7502 int64_t Offset;
7503 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7504 parseComma() || parseImmExpr(Offset))
7505 return true;
7506
7507 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7508 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7509 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7510 return Error(L, "invalid save_any_reg offset");
7511 unsigned EncodedReg;
7512 if (Reg == AArch64::FP)
7513 EncodedReg = 29;
7514 else if (Reg == AArch64::LR)
7515 EncodedReg = 30;
7516 else
7517 EncodedReg = Reg - AArch64::X0;
7518 if (Paired) {
7519 if (Reg == AArch64::LR)
7520 return Error(Start, "lr cannot be paired with another register");
7521 if (Writeback)
7522 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7523 else
7524 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7525 } else {
7526 if (Writeback)
7527 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7528 else
7529 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7530 }
7531 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7532 unsigned EncodedReg = Reg - AArch64::D0;
7533 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7534 return Error(L, "invalid save_any_reg offset");
7535 if (Paired) {
7536 if (Reg == AArch64::D31)
7537 return Error(Start, "d31 cannot be paired with another register");
7538 if (Writeback)
7539 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7540 else
7541 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7542 } else {
7543 if (Writeback)
7544 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7545 else
7546 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7547 }
7548 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7549 unsigned EncodedReg = Reg - AArch64::Q0;
7550 if (Offset < 0 || Offset % 16)
7551 return Error(L, "invalid save_any_reg offset");
7552 if (Paired) {
7553 if (Reg == AArch64::Q31)
7554 return Error(Start, "q31 cannot be paired with another register");
7555 if (Writeback)
7556 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7557 else
7558 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7559 } else {
7560 if (Writeback)
7561 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7562 else
7563 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7564 }
7565 } else {
7566 return Error(Start, "save_any_reg register must be x, q or d register");
7567 }
7568 return false;
7569}
7570
/// Target override of primary-expression parsing.
///
/// First attempts to parse a pointer-authentication @AUTH expression (see
/// parseAuthExpr); if that does not match, defers to the generic parser's
/// parsePrimaryExpr. Returns true on failure, per MCAsmParser convention.
bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  // Try @AUTH expressions: they're more complex than the usual symbol variants.
  if (!parseAuthExpr(Res, EndLoc))
    return false;
  return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
}
7577
7578/// parseAuthExpr
7579/// ::= _sym@AUTH(ib,123[,addr])
7580/// ::= (_sym + 5)@AUTH(ib,123[,addr])
7581/// ::= (_sym - 5)@AUTH(ib,123[,addr])
7582bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7583 MCAsmParser &Parser = getParser();
7584 MCContext &Ctx = getContext();
7585
7586 AsmToken Tok = Parser.getTok();
7587
7588 // Look for '_sym@AUTH' ...
7589 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
7590 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
7591 if (SymName.contains('@'))
7592 return TokError(
7593 "combination of @AUTH with other modifiers not supported");
7594 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7595
7596 Parser.Lex(); // Eat the identifier.
7597 } else {
7598 // ... or look for a more complex symbol reference, such as ...
7600
7601 // ... '"_long sym"@AUTH' ...
7602 if (Tok.is(AsmToken::String))
7603 Tokens.resize(2);
7604 // ... or '(_sym + 5)@AUTH'.
7605 else if (Tok.is(AsmToken::LParen))
7606 Tokens.resize(6);
7607 else
7608 return true;
7609
7610 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
7611 return true;
7612
7613 // In either case, the expression ends with '@' 'AUTH'.
7614 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
7615 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
7616 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
7617 return true;
7618
7619 if (Tok.is(AsmToken::String)) {
7620 StringRef SymName;
7621 if (Parser.parseIdentifier(SymName))
7622 return true;
7623 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7624 } else {
7625 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
7626 return true;
7627 }
7628
7629 Parser.Lex(); // '@'
7630 Parser.Lex(); // 'AUTH'
7631 }
7632
7633 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
7634 if (parseToken(AsmToken::LParen, "expected '('"))
7635 return true;
7636
7637 if (Parser.getTok().isNot(AsmToken::Identifier))
7638 return TokError("expected key name");
7639
7640 StringRef KeyStr = Parser.getTok().getIdentifier();
7641 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
7642 if (!KeyIDOrNone)
7643 return TokError("invalid key '" + KeyStr + "'");
7644 Parser.Lex();
7645
7646 if (parseToken(AsmToken::Comma, "expected ','"))
7647 return true;
7648
7649 if (Parser.getTok().isNot(AsmToken::Integer))
7650 return TokError("expected integer discriminator");
7651 int64_t Discriminator = Parser.getTok().getIntVal();
7652
7653 if (!isUInt<16>(Discriminator))
7654 return TokError("integer discriminator " + Twine(Discriminator) +
7655 " out of range [0, 0xFFFF]");
7656 Parser.Lex();
7657
7658 bool UseAddressDiversity = false;
7659 if (Parser.getTok().is(AsmToken::Comma)) {
7660 Parser.Lex();
7661 if (Parser.getTok().isNot(AsmToken::Identifier) ||
7662 Parser.getTok().getIdentifier() != "addr")
7663 return TokError("expected 'addr'");
7664 UseAddressDiversity = true;
7665 Parser.Lex();
7666 }
7667
7668 EndLoc = Parser.getTok().getEndLoc();
7669 if (parseToken(AsmToken::RParen, "expected ')'"))
7670 return true;
7671
7672 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
7673 UseAddressDiversity, Ctx);
7674 return false;
7675}
7676
/// Decompose \p Expr into an ELF modifier kind, a Darwin modifier kind and a
/// constant addend.
///
/// Returns true if the expression is a classifiable symbol reference
/// (optionally wrapped in an AArch64MCExpr and/or carrying an addend), false
/// if it is neither relocatable nor a recognized symbolic form. On a true
/// return, at most one of \p ELFRefKind / \p DarwinRefKind is meaningful.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Start from neutral defaults so callers can test either kind.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific wrapper (e.g. ":lo12:") if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7718
7719/// Force static initialization.
7726}
7727
7728#define GET_REGISTER_MATCHER
7729#define GET_SUBTARGET_FEATURE_NAME
7730#define GET_MATCHER_IMPLEMENTATION
7731#define GET_MNEMONIC_SPELL_CHECKER
7732#include "AArch64GenAsmMatcher.inc"
7733
7734// Define this matcher function after the auto-generated include so we
7735// have the match class enum definitions.
7736unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
7737 unsigned Kind) {
7738 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
7739
7740 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
7741 if (!Op.isImm())
7742 return Match_InvalidOperand;
7743 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7744 if (!CE)
7745 return Match_InvalidOperand;
7746 if (CE->getValue() == ExpectedVal)
7747 return Match_Success;
7748 return Match_InvalidOperand;
7749 };
7750
7751 switch (Kind) {
7752 default:
7753 return Match_InvalidOperand;
7754 case MCK_MPR:
7755 // If the Kind is a token for the MPR register class which has the "za"
7756 // register (SME accumulator array), check if the asm is a literal "za"
7757 // token. This is for the "smstart za" alias that defines the register
7758 // as a literal token.
7759 if (Op.isTokenEqual("za"))
7760 return Match_Success;
7761 return Match_InvalidOperand;
7762
7763 // If the kind is a token for a literal immediate, check if our asm operand
7764 // matches. This is for InstAliases which have a fixed-value immediate in
7765 // the asm string, such as hints which are parsed into a specific
7766 // instruction definition.
7767#define MATCH_HASH(N) \
7768 case MCK__HASH_##N: \
7769 return MatchesOpImmediate(N);
7770 MATCH_HASH(0)
7771 MATCH_HASH(1)
7772 MATCH_HASH(2)
7773 MATCH_HASH(3)
7774 MATCH_HASH(4)
7775 MATCH_HASH(6)
7776 MATCH_HASH(7)
7777 MATCH_HASH(8)
7778 MATCH_HASH(10)
7779 MATCH_HASH(12)
7780 MATCH_HASH(14)
7781 MATCH_HASH(16)
7782 MATCH_HASH(24)
7783 MATCH_HASH(25)
7784 MATCH_HASH(26)
7785 MATCH_HASH(27)
7786 MATCH_HASH(28)
7787 MATCH_HASH(29)
7788 MATCH_HASH(30)
7789 MATCH_HASH(31)
7790 MATCH_HASH(32)
7791 MATCH_HASH(40)
7792 MATCH_HASH(48)
7793 MATCH_HASH(64)
7794#undef MATCH_HASH
7795#define MATCH_HASH_MINUS(N) \
7796 case MCK__HASH__MINUS_##N: \
7797 return MatchesOpImmediate(-N);
7801#undef MATCH_HASH_MINUS
7802 }
7803}
7804
/// Parse a consecutive even/odd GPR pair operand (e.g. "x0, x1" or "w2, w3")
/// as used by CASP-style instructions.
///
/// Both registers must be the same width (both X or both W), the first must
/// have an even encoding, and the second must encode exactly first+1. On
/// success pushes a single sequential-pair super-register operand.
ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(FirstReg);
  if (!Res.isSuccess())
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The hardware encoding, not the register number, determines even/odd.
  if (FirstEncoding & 0x1)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  if (getTok().isNot(AsmToken::Comma))
    return Error(getLoc(), "expected comma");
  // Eat the comma
  Lex();

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (!Res.isSuccess())
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg)))
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  // Map the first register to the covering sequential-pair super-register.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return ParseStatus::Success;
}
7868
7869template <bool ParseShiftExtend, bool ParseSuffix>
7870ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
7871 const SMLoc S = getLoc();
7872 // Check for a SVE vector register specifier first.
7873 MCRegister RegNum;
7875
7876 ParseStatus Res =
7877 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7878
7879 if (!Res.isSuccess())
7880 return Res;
7881
7882 if (ParseSuffix && Kind.empty())
7883 return ParseStatus::NoMatch;
7884
7885 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
7886 if (!KindRes)
7887 return ParseStatus::NoMatch;
7888
7889 unsigned ElementWidth = KindRes->second;
7890
7891 // No shift/extend is the default.
7892 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
7893 Operands.push_back(AArch64Operand::CreateVectorReg(
7894 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
7895
7896 ParseStatus Res = tryParseVectorIndex(Operands);
7897 if (Res.isFailure())
7898 return ParseStatus::Failure;
7899 return ParseStatus::Success;
7900 }
7901
7902 // Eat the comma
7903 Lex();
7904
7905 // Match the shift
7907 Res = tryParseOptionalShiftExtend(ExtOpnd);
7908 if (!Res.isSuccess())
7909 return Res;
7910
7911 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7912 Operands.push_back(AArch64Operand::CreateVectorReg(
7913 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7914 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7915 Ext->hasShiftExtendAmount()));
7916
7917 return ParseStatus::Success;
7918}
7919
/// Parse an SVE predicate-pattern operand, either as a named pattern
/// (e.g. "vl4", "all") or as an immediate ("#31").
///
/// Pushes the pattern encoding as an immediate operand on success.
ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return ParseStatus::NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are accepted as a pattern immediate.
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("invalid operand for instruction");

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;

    Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return ParseStatus::Success;
}
7962
7964AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
7965 int64_t Pattern;
7966 SMLoc SS = getLoc();
7967 const AsmToken &TokE = getTok();
7968 // Parse the pattern
7969 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
7970 TokE.getString());
7971 if (!Pat)
7972 return ParseStatus::NoMatch;
7973
7974 Lex();
7975 Pattern = Pat->Encoding;
7976 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
7977
7978 Operands.push_back(
7979 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7980 SS, getLoc(), getContext()));
7981
7982 return ParseStatus::Success;
7983}
7984
/// Parse a GPR64x8 tuple operand: a single scalar x-register that anchors an
/// 8-register sequence (used by instructions taking a GPR64x8 class operand).
///
/// The register must map onto a GPR64x8Class super-register via x8sub_0,
/// which restricts it to even registers x0..x22 (hence the diagnostic).
ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
  SMLoc SS = getLoc();

  MCRegister XReg;
  if (!tryParseScalarRegister(XReg).isSuccess())
    return ParseStatus::NoMatch;

  MCContext &ctx = getContext();
  const MCRegisterInfo *RI = ctx.getRegisterInfo();
  // Zero means no covering super-register exists, i.e. an invalid anchor.
  int X8Reg = RI->getMatchingSuperReg(
      XReg, AArch64::x8sub_0,
      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
  if (!X8Reg)
    return Error(SS,
                 "expected an even-numbered x-register in the range [x0,x22]");

  Operands.push_back(
      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
  return ParseStatus::Success;
}
8005
8006ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8007 SMLoc S = getLoc();
8008
8009 if (getTok().isNot(AsmToken::Integer))
8010 return ParseStatus::NoMatch;
8011
8012 if (getLexer().peekTok().isNot(AsmToken::Colon))
8013 return ParseStatus::NoMatch;
8014
8015 const MCExpr *ImmF;
8016 if (getParser().parseExpression(ImmF))
8017 return ParseStatus::NoMatch;
8018
8019 if (getTok().isNot(AsmToken::Colon))
8020 return ParseStatus::NoMatch;
8021
8022 Lex(); // Eat ':'
8023 if (getTok().isNot(AsmToken::Integer))
8024 return ParseStatus::NoMatch;
8025
8026 SMLoc E = getTok().getLoc();
8027 const MCExpr *ImmL;
8028 if (getParser().parseExpression(ImmL))
8029 return ParseStatus::NoMatch;
8030
8031 unsigned ImmFVal = dyn_cast<MCConstantExpr>(ImmF)->getValue();
8032 unsigned ImmLVal = dyn_cast<MCConstantExpr>(ImmL)->getValue();
8033
8034 Operands.push_back(
8035 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8036 return ParseStatus::Success;
8037}
#define MATCH_HASH_MINUS(N)
static unsigned matchSVEDataVectorRegName(StringRef Name)
static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind)
static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, SmallVector< StringRef, 4 > &RequestedExtensions)
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name)
static MCRegister MatchRegisterName(StringRef Name)
static bool isMatchingOrAlias(unsigned ZReg, unsigned Reg)
static const char * getSubtargetFeatureName(uint64_t Val)
static unsigned MatchNeonVectorRegName(StringRef Name)
}
static std::optional< std::pair< int, int > > parseVectorKind(StringRef Suffix, RegKind VectorKind)
Returns an optional pair of (#elements, element-width) if Suffix is a valid vector kind.
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser()
Force static initialization.
static unsigned matchMatrixRegName(StringRef Name)
static unsigned matchMatrixTileListRegName(StringRef Name)
static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static SMLoc incrementLoc(SMLoc L, int Offset)
#define MATCH_HASH(N)
static const struct Extension ExtensionMap[]
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str)
static unsigned matchSVEPredicateVectorRegName(StringRef Name)
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Given that RA is a live value
@ Default
Definition: DwarfDebug.cpp:87
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define check(cond)
static LVOptions Options
Definition: LVOptions.cpp:25
Live Register Matrix
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
unsigned Reg
#define T
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:413
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:410
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1513
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Target independent representation for an assembler token.
Definition: MCAsmMacro.h:21
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
int64_t getIntVal() const
Definition: MCAsmMacro.h:115
bool isNot(TokenKind K) const
Definition: MCAsmMacro.h:83
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmMacro.h:110
bool is(TokenKind K) const
Definition: MCAsmMacro.h:82
SMLoc getEndLoc() const
Definition: MCAsmLexer.cpp:30
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition: MCAsmMacro.h:99
This class represents an Operation in the Expression.
Base class for user error types.
Definition: Error.h:352
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Container class for subtarget features.
constexpr size_t size() const
void UnLex(AsmToken const &Token)
Definition: MCAsmLexer.h:93
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:111
virtual size_t peekTokens(MutableArrayRef< AsmToken > Buf, bool ShouldSkipSpace=true)=0
Look ahead an arbitrary number of tokens.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:123
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc, AsmTypeInfo *TypeInfo)=0
Parse a primary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:40
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual MCAsmLexer & getLexer()=0
virtual void addAliasForDirective(StringRef Directive, StringRef Alias)=0
int64_t getValue() const
Definition: MCExpr.h:173
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:194
Context object for machine code objects.
Definition: MCContext.h:76
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:448
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
Definition: MCContext.cpp:200
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
bool evaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout, const MCFixup *Fixup) const
Try to evaluate the expression to a relocatable value, i.e.
Definition: MCExpr.cpp:814
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
unsigned getNumOperands() const
Definition: MCInst.h:208
void setLoc(SMLoc loc)
Definition: MCInst.h:203
unsigned getOpcode() const
Definition: MCInst.h:198
void addOperand(const MCOperand Op)
Definition: MCInst.h:210
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:134
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:162
int64_t getImm() const
Definition: MCInst.h:80
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:141
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual unsigned getReg() const =0
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg.
uint16_t getEncodingValue(MCRegister RegNo) const
Returns the encoding for RegNo.
bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Streaming machine code generation interface.
Definition: MCStreamer.h:212
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:304
Generic base class for all target subtargets.
const Triple & getTargetTriple() const
const FeatureBitset & getFeatureBits() const
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with any appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:192
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:397
VariantKind getKind() const
Definition: MCExpr.h:412
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc)
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
void setAvailableFeatures(const FeatureBitset &Value)
const MCSubtargetInfo & getSTI() const
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
virtual bool areEqualRegs(const MCParsedAsmOperand &Op1, const MCParsedAsmOperand &Op2) const
Returns whether two operands are registers and are equal.
Target specific streamer interface.
Definition: MCStreamer.h:93
This represents an "assembler immediate".
Definition: MCValue.h:36
int64_t getConstant() const
Definition: MCValue.h:43
const MCSymbolRefExpr * getSymB() const
Definition: MCValue.h:45
const MCSymbolRefExpr * getSymA() const
Definition: MCValue.h:44
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
constexpr bool isNoMatch() const
Represents a location in source code.
Definition: SMLoc.h:23
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:36
constexpr const char * getPointer() const
Definition: SMLoc.h:34
Represents a range in source code.
Definition: SMLoc.h:48
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
bool contains(const T &V) const
Check if the SmallSet contains the given element.
Definition: SmallSet.h:236
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
iterator end()
Definition: StringMap.h:221
iterator find(StringRef Key)
Definition: StringMap.h:234
void erase(iterator I)
Definition: StringMap.h:415
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition: StringMap.h:307
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:696
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:466
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:605
std::string upper() const
Convert the given ASCII string to uppercase.
Definition: StringRef.cpp:116
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:420
StringRef take_back(size_t N=1) const
Return a StringRef equal to 'this' but with only the last N elements remaining.
Definition: StringRef.h:585
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:811
std::string lower() const
Definition: StringRef.cpp:111
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:271
static constexpr size_t npos
Definition: StringRef.h:52
StringRef drop_back(size_t N=1) const
Return a StringRef equal to 'this' but with the last N elements dropped.
Definition: StringRef.h:612
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition: StringRef.h:170
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
EnvironmentType getEnvironment() const
Get the parsed environment type of this triple.
Definition: Triple.h:378
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCode getInvertedCondCode(CondCode Code)
uint32_t parseGenericRegister(StringRef Name)
const SysReg * lookupSysRegByName(StringRef)
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static float getFPImmFloat(unsigned Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftExtendName - Get the string encoding for the shift type.
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr ArchInfo ARMV8_9A
constexpr ArchInfo ARMV8_3A
constexpr ArchInfo ARMV8_7A
constexpr ArchInfo ARMV8R
constexpr ArchInfo ARMV8_4A
constexpr ArchInfo ARMV9_3A
const ArchInfo * parseArch(StringRef Arch)
constexpr ArchInfo ARMV8_6A
constexpr ArchInfo ARMV8_5A
const ArchInfo * getArchForCpu(StringRef CPU)
constexpr ArchInfo ARMV9_1A
constexpr ArchInfo ARMV9A
constexpr ArchInfo ARMV9_2A
constexpr ArchInfo ARMV9_4A
bool getExtensionFeatures(const AArch64::ExtensionBitset &Extensions, std::vector< StringRef > &Features)
constexpr ArchInfo ARMV8_8A
constexpr ArchInfo ARMV8_1A
constexpr ArchInfo ARMV8_2A
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
const CustomOperand< const MCSubtargetInfo & > Msg[]
bool isPredicated(const MCInst &MI, const MCInstrInfo *MCII)
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1523
float getFPImm(unsigned Imm)
@ CE
Windows NT (Windows on ARM)
@ SS
Definition: X86.h:207
Reg
All possible values of the reg field in the ModR/M byte.
constexpr double e
Definition: MathExtras.h:31
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
Format
The format used for serializing/deserializing remarks.
Definition: RemarkFormat.h:25
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
static std::optional< AArch64PACKey::ID > AArch64StringToPACKeyID(StringRef Name)
Return numeric key ID for 2-letter identifier string.
bool errorToBool(Error Err)
Helper for converting an Error to a bool.
Definition: Error.h:1071
@ Offset
Definition: DWP.cpp:456
@ Length
Definition: DWP.cpp:456
static int MCLOHNameToId(StringRef Name)
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:152
Target & getTheAArch64beTarget()
static StringRef MCLOHDirectiveName()
static bool isValidMCLOHType(unsigned Kind)
Target & getTheAArch64leTarget()
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
static unsigned getXRegFromWReg(unsigned Reg)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
Target & getTheAArch64_32Target()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
Target & getTheARM64_32Target()
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
static int MCLOHIdToNbArgs(MCLOHType Kind)
MCLOHType
Linker Optimization Hint Type.
static unsigned getWRegFromXReg(unsigned Reg)
Target & getTheARM64Target()
DWARFExpression::Operation Op
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1888
#define N
const FeatureBitset Features
const char * Name
A record for a potential prefetch made during the initial scan of the loop.
AArch64::ExtensionBitset DefaultExts
Description of the encoding of one expression Op.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...
bool haveFeatures(FeatureBitset ActiveFeatures) const
FeatureBitset getRequiredFeatures() const
const char * Name
FeatureBitset FeaturesRequired