1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateAsCounter,
72 SVEPredicateVector,
73 Matrix,
74 LookupTable
75};
76
77enum class MatrixKind { Array, Tile, Row, Col };
78
79enum RegConstraintEqualityTy {
80 EqualsReg,
81 EqualsSuperReg,
82 EqualsSubReg
83};
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
 89 // Map of register aliases registered via the .req directive.
 90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
 91
92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 MCRegister getDstReg() const { return Dst; }
140 MCRegister getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 MCRegister Dst;
150 MCRegister Pg;
151 } NextPrefix;
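 // For example, a predicated MOVPRFX pairing such as
 //   movprfx z0.d, p0/m, z1.d
 //   add     z0.d, p0/m, z0.d, z2.d
 // records Dst = z0, Pg = p0 and the element size, so the parser can later
 // check the destructive instruction that follows the prefix.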
152
 153 AArch64TargetStreamer &getTargetStreamer() {
 154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
 155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
 166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
 167 bool parseRegister(OperandVector &Operands);
 168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
 180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
 181 OperandVector &Operands, const MCSubtargetInfo *STI);
 182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFINegateRAStateWithPC();
199 bool parseDirectiveCFIBKeyFrame();
200 bool parseDirectiveCFIMTETaggedFrame();
201
202 bool parseDirectiveVariantPCS(SMLoc L);
203
204 bool parseDirectiveSEHAllocStack(SMLoc L);
205 bool parseDirectiveSEHPrologEnd(SMLoc L);
206 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
207 bool parseDirectiveSEHSaveFPLR(SMLoc L);
208 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
209 bool parseDirectiveSEHSaveReg(SMLoc L);
210 bool parseDirectiveSEHSaveRegX(SMLoc L);
211 bool parseDirectiveSEHSaveRegP(SMLoc L);
212 bool parseDirectiveSEHSaveRegPX(SMLoc L);
213 bool parseDirectiveSEHSaveLRPair(SMLoc L);
214 bool parseDirectiveSEHSaveFReg(SMLoc L);
215 bool parseDirectiveSEHSaveFRegX(SMLoc L);
216 bool parseDirectiveSEHSaveFRegP(SMLoc L);
217 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
218 bool parseDirectiveSEHSetFP(SMLoc L);
219 bool parseDirectiveSEHAddFP(SMLoc L);
220 bool parseDirectiveSEHNop(SMLoc L);
221 bool parseDirectiveSEHSaveNext(SMLoc L);
222 bool parseDirectiveSEHEpilogStart(SMLoc L);
223 bool parseDirectiveSEHEpilogEnd(SMLoc L);
224 bool parseDirectiveSEHTrapFrame(SMLoc L);
225 bool parseDirectiveSEHMachineFrame(SMLoc L);
226 bool parseDirectiveSEHContext(SMLoc L);
227 bool parseDirectiveSEHECContext(SMLoc L);
228 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
229 bool parseDirectiveSEHPACSignLR(SMLoc L);
230 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
231
 232 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
 233 SmallVectorImpl<SMLoc> &Loc);
 234 unsigned getNumRegsForRegKind(RegKind K);
 235 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
 236 OperandVector &Operands, MCStreamer &Out,
 237 uint64_t &ErrorInfo,
 238 bool MatchingInlineAsm) override;
239 /// @name Auto-generated Match Functions
240 /// {
241
242#define GET_ASSEMBLER_HEADER
243#include "AArch64GenAsmMatcher.inc"
244
245 /// }
246
247 ParseStatus tryParseScalarRegister(MCRegister &Reg);
248 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
249 RegKind MatchKind);
250 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
251 ParseStatus tryParseSVCR(OperandVector &Operands);
252 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
253 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
254 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
255 ParseStatus tryParseSysReg(OperandVector &Operands);
256 ParseStatus tryParseSysCROperand(OperandVector &Operands);
257 template <bool IsSVEPrefetch = false>
258 ParseStatus tryParsePrefetch(OperandVector &Operands);
259 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
260 ParseStatus tryParsePSBHint(OperandVector &Operands);
261 ParseStatus tryParseBTIHint(OperandVector &Operands);
262 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
263 ParseStatus tryParseAdrLabel(OperandVector &Operands);
264 template <bool AddFPZeroAsLiteral>
265 ParseStatus tryParseFPImm(OperandVector &Operands);
266 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
267 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
268 bool tryParseNeonVectorRegister(OperandVector &Operands);
269 ParseStatus tryParseVectorIndex(OperandVector &Operands);
270 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
271 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
272 template <bool ParseShiftExtend,
273 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
274 ParseStatus tryParseGPROperand(OperandVector &Operands);
275 ParseStatus tryParseZTOperand(OperandVector &Operands);
276 template <bool ParseShiftExtend, bool ParseSuffix>
277 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
278 template <RegKind RK>
279 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
 280 ParseStatus
 281 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
282 template <RegKind VectorKind>
283 ParseStatus tryParseVectorList(OperandVector &Operands,
284 bool ExpectMatch = false);
285 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
286 ParseStatus tryParseSVEPattern(OperandVector &Operands);
287 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
288 ParseStatus tryParseGPR64x8(OperandVector &Operands);
289 ParseStatus tryParseImmRange(OperandVector &Operands);
290 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
291 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
292
293public:
294 enum AArch64MatchResultTy {
295 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
296#define GET_OPERAND_DIAGNOSTIC_TYPES
297#include "AArch64GenAsmMatcher.inc"
298 };
299 bool IsILP32;
300 bool IsWindowsArm64EC;
301
302 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
303 const MCInstrInfo &MII, const MCTargetOptions &Options)
 304 : MCTargetAsmParser(Options, STI, MII) {
 305 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
 306 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
 307 MCAsmParserExtension::Initialize(Parser);
 308 MCStreamer &S = getParser().getStreamer();
 309 if (S.getTargetStreamer() == nullptr)
 310 new AArch64TargetStreamer(S);
 311
312 // Alias .hword/.word/.[dx]word to the target-independent
313 // .2byte/.4byte/.8byte directives as they have the same form and
314 // semantics:
315 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
316 Parser.addAliasForDirective(".hword", ".2byte");
317 Parser.addAliasForDirective(".word", ".4byte");
318 Parser.addAliasForDirective(".dword", ".8byte");
319 Parser.addAliasForDirective(".xword", ".8byte");
320
321 // Initialize the set of available features.
322 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
323 }
324
325 bool areEqualRegs(const MCParsedAsmOperand &Op1,
326 const MCParsedAsmOperand &Op2) const override;
 327 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
 328 SMLoc NameLoc, OperandVector &Operands) override;
329 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
 330 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
 331 SMLoc &EndLoc) override;
332 bool ParseDirective(AsmToken DirectiveID) override;
 333 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
 334 unsigned Kind) override;
335
336 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
337
338 static bool classifySymbolRef(const MCExpr *Expr,
339 AArch64MCExpr::VariantKind &ELFRefKind,
340 MCSymbolRefExpr::VariantKind &DarwinRefKind,
341 int64_t &Addend);
342};
343
344/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
345/// instruction.
346class AArch64Operand : public MCParsedAsmOperand {
347private:
348 enum KindTy {
349 k_Immediate,
350 k_ShiftedImm,
351 k_ImmRange,
352 k_CondCode,
353 k_Register,
354 k_MatrixRegister,
355 k_MatrixTileList,
356 k_SVCR,
357 k_VectorList,
358 k_VectorIndex,
359 k_Token,
360 k_SysReg,
361 k_SysCR,
362 k_Prefetch,
363 k_ShiftExtend,
364 k_FPImm,
365 k_Barrier,
366 k_PSBHint,
367 k_PHint,
368 k_BTIHint,
369 } Kind;
370
371 SMLoc StartLoc, EndLoc;
372
373 struct TokOp {
374 const char *Data;
375 unsigned Length;
376 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
377 };
378
379 // Separate shift/extend operand.
 380 struct ShiftExtendOp {
 381 AArch64_AM::ShiftExtendType Type;
 382 unsigned Amount;
383 bool HasExplicitAmount;
384 };
385
386 struct RegOp {
387 unsigned RegNum;
388 RegKind Kind;
389 int ElementWidth;
390
391 // The register may be allowed as a different register class,
392 // e.g. for GPR64as32 or GPR32as64.
393 RegConstraintEqualityTy EqualityTy;
394
395 // In some cases the shift/extend needs to be explicitly parsed together
396 // with the register, rather than as a separate operand. This is needed
397 // for addressing modes where the instruction as a whole dictates the
398 // scaling/extend, rather than specific bits in the instruction.
399 // By parsing them as a single operand, we avoid the need to pass an
400 // extra operand in all CodeGen patterns (because all operands need to
401 // have an associated value), and we avoid the need to update TableGen to
402 // accept operands that have no associated bits in the instruction.
403 //
404 // An added benefit of parsing them together is that the assembler
405 // can give a sensible diagnostic if the scaling is not correct.
406 //
407 // The default is 'lsl #0' (HasExplicitAmount = false) if no
408 // ShiftExtend is specified.
409 ShiftExtendOp ShiftExtend;
410 };
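 // For example, in "ldr x0, [x1, x2, lsl #3]" the "x2, lsl #3" part is parsed
 // as a single register operand whose ShiftExtend records LSL #3; the matcher
 // can then report a sensible error if the scale does not match the access
 // size (e.g. "lsl #2" on an 8-byte load).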
411
412 struct MatrixRegOp {
413 unsigned RegNum;
414 unsigned ElementWidth;
415 MatrixKind Kind;
416 };
417
418 struct MatrixTileListOp {
419 unsigned RegMask = 0;
420 };
421
422 struct VectorListOp {
423 unsigned RegNum;
424 unsigned Count;
425 unsigned Stride;
426 unsigned NumElements;
427 unsigned ElementWidth;
428 RegKind RegisterKind;
429 };
430
431 struct VectorIndexOp {
432 int Val;
433 };
434
435 struct ImmOp {
436 const MCExpr *Val;
437 };
438
439 struct ShiftedImmOp {
440 const MCExpr *Val;
441 unsigned ShiftAmount;
442 };
443
444 struct ImmRangeOp {
445 unsigned First;
446 unsigned Last;
447 };
448
 449 struct CondCodeOp {
 450 AArch64CC::CondCode Code;
 451 };
452
453 struct FPImmOp {
454 uint64_t Val; // APFloat value bitcasted to uint64_t.
455 bool IsExact; // describes whether parsed value was exact.
456 };
457
458 struct BarrierOp {
459 const char *Data;
460 unsigned Length;
461 unsigned Val; // Not the enum since not all values have names.
462 bool HasnXSModifier;
463 };
464
465 struct SysRegOp {
466 const char *Data;
467 unsigned Length;
468 uint32_t MRSReg;
469 uint32_t MSRReg;
470 uint32_t PStateField;
471 };
472
473 struct SysCRImmOp {
474 unsigned Val;
475 };
476
477 struct PrefetchOp {
478 const char *Data;
479 unsigned Length;
480 unsigned Val;
481 };
482
483 struct PSBHintOp {
484 const char *Data;
485 unsigned Length;
486 unsigned Val;
487 };
488 struct PHintOp {
489 const char *Data;
490 unsigned Length;
491 unsigned Val;
492 };
493 struct BTIHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498
499 struct SVCROp {
500 const char *Data;
501 unsigned Length;
502 unsigned PStateField;
503 };
504
505 union {
506 struct TokOp Tok;
507 struct RegOp Reg;
508 struct MatrixRegOp MatrixReg;
509 struct MatrixTileListOp MatrixTileList;
510 struct VectorListOp VectorList;
511 struct VectorIndexOp VectorIndex;
512 struct ImmOp Imm;
513 struct ShiftedImmOp ShiftedImm;
514 struct ImmRangeOp ImmRange;
515 struct CondCodeOp CondCode;
516 struct FPImmOp FPImm;
517 struct BarrierOp Barrier;
518 struct SysRegOp SysReg;
519 struct SysCRImmOp SysCRImm;
520 struct PrefetchOp Prefetch;
521 struct PSBHintOp PSBHint;
522 struct PHintOp PHint;
523 struct BTIHintOp BTIHint;
524 struct ShiftExtendOp ShiftExtend;
525 struct SVCROp SVCR;
526 };
527
 528 // Keep the MCContext around as the MCExprs may need to be manipulated
 529 // during the add<>Operands() calls.
530 MCContext &Ctx;
531
532public:
533 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
534
535 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
536 Kind = o.Kind;
537 StartLoc = o.StartLoc;
538 EndLoc = o.EndLoc;
539 switch (Kind) {
540 case k_Token:
541 Tok = o.Tok;
542 break;
543 case k_Immediate:
544 Imm = o.Imm;
545 break;
546 case k_ShiftedImm:
547 ShiftedImm = o.ShiftedImm;
548 break;
549 case k_ImmRange:
550 ImmRange = o.ImmRange;
551 break;
552 case k_CondCode:
553 CondCode = o.CondCode;
554 break;
555 case k_FPImm:
556 FPImm = o.FPImm;
557 break;
558 case k_Barrier:
559 Barrier = o.Barrier;
560 break;
561 case k_Register:
562 Reg = o.Reg;
563 break;
564 case k_MatrixRegister:
565 MatrixReg = o.MatrixReg;
566 break;
567 case k_MatrixTileList:
568 MatrixTileList = o.MatrixTileList;
569 break;
570 case k_VectorList:
571 VectorList = o.VectorList;
572 break;
573 case k_VectorIndex:
574 VectorIndex = o.VectorIndex;
575 break;
576 case k_SysReg:
577 SysReg = o.SysReg;
578 break;
579 case k_SysCR:
580 SysCRImm = o.SysCRImm;
581 break;
582 case k_Prefetch:
583 Prefetch = o.Prefetch;
584 break;
585 case k_PSBHint:
586 PSBHint = o.PSBHint;
587 break;
588 case k_PHint:
589 PHint = o.PHint;
590 break;
591 case k_BTIHint:
592 BTIHint = o.BTIHint;
593 break;
594 case k_ShiftExtend:
595 ShiftExtend = o.ShiftExtend;
596 break;
597 case k_SVCR:
598 SVCR = o.SVCR;
599 break;
600 }
601 }
602
603 /// getStartLoc - Get the location of the first token of this operand.
604 SMLoc getStartLoc() const override { return StartLoc; }
605 /// getEndLoc - Get the location of the last token of this operand.
606 SMLoc getEndLoc() const override { return EndLoc; }
607
608 StringRef getToken() const {
609 assert(Kind == k_Token && "Invalid access!");
610 return StringRef(Tok.Data, Tok.Length);
611 }
612
613 bool isTokenSuffix() const {
614 assert(Kind == k_Token && "Invalid access!");
615 return Tok.IsSuffix;
616 }
617
618 const MCExpr *getImm() const {
619 assert(Kind == k_Immediate && "Invalid access!");
620 return Imm.Val;
621 }
622
623 const MCExpr *getShiftedImmVal() const {
624 assert(Kind == k_ShiftedImm && "Invalid access!");
625 return ShiftedImm.Val;
626 }
627
628 unsigned getShiftedImmShift() const {
629 assert(Kind == k_ShiftedImm && "Invalid access!");
630 return ShiftedImm.ShiftAmount;
631 }
632
633 unsigned getFirstImmVal() const {
634 assert(Kind == k_ImmRange && "Invalid access!");
635 return ImmRange.First;
636 }
637
638 unsigned getLastImmVal() const {
639 assert(Kind == k_ImmRange && "Invalid access!");
640 return ImmRange.Last;
641 }
642
 643 AArch64CC::CondCode getCondCode() const {
 644 assert(Kind == k_CondCode && "Invalid access!");
645 return CondCode.Code;
646 }
647
648 APFloat getFPImm() const {
649 assert (Kind == k_FPImm && "Invalid access!");
650 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
651 }
652
653 bool getFPImmIsExact() const {
654 assert (Kind == k_FPImm && "Invalid access!");
655 return FPImm.IsExact;
656 }
657
658 unsigned getBarrier() const {
659 assert(Kind == k_Barrier && "Invalid access!");
660 return Barrier.Val;
661 }
662
663 StringRef getBarrierName() const {
664 assert(Kind == k_Barrier && "Invalid access!");
665 return StringRef(Barrier.Data, Barrier.Length);
666 }
667
668 bool getBarriernXSModifier() const {
669 assert(Kind == k_Barrier && "Invalid access!");
670 return Barrier.HasnXSModifier;
671 }
672
673 MCRegister getReg() const override {
674 assert(Kind == k_Register && "Invalid access!");
675 return Reg.RegNum;
676 }
677
678 unsigned getMatrixReg() const {
679 assert(Kind == k_MatrixRegister && "Invalid access!");
680 return MatrixReg.RegNum;
681 }
682
683 unsigned getMatrixElementWidth() const {
684 assert(Kind == k_MatrixRegister && "Invalid access!");
685 return MatrixReg.ElementWidth;
686 }
687
688 MatrixKind getMatrixKind() const {
689 assert(Kind == k_MatrixRegister && "Invalid access!");
690 return MatrixReg.Kind;
691 }
692
693 unsigned getMatrixTileListRegMask() const {
694 assert(isMatrixTileList() && "Invalid access!");
695 return MatrixTileList.RegMask;
696 }
697
698 RegConstraintEqualityTy getRegEqualityTy() const {
699 assert(Kind == k_Register && "Invalid access!");
700 return Reg.EqualityTy;
701 }
702
703 unsigned getVectorListStart() const {
704 assert(Kind == k_VectorList && "Invalid access!");
705 return VectorList.RegNum;
706 }
707
708 unsigned getVectorListCount() const {
709 assert(Kind == k_VectorList && "Invalid access!");
710 return VectorList.Count;
711 }
712
713 unsigned getVectorListStride() const {
714 assert(Kind == k_VectorList && "Invalid access!");
715 return VectorList.Stride;
716 }
717
718 int getVectorIndex() const {
719 assert(Kind == k_VectorIndex && "Invalid access!");
720 return VectorIndex.Val;
721 }
722
723 StringRef getSysReg() const {
724 assert(Kind == k_SysReg && "Invalid access!");
725 return StringRef(SysReg.Data, SysReg.Length);
726 }
727
728 unsigned getSysCR() const {
729 assert(Kind == k_SysCR && "Invalid access!");
730 return SysCRImm.Val;
731 }
732
733 unsigned getPrefetch() const {
734 assert(Kind == k_Prefetch && "Invalid access!");
735 return Prefetch.Val;
736 }
737
738 unsigned getPSBHint() const {
739 assert(Kind == k_PSBHint && "Invalid access!");
740 return PSBHint.Val;
741 }
742
743 unsigned getPHint() const {
744 assert(Kind == k_PHint && "Invalid access!");
745 return PHint.Val;
746 }
747
748 StringRef getPSBHintName() const {
749 assert(Kind == k_PSBHint && "Invalid access!");
750 return StringRef(PSBHint.Data, PSBHint.Length);
751 }
752
753 StringRef getPHintName() const {
754 assert(Kind == k_PHint && "Invalid access!");
755 return StringRef(PHint.Data, PHint.Length);
756 }
757
758 unsigned getBTIHint() const {
759 assert(Kind == k_BTIHint && "Invalid access!");
760 return BTIHint.Val;
761 }
762
763 StringRef getBTIHintName() const {
764 assert(Kind == k_BTIHint && "Invalid access!");
765 return StringRef(BTIHint.Data, BTIHint.Length);
766 }
767
768 StringRef getSVCR() const {
769 assert(Kind == k_SVCR && "Invalid access!");
770 return StringRef(SVCR.Data, SVCR.Length);
771 }
772
773 StringRef getPrefetchName() const {
774 assert(Kind == k_Prefetch && "Invalid access!");
775 return StringRef(Prefetch.Data, Prefetch.Length);
776 }
777
778 AArch64_AM::ShiftExtendType getShiftExtendType() const {
779 if (Kind == k_ShiftExtend)
780 return ShiftExtend.Type;
781 if (Kind == k_Register)
782 return Reg.ShiftExtend.Type;
783 llvm_unreachable("Invalid access!");
784 }
785
786 unsigned getShiftExtendAmount() const {
787 if (Kind == k_ShiftExtend)
788 return ShiftExtend.Amount;
789 if (Kind == k_Register)
790 return Reg.ShiftExtend.Amount;
791 llvm_unreachable("Invalid access!");
792 }
793
794 bool hasShiftExtendAmount() const {
795 if (Kind == k_ShiftExtend)
796 return ShiftExtend.HasExplicitAmount;
797 if (Kind == k_Register)
798 return Reg.ShiftExtend.HasExplicitAmount;
799 llvm_unreachable("Invalid access!");
800 }
801
802 bool isImm() const override { return Kind == k_Immediate; }
803 bool isMem() const override { return false; }
804
805 bool isUImm6() const {
806 if (!isImm())
807 return false;
808 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
809 if (!MCE)
810 return false;
811 int64_t Val = MCE->getValue();
812 return (Val >= 0 && Val < 64);
813 }
814
815 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
816
817 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
818 return isImmScaled<Bits, Scale>(true);
819 }
820
821 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
822 DiagnosticPredicate isUImmScaled() const {
823 if (IsRange && isImmRange() &&
824 (getLastImmVal() != getFirstImmVal() + Offset))
825 return DiagnosticPredicateTy::NoMatch;
826
827 return isImmScaled<Bits, Scale, IsRange>(false);
828 }
829
830 template <int Bits, int Scale, bool IsRange = false>
831 DiagnosticPredicate isImmScaled(bool Signed) const {
832 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
833 (isImmRange() && !IsRange))
834 return DiagnosticPredicateTy::NoMatch;
835
836 int64_t Val;
837 if (isImmRange())
838 Val = getFirstImmVal();
839 else {
840 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
841 if (!MCE)
842 return DiagnosticPredicateTy::NoMatch;
843 Val = MCE->getValue();
844 }
845
846 int64_t MinVal, MaxVal;
847 if (Signed) {
848 int64_t Shift = Bits - 1;
849 MinVal = (int64_t(1) << Shift) * -Scale;
850 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
851 } else {
852 MinVal = 0;
853 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
854 }
855
856 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
857 return DiagnosticPredicateTy::Match;
858
859 return DiagnosticPredicateTy::NearMatch;
860 }
861
862 DiagnosticPredicate isSVEPattern() const {
863 if (!isImm())
864 return DiagnosticPredicateTy::NoMatch;
865 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
866 if (!MCE)
867 return DiagnosticPredicateTy::NoMatch;
868 int64_t Val = MCE->getValue();
869 if (Val >= 0 && Val < 32)
870 return DiagnosticPredicateTy::Match;
871 return DiagnosticPredicateTy::NearMatch;
872 }
873
874 DiagnosticPredicate isSVEVecLenSpecifier() const {
875 if (!isImm())
876 return DiagnosticPredicateTy::NoMatch;
877 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
878 if (!MCE)
879 return DiagnosticPredicateTy::NoMatch;
880 int64_t Val = MCE->getValue();
881 if (Val >= 0 && Val <= 1)
882 return DiagnosticPredicateTy::Match;
883 return DiagnosticPredicateTy::NearMatch;
884 }
885
 886 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
 887 AArch64MCExpr::VariantKind ELFRefKind;
 888 MCSymbolRefExpr::VariantKind DarwinRefKind;
889 int64_t Addend;
890 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
891 Addend)) {
892 // If we don't understand the expression, assume the best and
893 // let the fixup and relocation code deal with it.
894 return true;
895 }
896
897 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
898 ELFRefKind == AArch64MCExpr::VK_LO12 ||
899 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
900 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
901 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
902 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
903 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
 904 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
 905 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
 906 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
 907 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
 908 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
909 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
910 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
911 // Note that we don't range-check the addend. It's adjusted modulo page
912 // size when converted, so there is no "out of range" condition when using
913 // @pageoff.
914 return true;
915 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
916 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
917 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
918 return Addend == 0;
919 }
920
921 return false;
922 }
923
924 template <int Scale> bool isUImm12Offset() const {
925 if (!isImm())
926 return false;
927
928 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
929 if (!MCE)
930 return isSymbolicUImm12Offset(getImm());
931
932 int64_t Val = MCE->getValue();
933 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
934 }
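 // For example, with Scale = 8 (an 8-byte access) constant offsets
 // 0, 8, ..., 32760 are accepted, e.g. "ldr x0, [x1, #32760]".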
935
936 template <int N, int M>
937 bool isImmInRange() const {
938 if (!isImm())
939 return false;
940 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
941 if (!MCE)
942 return false;
943 int64_t Val = MCE->getValue();
944 return (Val >= N && Val <= M);
945 }
946
947 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
948 // a logical immediate can always be represented when inverted.
949 template <typename T>
950 bool isLogicalImm() const {
951 if (!isImm())
952 return false;
953 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
954 if (!MCE)
955 return false;
956
957 int64_t Val = MCE->getValue();
958 // Avoid left shift by 64 directly.
959 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
960 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
961 if ((Val & Upper) && (Val & Upper) != Upper)
962 return false;
963
964 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
965 }
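 // For example, with T = uint64_t the value 0x00ff00ff00ff00ff (a repeating
 // run of ones) is a valid logical immediate, while 0x1234 is not; per the
 // note above, the bitwise NOT of any valid pattern is also valid.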
966
967 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
968
969 bool isImmRange() const { return Kind == k_ImmRange; }
970
971 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
972 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
973 /// immediate that can be shifted by 'Shift'.
974 template <unsigned Width>
975 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
976 if (isShiftedImm() && Width == getShiftedImmShift())
977 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
978 return std::make_pair(CE->getValue(), Width);
979
980 if (isImm())
981 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
982 int64_t Val = CE->getValue();
983 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
984 return std::make_pair(Val >> Width, Width);
985 else
986 return std::make_pair(Val, 0u);
987 }
988
989 return {};
990 }
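 // For example, getShiftedVal<12>() returns (1, 12) both for the plain
 // immediate #4096 and for the explicitly shifted "#1, lsl #12", and
 // (3, 0) for #3.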
991
992 bool isAddSubImm() const {
993 if (!isShiftedImm() && !isImm())
994 return false;
995
996 const MCExpr *Expr;
997
998 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
999 if (isShiftedImm()) {
1000 unsigned Shift = ShiftedImm.ShiftAmount;
1001 Expr = ShiftedImm.Val;
1002 if (Shift != 0 && Shift != 12)
1003 return false;
1004 } else {
1005 Expr = getImm();
1006 }
1007
1008 AArch64MCExpr::VariantKind ELFRefKind;
1009 MCSymbolRefExpr::VariantKind DarwinRefKind;
1010 int64_t Addend;
1011 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
1012 DarwinRefKind, Addend)) {
1013 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
1014 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
1015 (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
1016 ELFRefKind == AArch64MCExpr::VK_LO12 ||
1017 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
1018 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
1019 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
1020 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
1021 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
1022 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
1023 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
 1024 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
 1025 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
 1026 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
1027 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1028 }
1029
1030 // If it's a constant, it should be a real immediate in range.
1031 if (auto ShiftedVal = getShiftedVal<12>())
1032 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1033
1034 // If it's an expression, we hope for the best and let the fixup/relocation
1035 // code deal with it.
1036 return true;
1037 }
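 // For example, "add x0, x1, #4095", "add x0, x1, #1, lsl #12" and
 // "add x0, x1, #4096" (folded to #1, lsl #12 via getShiftedVal<12>()) are
 // all accepted; "#4097" is not, since it fits neither shifted form.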
1038
1039 bool isAddSubImmNeg() const {
1040 if (!isShiftedImm() && !isImm())
1041 return false;
1042
1043 // Otherwise it should be a real negative immediate in range.
1044 if (auto ShiftedVal = getShiftedVal<12>())
1045 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1046
1047 return false;
1048 }
1049
1050 // Signed value in the range -128 to +127. For element widths of
1051 // 16 bits or higher it may also be a signed multiple of 256 in the
1052 // range -32768 to +32512.
1053 // For element-width of 8 bits a range of -128 to 255 is accepted,
 1054 // since a copy of a byte can be either signed or unsigned.
1055 template <typename T>
1056 DiagnosticPredicate isSVECpyImm() const {
1057 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1058 return DiagnosticPredicateTy::NoMatch;
1059
1060 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1061 std::is_same<int8_t, T>::value;
1062 if (auto ShiftedImm = getShiftedVal<8>())
1063 if (!(IsByte && ShiftedImm->second) &&
1064 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1065 << ShiftedImm->second))
1066 return DiagnosticPredicateTy::Match;
1067
1068 return DiagnosticPredicateTy::NearMatch;
1069 }
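 // For example, for .h (or wider) elements "cpy z0.h, p0/m, #-32768" is
 // accepted as -128 shifted left by 8, while for .b elements plain values
 // from -128 up to 255 are accepted.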
1070
1071 // Unsigned value in the range 0 to 255. For element widths of
1072 // 16 bits or higher it may also be a signed multiple of 256 in the
1073 // range 0 to 65280.
1074 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1075 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1076 return DiagnosticPredicateTy::NoMatch;
1077
1078 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1079 std::is_same<int8_t, T>::value;
1080 if (auto ShiftedImm = getShiftedVal<8>())
1081 if (!(IsByte && ShiftedImm->second) &&
1082 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1083 << ShiftedImm->second))
1084 return DiagnosticPredicateTy::Match;
1085
1086 return DiagnosticPredicateTy::NearMatch;
1087 }
1088
1089 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1090 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1091 return DiagnosticPredicateTy::Match;
1092 return DiagnosticPredicateTy::NoMatch;
1093 }
1094
1095 bool isCondCode() const { return Kind == k_CondCode; }
1096
1097 bool isSIMDImmType10() const {
1098 if (!isImm())
1099 return false;
1100 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1101 if (!MCE)
 1102 return false;
 1103 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
 1104 }
1105
1106 template<int N>
1107 bool isBranchTarget() const {
1108 if (!isImm())
1109 return false;
1110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1111 if (!MCE)
1112 return true;
1113 int64_t Val = MCE->getValue();
1114 if (Val & 0x3)
1115 return false;
1116 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1117 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1118 }
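 // For example, B and BL use N = 26, so a constant target must be a multiple
 // of 4 in the range -0x8000000 .. 0x7fffffc (roughly +/-128 MiB).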
1119
1120 bool
1121 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1122 if (!isImm())
1123 return false;
1124
1125 AArch64MCExpr::VariantKind ELFRefKind;
1126 MCSymbolRefExpr::VariantKind DarwinRefKind;
1127 int64_t Addend;
1128 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1129 DarwinRefKind, Addend)) {
1130 return false;
1131 }
1132 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1133 return false;
1134
1135 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1136 }
1137
 1138 bool isMovWSymbolG3() const {
 1139 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
 1140 }
1141
1142 bool isMovWSymbolG2() const {
1143 return isMovWSymbol(
1148 }
1149
1150 bool isMovWSymbolG1() const {
1151 return isMovWSymbol(
1157 }
1158
1159 bool isMovWSymbolG0() const {
1160 return isMovWSymbol(
1166 }
1167
1168 template<int RegWidth, int Shift>
1169 bool isMOVZMovAlias() const {
1170 if (!isImm()) return false;
1171
1172 const MCExpr *E = getImm();
1173 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1174 uint64_t Value = CE->getValue();
1175
1176 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1177 }
1178 // Only supports the case of Shift being 0 if an expression is used as an
1179 // operand
1180 return !Shift && E;
1181 }
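 // For example, "mov x0, #0x12340000" matches isMOVZMovAlias<64, 16> and is
 // encoded as "movz x0, #0x1234, lsl #16".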
1182
1183 template<int RegWidth, int Shift>
1184 bool isMOVNMovAlias() const {
1185 if (!isImm()) return false;
1186
1187 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1188 if (!CE) return false;
1189 uint64_t Value = CE->getValue();
1190
1191 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1192 }
1193
1194 bool isFPImm() const {
1195 return Kind == k_FPImm &&
1196 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1197 }
1198
1199 bool isBarrier() const {
1200 return Kind == k_Barrier && !getBarriernXSModifier();
1201 }
1202 bool isBarriernXS() const {
1203 return Kind == k_Barrier && getBarriernXSModifier();
1204 }
1205 bool isSysReg() const { return Kind == k_SysReg; }
1206
1207 bool isMRSSystemRegister() const {
1208 if (!isSysReg()) return false;
1209
1210 return SysReg.MRSReg != -1U;
1211 }
1212
1213 bool isMSRSystemRegister() const {
1214 if (!isSysReg()) return false;
1215 return SysReg.MSRReg != -1U;
1216 }
1217
1218 bool isSystemPStateFieldWithImm0_1() const {
1219 if (!isSysReg()) return false;
1220 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1221 }
1222
1223 bool isSystemPStateFieldWithImm0_15() const {
1224 if (!isSysReg())
1225 return false;
1226 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1227 }
1228
1229 bool isSVCR() const {
1230 if (Kind != k_SVCR)
1231 return false;
1232 return SVCR.PStateField != -1U;
1233 }
1234
1235 bool isReg() const override {
1236 return Kind == k_Register;
1237 }
1238
1239 bool isVectorList() const { return Kind == k_VectorList; }
1240
1241 bool isScalarReg() const {
1242 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1243 }
1244
1245 bool isNeonVectorReg() const {
1246 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1247 }
1248
1249 bool isNeonVectorRegLo() const {
1250 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1251 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1252 Reg.RegNum) ||
1253 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1254 Reg.RegNum));
1255 }
1256
1257 bool isNeonVectorReg0to7() const {
1258 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1259 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1260 Reg.RegNum));
1261 }
1262
1263 bool isMatrix() const { return Kind == k_MatrixRegister; }
1264 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1265
1266 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1267 RegKind RK;
1268 switch (Class) {
1269 case AArch64::PPRRegClassID:
1270 case AArch64::PPR_3bRegClassID:
1271 case AArch64::PPR_p8to15RegClassID:
1272 case AArch64::PNRRegClassID:
1273 case AArch64::PNR_p8to15RegClassID:
1274 case AArch64::PPRorPNRRegClassID:
1275 RK = RegKind::SVEPredicateAsCounter;
1276 break;
1277 default:
1278 llvm_unreachable("Unsupport register class");
1279 }
1280
1281 return (Kind == k_Register && Reg.Kind == RK) &&
1282 AArch64MCRegisterClasses[Class].contains(getReg());
1283 }
1284
1285 template <unsigned Class> bool isSVEVectorReg() const {
1286 RegKind RK;
1287 switch (Class) {
1288 case AArch64::ZPRRegClassID:
1289 case AArch64::ZPR_3bRegClassID:
1290 case AArch64::ZPR_4bRegClassID:
1291 case AArch64::ZPRMul2_LoRegClassID:
1292 case AArch64::ZPRMul2_HiRegClassID:
1293 case AArch64::ZPR_KRegClassID:
1294 RK = RegKind::SVEDataVector;
1295 break;
1296 case AArch64::PPRRegClassID:
1297 case AArch64::PPR_3bRegClassID:
1298 case AArch64::PPR_p8to15RegClassID:
1299 case AArch64::PNRRegClassID:
1300 case AArch64::PNR_p8to15RegClassID:
1301 case AArch64::PPRorPNRRegClassID:
1302 RK = RegKind::SVEPredicateVector;
1303 break;
1304 default:
1305 llvm_unreachable("Unsupport register class");
1306 }
1307
1308 return (Kind == k_Register && Reg.Kind == RK) &&
1309 AArch64MCRegisterClasses[Class].contains(getReg());
1310 }
1311
1312 template <unsigned Class> bool isFPRasZPR() const {
1313 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1314 AArch64MCRegisterClasses[Class].contains(getReg());
1315 }
1316
1317 template <int ElementWidth, unsigned Class>
1318 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1319 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1320 return DiagnosticPredicateTy::NoMatch;
1321
1322 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1323 return DiagnosticPredicateTy::Match;
1324
1325 return DiagnosticPredicateTy::NearMatch;
1326 }
1327
1328 template <int ElementWidth, unsigned Class>
1329 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1330 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1331 Reg.Kind != RegKind::SVEPredicateVector))
1332 return DiagnosticPredicateTy::NoMatch;
1333
1334 if ((isSVEPredicateAsCounterReg<Class>() ||
1335 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1336 Reg.ElementWidth == ElementWidth)
1337 return DiagnosticPredicateTy::Match;
1338
1339 return DiagnosticPredicateTy::NearMatch;
1340 }
1341
1342 template <int ElementWidth, unsigned Class>
1343 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1344 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1345 return DiagnosticPredicateTy::NoMatch;
1346
1347 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1348 return DiagnosticPredicateTy::Match;
1349
1350 return DiagnosticPredicateTy::NearMatch;
1351 }
1352
1353 template <int ElementWidth, unsigned Class>
1354 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1355 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1356 return DiagnosticPredicateTy::NoMatch;
1357
1358 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1359 return DiagnosticPredicateTy::Match;
1360
1361 return DiagnosticPredicateTy::NearMatch;
1362 }
1363
1364 template <int ElementWidth, unsigned Class,
1365 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1366 bool ShiftWidthAlwaysSame>
1367 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1368 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1369 if (!VectorMatch.isMatch())
1370 return DiagnosticPredicateTy::NoMatch;
1371
1372 // Give a more specific diagnostic when the user has explicitly typed in
1373 // a shift-amount that does not match what is expected, but for which
1374 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1375 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1376 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1377 ShiftExtendTy == AArch64_AM::SXTW) &&
1378 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1379 return DiagnosticPredicateTy::NoMatch;
1380
1381 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1382 return DiagnosticPredicateTy::Match;
1383
1384 return DiagnosticPredicateTy::NearMatch;
1385 }
1386
1387 bool isGPR32as64() const {
1388 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1389 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1390 }
1391
1392 bool isGPR64as32() const {
1393 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1394 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1395 }
1396
1397 bool isGPR64x8() const {
1398 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1399 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1400 Reg.RegNum);
1401 }
1402
1403 bool isWSeqPair() const {
1404 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1405 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1406 Reg.RegNum);
1407 }
1408
1409 bool isXSeqPair() const {
1410 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1411 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1412 Reg.RegNum);
1413 }
1414
1415 bool isSyspXzrPair() const {
1416 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1417 }
1418
1419 template<int64_t Angle, int64_t Remainder>
1420 DiagnosticPredicate isComplexRotation() const {
1421 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1422
1423 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1424 if (!CE) return DiagnosticPredicateTy::NoMatch;
1425 uint64_t Value = CE->getValue();
1426
1427 if (Value % Angle == Remainder && Value <= 270)
1428 return DiagnosticPredicateTy::Match;
1429 return DiagnosticPredicateTy::NearMatch;
1430 }
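 // For example, isComplexRotation<90, 0> accepts the FCMLA-style rotations
 // 0, 90, 180 and 270, while isComplexRotation<180, 90> accepts the
 // FCADD-style rotations 90 and 270.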
1431
1432 template <unsigned RegClassID> bool isGPR64() const {
1433 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1434 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1435 }
1436
1437 template <unsigned RegClassID, int ExtWidth>
1438 DiagnosticPredicate isGPR64WithShiftExtend() const {
1439 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1440 return DiagnosticPredicateTy::NoMatch;
1441
1442 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1443 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1444 return DiagnosticPredicateTy::Match;
1445 return DiagnosticPredicateTy::NearMatch;
1446 }
1447
1448 /// Is this a vector list with the type implicit (presumably attached to the
1449 /// instruction itself)?
1450 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1451 bool isImplicitlyTypedVectorList() const {
1452 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1453 VectorList.NumElements == 0 &&
1454 VectorList.RegisterKind == VectorKind &&
1455 (!IsConsecutive || (VectorList.Stride == 1));
1456 }
1457
1458 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1459 unsigned ElementWidth, unsigned Stride = 1>
1460 bool isTypedVectorList() const {
1461 if (Kind != k_VectorList)
1462 return false;
1463 if (VectorList.Count != NumRegs)
1464 return false;
1465 if (VectorList.RegisterKind != VectorKind)
1466 return false;
1467 if (VectorList.ElementWidth != ElementWidth)
1468 return false;
1469 if (VectorList.Stride != Stride)
1470 return false;
1471 return VectorList.NumElements == NumElements;
1472 }
1473
1474 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1475 unsigned ElementWidth, unsigned RegClass>
1476 DiagnosticPredicate isTypedVectorListMultiple() const {
1477 bool Res =
1478 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1479 if (!Res)
1480 return DiagnosticPredicateTy::NoMatch;
1481 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1482 return DiagnosticPredicateTy::NearMatch;
1483 return DiagnosticPredicateTy::Match;
1484 }
1485
1486 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1487 unsigned ElementWidth>
1488 DiagnosticPredicate isTypedVectorListStrided() const {
1489 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1490 ElementWidth, Stride>();
1491 if (!Res)
1492 return DiagnosticPredicateTy::NoMatch;
1493 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1494 ((VectorList.RegNum >= AArch64::Z16) &&
1495 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1496 return DiagnosticPredicateTy::Match;
1497 return DiagnosticPredicateTy::NoMatch;
1498 }
1499
1500 template <int Min, int Max>
1501 DiagnosticPredicate isVectorIndex() const {
1502 if (Kind != k_VectorIndex)
1503 return DiagnosticPredicateTy::NoMatch;
1504 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1505 return DiagnosticPredicateTy::Match;
1506 return DiagnosticPredicateTy::NearMatch;
1507 }
1508
1509 bool isToken() const override { return Kind == k_Token; }
1510
1511 bool isTokenEqual(StringRef Str) const {
1512 return Kind == k_Token && getToken() == Str;
1513 }
1514 bool isSysCR() const { return Kind == k_SysCR; }
1515 bool isPrefetch() const { return Kind == k_Prefetch; }
1516 bool isPSBHint() const { return Kind == k_PSBHint; }
1517 bool isPHint() const { return Kind == k_PHint; }
1518 bool isBTIHint() const { return Kind == k_BTIHint; }
1519 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1520 bool isShifter() const {
1521 if (!isShiftExtend())
1522 return false;
1523
1524 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1525 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1526 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1527 ST == AArch64_AM::MSL);
1528 }
1529
1530 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1531 if (Kind != k_FPImm)
1532 return DiagnosticPredicateTy::NoMatch;
1533
1534 if (getFPImmIsExact()) {
1535 // Lookup the immediate from table of supported immediates.
1536 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1537 assert(Desc && "Unknown enum value");
1538
1539 // Calculate its FP value.
1540 APFloat RealVal(APFloat::IEEEdouble());
1541 auto StatusOrErr =
1542 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1543 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1544 llvm_unreachable("FP immediate is not exact");
1545
1546 if (getFPImm().bitwiseIsEqual(RealVal))
1547 return DiagnosticPredicateTy::Match;
1548 }
1549
1550 return DiagnosticPredicateTy::NearMatch;
1551 }
1552
1553 template <unsigned ImmA, unsigned ImmB>
1554 DiagnosticPredicate isExactFPImm() const {
1555 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1556 if ((Res = isExactFPImm<ImmA>()))
1557 return DiagnosticPredicateTy::Match;
1558 if ((Res = isExactFPImm<ImmB>()))
1559 return DiagnosticPredicateTy::Match;
1560 return Res;
1561 }
1562
1563 bool isExtend() const {
1564 if (!isShiftExtend())
1565 return false;
1566
1567 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1568 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1569 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1570 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1571 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1572 ET == AArch64_AM::LSL) &&
1573 getShiftExtendAmount() <= 4;
1574 }
1575
1576 bool isExtend64() const {
1577 if (!isExtend())
1578 return false;
1579 // Make sure the extend expects a 32-bit source register.
1580 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1581 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1582 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1583 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1584 }
1585
1586 bool isExtendLSL64() const {
1587 if (!isExtend())
1588 return false;
1589 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1590 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1591 ET == AArch64_AM::LSL) &&
1592 getShiftExtendAmount() <= 4;
1593 }
1594
1595 bool isLSLImm3Shift() const {
1596 if (!isShiftExtend())
1597 return false;
1598 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1599 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1600 }
1601
1602 template<int Width> bool isMemXExtend() const {
1603 if (!isExtend())
1604 return false;
1605 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1606 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1607 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1608 getShiftExtendAmount() == 0);
1609 }
1610
1611 template<int Width> bool isMemWExtend() const {
1612 if (!isExtend())
1613 return false;
1614 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1615 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1616 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1617 getShiftExtendAmount() == 0);
1618 }
1619
1620 template <unsigned width>
1621 bool isArithmeticShifter() const {
1622 if (!isShifter())
1623 return false;
1624
1625 // An arithmetic shifter is LSL, LSR, or ASR.
1626 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1627 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1628 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1629 }
1630
1631 template <unsigned width>
1632 bool isLogicalShifter() const {
1633 if (!isShifter())
1634 return false;
1635
1636 // A logical shifter is LSL, LSR, ASR or ROR.
1637 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1638 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1639 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1640 getShiftExtendAmount() < width;
1641 }
1642
1643 bool isMovImm32Shifter() const {
1644 if (!isShifter())
1645 return false;
1646
 1647 // A 32-bit MOVi shifter is LSL of 0 or 16.
1648 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1649 if (ST != AArch64_AM::LSL)
1650 return false;
1651 uint64_t Val = getShiftExtendAmount();
1652 return (Val == 0 || Val == 16);
1653 }
1654
1655 bool isMovImm64Shifter() const {
1656 if (!isShifter())
1657 return false;
1658
 1659 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1660 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1661 if (ST != AArch64_AM::LSL)
1662 return false;
1663 uint64_t Val = getShiftExtendAmount();
1664 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1665 }
1666
1667 bool isLogicalVecShifter() const {
1668 if (!isShifter())
1669 return false;
1670
1671 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1672 unsigned Shift = getShiftExtendAmount();
1673 return getShiftExtendType() == AArch64_AM::LSL &&
1674 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1675 }
1676
1677 bool isLogicalVecHalfWordShifter() const {
1678 if (!isLogicalVecShifter())
1679 return false;
1680
 1681 // A logical vector half-word shifter is a left shift by 0 or 8.
1682 unsigned Shift = getShiftExtendAmount();
1683 return getShiftExtendType() == AArch64_AM::LSL &&
1684 (Shift == 0 || Shift == 8);
1685 }
1686
1687 bool isMoveVecShifter() const {
1688 if (!isShiftExtend())
1689 return false;
1690
 1691 // A move vector shifter is an MSL shift by 8 or 16.
1692 unsigned Shift = getShiftExtendAmount();
1693 return getShiftExtendType() == AArch64_AM::MSL &&
1694 (Shift == 8 || Shift == 16);
1695 }
1696
1697 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1698 // to LDUR/STUR when the offset is not legal for the former but is for
1699 // the latter. As such, in addition to checking for being a legal unscaled
1700 // address, also check that it is not a legal scaled address. This avoids
1701 // ambiguity in the matcher.
1702 template<int Width>
1703 bool isSImm9OffsetFB() const {
1704 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1705 }
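 // For example, with Width = 64 the operand "#1" in "ldr x0, [x1, #1]" is not
 // a legal scaled offset, so it matches this fallback and the instruction is
 // assembled as "ldur x0, [x1, #1]"; "#8" would match the scaled LDR instead.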
1706
1707 bool isAdrpLabel() const {
1708 // Validation was handled during parsing, so we just verify that
1709 // something didn't go haywire.
1710 if (!isImm())
1711 return false;
1712
1713 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1714 int64_t Val = CE->getValue();
1715 int64_t Min = - (4096 * (1LL << (21 - 1)));
1716 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1717 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1718 }
1719
1720 return true;
1721 }
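 // For example, a constant ADRP operand must be page-aligned and within
 // +/-4 GiB (e.g. an offset of 0x201000 is accepted, 0x201004 is not).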
1722
1723 bool isAdrLabel() const {
1724 // Validation was handled during parsing, so we just verify that
1725 // something didn't go haywire.
1726 if (!isImm())
1727 return false;
1728
1729 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1730 int64_t Val = CE->getValue();
1731 int64_t Min = - (1LL << (21 - 1));
1732 int64_t Max = ((1LL << (21 - 1)) - 1);
1733 return Val >= Min && Val <= Max;
1734 }
1735
1736 return true;
1737 }
1738
1739 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1740 DiagnosticPredicate isMatrixRegOperand() const {
1741 if (!isMatrix())
1742 return DiagnosticPredicateTy::NoMatch;
1743 if (getMatrixKind() != Kind ||
1744 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1745 EltSize != getMatrixElementWidth())
1746 return DiagnosticPredicateTy::NearMatch;
1747 return DiagnosticPredicateTy::Match;
1748 }
1749
1750 bool isPAuthPCRelLabel16Operand() const {
 1751 // PAuth PCRel16 operands are similar to regular branch targets, but only
 1752 // negative values are allowed for concrete immediates, as the signing
 1753 // instruction is expected to be at a lower address.
1754 if (!isImm())
1755 return false;
1756 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1757 if (!MCE)
1758 return true;
1759 int64_t Val = MCE->getValue();
1760 if (Val & 0b11)
1761 return false;
1762 return (Val <= 0) && (Val > -(1 << 18));
1763 }
1764
1765 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1766 // Add as immediates when possible. Null MCExpr = 0.
 1767 if (!Expr)
 1768 Inst.addOperand(MCOperand::createImm(0));
 1769 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
 1770 Inst.addOperand(MCOperand::createImm(CE->getValue()));
 1771 else
 1772 Inst.addOperand(MCOperand::createExpr(Expr));
 1773 }
1774
1775 void addRegOperands(MCInst &Inst, unsigned N) const {
 1776 assert(N == 1 && "Invalid number of operands!");
 1777 Inst.addOperand(MCOperand::createReg(getReg()));
 1778 }
1779
1780 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!");
1782 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1783 }
1784
1785 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1786 assert(N == 1 && "Invalid number of operands!");
1787 assert(
1788 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1789
1790 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1791 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1792 RI->getEncodingValue(getReg()));
 1793
 1794 Inst.addOperand(MCOperand::createReg(Reg));
 1795 }
1796
1797 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1798 assert(N == 1 && "Invalid number of operands!");
1799 assert(
1800 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1801
1802 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1803 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1804 RI->getEncodingValue(getReg()));
 1805
 1806 Inst.addOperand(MCOperand::createReg(Reg));
 1807 }
1808
1809 template <int Width>
1810 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1811 unsigned Base;
1812 switch (Width) {
1813 case 8: Base = AArch64::B0; break;
1814 case 16: Base = AArch64::H0; break;
1815 case 32: Base = AArch64::S0; break;
1816 case 64: Base = AArch64::D0; break;
1817 case 128: Base = AArch64::Q0; break;
1818 default:
1819 llvm_unreachable("Unsupported width");
1820 }
1821 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1822 }
1823
1824 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1825 assert(N == 1 && "Invalid number of operands!");
1826 unsigned Reg = getReg();
1827 // Normalise to PPR
1828 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1829 Reg = Reg - AArch64::PN0 + AArch64::P0;
1830 Inst.addOperand(MCOperand::createReg(Reg));
1831 }
1832
1833 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1834 assert(N == 1 && "Invalid number of operands!");
1835 Inst.addOperand(
1836 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1837 }
1838
1839 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841 assert(
1842 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1843 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1844 }
1845
1846 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1847 assert(N == 1 && "Invalid number of operands!");
1848 assert(
1849 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1850 Inst.addOperand(MCOperand::createReg(getReg()));
1851 }
1852
1853 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1854 assert(N == 1 && "Invalid number of operands!");
1855 Inst.addOperand(MCOperand::createReg(getReg()));
1856 }
1857
1858 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1859 assert(N == 1 && "Invalid number of operands!");
1860 Inst.addOperand(MCOperand::createReg(getReg()));
1861 }
1862
1863 enum VecListIndexType {
1864 VecListIdx_DReg = 0,
1865 VecListIdx_QReg = 1,
1866 VecListIdx_ZReg = 2,
1867 VecListIdx_PReg = 3,
1868 };
1869
1870 template <VecListIndexType RegTy, unsigned NumRegs,
1871 bool IsConsecutive = false>
1872 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1873 assert(N == 1 && "Invalid number of operands!");
1874 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1875 "Expected consecutive registers");
1876 static const unsigned FirstRegs[][5] = {
1877 /* DReg */ { AArch64::Q0,
1878 AArch64::D0, AArch64::D0_D1,
1879 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1880 /* QReg */ { AArch64::Q0,
1881 AArch64::Q0, AArch64::Q0_Q1,
1882 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1883 /* ZReg */ { AArch64::Z0,
1884 AArch64::Z0, AArch64::Z0_Z1,
1885 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1886 /* PReg */ { AArch64::P0,
1887 AArch64::P0, AArch64::P0_P1 }
1888 };
1889
1890 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1891 " NumRegs must be <= 4 for ZRegs");
1892
1893 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1894 " NumRegs must be <= 2 for PRegs");
1895
1896 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1897 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1898 FirstRegs[(unsigned)RegTy][0]));
1899 }
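 // Example: a NEON list "{ v2.8h, v3.8h }" matched as <VecListIdx_QReg, 2>
 // starts from FirstRegs[QReg][2] == Q0_Q1 and adds the distance from Q0,
 // yielding the Q2_Q3 tuple register (assuming the sequential tuple-register
 // enum layout this table relies on).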
1900
1901 template <unsigned NumRegs>
1902 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1903 assert(N == 1 && "Invalid number of operands!");
1904 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1905
1906 switch (NumRegs) {
1907 case 2:
1908 if (getVectorListStart() < AArch64::Z16) {
1909 assert((getVectorListStart() < AArch64::Z8) &&
1910 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1911 Inst.addOperand(MCOperand::createReg(
1912 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1913 } else {
1914 assert((getVectorListStart() < AArch64::Z24) &&
1915 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1916 Inst.addOperand(MCOperand::createReg(
1917 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1918 }
1919 break;
1920 case 4:
1921 if (getVectorListStart() < AArch64::Z16) {
1922 assert((getVectorListStart() < AArch64::Z4) &&
1923 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1924 Inst.addOperand(MCOperand::createReg(
1925 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1926 } else {
1927 assert((getVectorListStart() < AArch64::Z20) &&
1928 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1929 Inst.addOperand(MCOperand::createReg(
1930 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1931 }
1932 break;
1933 default:
1934 llvm_unreachable("Unsupported number of registers for strided vec list");
1935 }
1936 }
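 // Example: a two-register strided list starting at Z3 is emitted as
 // Z0_Z8 + (Z3 - Z0), i.e. the { z3, z11 } pair, while one starting at Z19
 // becomes Z16_Z24 + (Z19 - Z16), i.e. { z19, z27 }.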
1937
1938 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1939 assert(N == 1 && "Invalid number of operands!");
1940 unsigned RegMask = getMatrixTileListRegMask();
1941 assert(RegMask <= 0xFF && "Invalid mask!");
1942 Inst.addOperand(MCOperand::createImm(RegMask));
1943 }
1944
1945 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1946 assert(N == 1 && "Invalid number of operands!");
1947 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1948 }
1949
1950 template <unsigned ImmIs0, unsigned ImmIs1>
1951 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1952 assert(N == 1 && "Invalid number of operands!");
1953 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1954 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1955 }
1956
1957 void addImmOperands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 // If this is a pageoff symrefexpr with an addend, adjust the addend
1960 // to be only the page-offset portion. Otherwise, just add the expr
1961 // as-is.
1962 addExpr(Inst, getImm());
1963 }
1964
1965 template <int Shift>
1966 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1967 assert(N == 2 && "Invalid number of operands!");
1968 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1969 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1970 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1971 } else if (isShiftedImm()) {
1972 addExpr(Inst, getShiftedImmVal());
1973 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1974 } else {
1975 addExpr(Inst, getImm());
1976 Inst.addOperand(MCOperand::createImm(0));
1977 }
1978 }
1979
1980 template <int Shift>
1981 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1982 assert(N == 2 && "Invalid number of operands!");
1983 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1984 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1985 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1986 } else
1987 llvm_unreachable("Not a shifted negative immediate");
1988 }
1989
1990 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1991 assert(N == 1 && "Invalid number of operands!");
1992 Inst.addOperand(MCOperand::createImm(getCondCode()));
1993 }
1994
1995 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1996 assert(N == 1 && "Invalid number of operands!");
1997 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1998 if (!MCE)
1999 addExpr(Inst, getImm());
2000 else
2001 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2002 }
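 // Example: a constant operand of 0x3000 becomes
 // MCOperand::createImm(0x3000 >> 12), i.e. page index 3; symbolic label
 // operands are added as expressions and resolved by a later fixup.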
2003
2004 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2005 addImmOperands(Inst, N);
2006 }
2007
2008 template<int Scale>
2009 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2010 assert(N == 1 && "Invalid number of operands!");
2011 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2012
2013 if (!MCE) {
2014 Inst.addOperand(MCOperand::createExpr(getImm()));
2015 return;
2016 }
2017 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2018 }
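 // Example: instantiated with Scale == 8 for a doubleword access, a byte
 // offset of #32 is emitted as the scaled immediate 4; non-constant offsets
 // (e.g. :lo12: references) are passed through as expressions.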
2019
2020 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2021 assert(N == 1 && "Invalid number of operands!");
2022 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2023 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2024 }
2025
2026 template <int Scale>
2027 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2028 assert(N == 1 && "Invalid number of operands!");
2029 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2030 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2031 }
2032
2033 template <int Scale>
2034 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2035 assert(N == 1 && "Invalid number of operands!");
2036 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2037 }
2038
2039 template <typename T>
2040 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2041 assert(N == 1 && "Invalid number of operands!");
2042 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2043 std::make_unsigned_t<T> Val = MCE->getValue();
2044 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2045 Inst.addOperand(MCOperand::createImm(encoding));
2046 }
2047
2048 template <typename T>
2049 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!");
2051 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2052 std::make_unsigned_t<T> Val = ~MCE->getValue();
2053 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2054 Inst.addOperand(MCOperand::createImm(encoding));
2055 }
2056
2057 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2058 assert(N == 1 && "Invalid number of operands!");
2059 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2060 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2061 Inst.addOperand(MCOperand::createImm(encoding));
2062 }
2063
2064 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2065 // Branch operands don't encode the low bits, so shift them off
2066 // here. If it's a label, however, just put it on directly as there's
2067 // not enough information now to do anything.
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2070 if (!MCE) {
2071 addExpr(Inst, getImm());
2072 return;
2073 }
2074 assert(MCE && "Invalid constant immediate operand!");
2075 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2076 }
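 // Example: a constant branch distance of #8 bytes is emitted as immediate
 // 2 (8 >> 2); label operands stay symbolic so the branch fixup can be
 // applied once the distance is known.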
2077
2078 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2079 // PC-relative operands don't encode the low bits, so shift them off
2080 // here. If it's a label, however, just put it on directly as there's
2081 // not enough information now to do anything.
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2084 if (!MCE) {
2085 addExpr(Inst, getImm());
2086 return;
2087 }
2088 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2089 }
2090
2091 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2092 // Branch operands don't encode the low bits, so shift them off
2093 // here. If it's a label, however, just put it on directly as there's
2094 // not enough information now to do anything.
2095 assert(N == 1 && "Invalid number of operands!");
2096 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2097 if (!MCE) {
2098 addExpr(Inst, getImm());
2099 return;
2100 }
2101 assert(MCE && "Invalid constant immediate operand!");
2102 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2103 }
2104
2105 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2120 // Branch operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 assert(MCE && "Invalid constant immediate operand!");
2130 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2131 }
2132
2133 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2134 assert(N == 1 && "Invalid number of operands!");
2135 Inst.addOperand(MCOperand::createImm(
2136 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2137 }
2138
2139 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(MCOperand::createImm(getBarrier()));
2142 }
2143
2144 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146 Inst.addOperand(MCOperand::createImm(getBarrier()));
2147 }
2148
2149 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2150 assert(N == 1 && "Invalid number of operands!");
2151
2152 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2153 }
2154
2155 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2156 assert(N == 1 && "Invalid number of operands!");
2157
2158 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2159 }
2160
2161 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2162 assert(N == 1 && "Invalid number of operands!");
2163
2164 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2165 }
2166
2167 void addSVCROperands(MCInst &Inst, unsigned N) const {
2168 assert(N == 1 && "Invalid number of operands!");
2169
2170 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2171 }
2172
2173 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175
2176 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2177 }
2178
2179 void addSysCROperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(MCOperand::createImm(getSysCR()));
2182 }
2183
2184 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2187 }
2188
2189 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2192 }
2193
2194 void addPHintOperands(MCInst &Inst, unsigned N) const {
2195 assert(N == 1 && "Invalid number of operands!");
2196 Inst.addOperand(MCOperand::createImm(getPHint()));
2197 }
2198
2199 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2200 assert(N == 1 && "Invalid number of operands!");
2201 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2202 }
2203
2204 void addShifterOperands(MCInst &Inst, unsigned N) const {
2205 assert(N == 1 && "Invalid number of operands!");
2206 unsigned Imm =
2207 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2208 Inst.addOperand(MCOperand::createImm(Imm));
2209 }
2210
2211 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2212 assert(N == 1 && "Invalid number of operands!");
2213 unsigned Imm = getShiftExtendAmount();
2214 Inst.addOperand(MCOperand::createImm(Imm));
2215 }
2216
2217 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2218 assert(N == 1 && "Invalid number of operands!");
2219
2220 if (!isScalarReg())
2221 return;
2222
2223 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2224 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2225 .getRegister(RI->getEncodingValue(getReg()));
2226 if (Reg != AArch64::XZR)
2227 llvm_unreachable("wrong register");
2228
2229 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2230 }
2231
2232 void addExtendOperands(MCInst &Inst, unsigned N) const {
2233 assert(N == 1 && "Invalid number of operands!");
2234 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2235 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2236 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2237 Inst.addOperand(MCOperand::createImm(Imm));
2238 }
2239
2240 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2243 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2244 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2245 Inst.addOperand(MCOperand::createImm(Imm));
2246 }
2247
2248 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2249 assert(N == 2 && "Invalid number of operands!");
2250 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2251 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2252 Inst.addOperand(MCOperand::createImm(IsSigned));
2253 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2254 }
2255
2256 // For 8-bit load/store instructions with a register offset, both the
2257 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2258 // they're disambiguated by whether the shift was explicit or implicit rather
2259 // than its size.
2260 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2261 assert(N == 2 && "Invalid number of operands!");
2262 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2263 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2264 Inst.addOperand(MCOperand::createImm(IsSigned));
2265 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2266 }
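 // Example: "ldrb w0, [x1, x2, lsl #0]" carries an explicit (though zero)
 // shift amount and so selects the "DoShift" variant, whereas
 // "ldrb w0, [x1, x2]" selects "NoShift", even though both encode a shift
 // amount of 0.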
2267
2268 template<int Shift>
2269 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2270 assert(N == 1 && "Invalid number of operands!");
2271
2272 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2273 if (CE) {
2274 uint64_t Value = CE->getValue();
2275 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2276 } else {
2277 addExpr(Inst, getImm());
2278 }
2279 }
2280
2281 template<int Shift>
2282 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284
2285 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2286 uint64_t Value = CE->getValue();
2287 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2288 }
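 // Example: with Shift == 0 and a constant of -2, the emitted field is
 // (~(-2) >> 0) & 0xffff == 1; executing MOVN then reconstructs ~1 == -2.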
2289
2290 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2293 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2294 }
2295
2296 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2297 assert(N == 1 && "Invalid number of operands!");
2298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2299 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2300 }
2301
2302 void print(raw_ostream &OS) const override;
2303
2304 static std::unique_ptr<AArch64Operand>
2305 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2306 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2307 Op->Tok.Data = Str.data();
2308 Op->Tok.Length = Str.size();
2309 Op->Tok.IsSuffix = IsSuffix;
2310 Op->StartLoc = S;
2311 Op->EndLoc = S;
2312 return Op;
2313 }
2314
2315 static std::unique_ptr<AArch64Operand>
2316 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2317 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2318 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2319 unsigned ShiftAmount = 0,
2320 unsigned HasExplicitAmount = false) {
2321 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2322 Op->Reg.RegNum = RegNum;
2323 Op->Reg.Kind = Kind;
2324 Op->Reg.ElementWidth = 0;
2325 Op->Reg.EqualityTy = EqTy;
2326 Op->Reg.ShiftExtend.Type = ExtTy;
2327 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2328 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2329 Op->StartLoc = S;
2330 Op->EndLoc = E;
2331 return Op;
2332 }
2333
2334 static std::unique_ptr<AArch64Operand>
2335 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2336 SMLoc S, SMLoc E, MCContext &Ctx,
2337 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2338 unsigned ShiftAmount = 0,
2339 unsigned HasExplicitAmount = false) {
2340 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2341 Kind == RegKind::SVEPredicateVector ||
2342 Kind == RegKind::SVEPredicateAsCounter) &&
2343 "Invalid vector kind");
2344 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2345 HasExplicitAmount);
2346 Op->Reg.ElementWidth = ElementWidth;
2347 return Op;
2348 }
2349
2350 static std::unique_ptr<AArch64Operand>
2351 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2352 unsigned NumElements, unsigned ElementWidth,
2353 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2354 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2355 Op->VectorList.RegNum = RegNum;
2356 Op->VectorList.Count = Count;
2357 Op->VectorList.Stride = Stride;
2358 Op->VectorList.NumElements = NumElements;
2359 Op->VectorList.ElementWidth = ElementWidth;
2360 Op->VectorList.RegisterKind = RegisterKind;
2361 Op->StartLoc = S;
2362 Op->EndLoc = E;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2368 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2369 Op->VectorIndex.Val = Idx;
2370 Op->StartLoc = S;
2371 Op->EndLoc = E;
2372 return Op;
2373 }
2374
2375 static std::unique_ptr<AArch64Operand>
2376 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2377 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2378 Op->MatrixTileList.RegMask = RegMask;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
2384 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2385 const unsigned ElementWidth) {
2386 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2387 RegMap = {
2388 {{0, AArch64::ZAB0},
2389 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2390 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2391 {{8, AArch64::ZAB0},
2392 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2393 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2394 {{16, AArch64::ZAH0},
2395 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2396 {{16, AArch64::ZAH1},
2397 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2398 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2399 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2400 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2401 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2402 };
2403
2404 if (ElementWidth == 64)
2405 OutRegs.insert(Reg);
2406 else {
2407 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2408 assert(!Regs.empty() && "Invalid tile or element width!");
2409 for (auto OutReg : Regs)
2410 OutRegs.insert(OutReg);
2411 }
2412 }
2413
2414 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2415 SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2417 Op->Imm.Val = Val;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2424 unsigned ShiftAmount,
2425 SMLoc S, SMLoc E,
2426 MCContext &Ctx) {
2427 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2428 Op->ShiftedImm.Val = Val;
2429 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2430 Op->StartLoc = S;
2431 Op->EndLoc = E;
2432 return Op;
2433 }
2434
2435 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2436 unsigned Last, SMLoc S,
2437 SMLoc E,
2438 MCContext &Ctx) {
2439 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2440 Op->ImmRange.First = First;
2441 Op->ImmRange.Last = Last;
2442 Op->EndLoc = E;
2443 return Op;
2444 }
2445
2446 static std::unique_ptr<AArch64Operand>
2447 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2448 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2449 Op->CondCode.Code = Code;
2450 Op->StartLoc = S;
2451 Op->EndLoc = E;
2452 return Op;
2453 }
2454
2455 static std::unique_ptr<AArch64Operand>
2456 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2457 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2458 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2459 Op->FPImm.IsExact = IsExact;
2460 Op->StartLoc = S;
2461 Op->EndLoc = S;
2462 return Op;
2463 }
2464
2465 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2466 StringRef Str,
2467 SMLoc S,
2468 MCContext &Ctx,
2469 bool HasnXSModifier) {
2470 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2471 Op->Barrier.Val = Val;
2472 Op->Barrier.Data = Str.data();
2473 Op->Barrier.Length = Str.size();
2474 Op->Barrier.HasnXSModifier = HasnXSModifier;
2475 Op->StartLoc = S;
2476 Op->EndLoc = S;
2477 return Op;
2478 }
2479
2480 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2481 uint32_t MRSReg,
2482 uint32_t MSRReg,
2483 uint32_t PStateField,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2486 Op->SysReg.Data = Str.data();
2487 Op->SysReg.Length = Str.size();
2488 Op->SysReg.MRSReg = MRSReg;
2489 Op->SysReg.MSRReg = MSRReg;
2490 Op->SysReg.PStateField = PStateField;
2491 Op->StartLoc = S;
2492 Op->EndLoc = S;
2493 return Op;
2494 }
2495
2496 static std::unique_ptr<AArch64Operand>
2497 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2498 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2499 Op->PHint.Val = Val;
2500 Op->PHint.Data = Str.data();
2501 Op->PHint.Length = Str.size();
2502 Op->StartLoc = S;
2503 Op->EndLoc = S;
2504 return Op;
2505 }
2506
2507 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2508 SMLoc E, MCContext &Ctx) {
2509 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2510 Op->SysCRImm.Val = Val;
2511 Op->StartLoc = S;
2512 Op->EndLoc = E;
2513 return Op;
2514 }
2515
2516 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2517 StringRef Str,
2518 SMLoc S,
2519 MCContext &Ctx) {
2520 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2521 Op->Prefetch.Val = Val;
2522 Op->Barrier.Data = Str.data();
2523 Op->Barrier.Length = Str.size();
2524 Op->StartLoc = S;
2525 Op->EndLoc = S;
2526 return Op;
2527 }
2528
2529 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2530 StringRef Str,
2531 SMLoc S,
2532 MCContext &Ctx) {
2533 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2534 Op->PSBHint.Val = Val;
2535 Op->PSBHint.Data = Str.data();
2536 Op->PSBHint.Length = Str.size();
2537 Op->StartLoc = S;
2538 Op->EndLoc = S;
2539 return Op;
2540 }
2541
2542 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2543 StringRef Str,
2544 SMLoc S,
2545 MCContext &Ctx) {
2546 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2547 Op->BTIHint.Val = Val | 32;
2548 Op->BTIHint.Data = Str.data();
2549 Op->BTIHint.Length = Str.size();
2550 Op->StartLoc = S;
2551 Op->EndLoc = S;
2552 return Op;
2553 }
2554
2555 static std::unique_ptr<AArch64Operand>
2556 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2557 SMLoc S, SMLoc E, MCContext &Ctx) {
2558 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2559 Op->MatrixReg.RegNum = RegNum;
2560 Op->MatrixReg.ElementWidth = ElementWidth;
2561 Op->MatrixReg.Kind = Kind;
2562 Op->StartLoc = S;
2563 Op->EndLoc = E;
2564 return Op;
2565 }
2566
2567 static std::unique_ptr<AArch64Operand>
2568 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2569 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2570 Op->SVCR.PStateField = PStateField;
2571 Op->SVCR.Data = Str.data();
2572 Op->SVCR.Length = Str.size();
2573 Op->StartLoc = S;
2574 Op->EndLoc = S;
2575 return Op;
2576 }
2577
2578 static std::unique_ptr<AArch64Operand>
2579 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2580 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2581 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2582 Op->ShiftExtend.Type = ShOp;
2583 Op->ShiftExtend.Amount = Val;
2584 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2585 Op->StartLoc = S;
2586 Op->EndLoc = E;
2587 return Op;
2588 }
2589};
2590
2591} // end anonymous namespace.
2592
2593void AArch64Operand::print(raw_ostream &OS) const {
2594 switch (Kind) {
2595 case k_FPImm:
2596 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2597 if (!getFPImmIsExact())
2598 OS << " (inexact)";
2599 OS << ">";
2600 break;
2601 case k_Barrier: {
2602 StringRef Name = getBarrierName();
2603 if (!Name.empty())
2604 OS << "<barrier " << Name << ">";
2605 else
2606 OS << "<barrier invalid #" << getBarrier() << ">";
2607 break;
2608 }
2609 case k_Immediate:
2610 OS << *getImm();
2611 break;
2612 case k_ShiftedImm: {
2613 unsigned Shift = getShiftedImmShift();
2614 OS << "<shiftedimm ";
2615 OS << *getShiftedImmVal();
2616 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2617 break;
2618 }
2619 case k_ImmRange: {
2620 OS << "<immrange ";
2621 OS << getFirstImmVal();
2622 OS << ":" << getLastImmVal() << ">";
2623 break;
2624 }
2625 case k_CondCode:
2626 OS << "<condcode " << getCondCode() << ">";
2627 break;
2628 case k_VectorList: {
2629 OS << "<vectorlist ";
2630 unsigned Reg = getVectorListStart();
2631 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2632 OS << Reg + i * getVectorListStride() << " ";
2633 OS << ">";
2634 break;
2635 }
2636 case k_VectorIndex:
2637 OS << "<vectorindex " << getVectorIndex() << ">";
2638 break;
2639 case k_SysReg:
2640 OS << "<sysreg: " << getSysReg() << '>';
2641 break;
2642 case k_Token:
2643 OS << "'" << getToken() << "'";
2644 break;
2645 case k_SysCR:
2646 OS << "c" << getSysCR();
2647 break;
2648 case k_Prefetch: {
2649 StringRef Name = getPrefetchName();
2650 if (!Name.empty())
2651 OS << "<prfop " << Name << ">";
2652 else
2653 OS << "<prfop invalid #" << getPrefetch() << ">";
2654 break;
2655 }
2656 case k_PSBHint:
2657 OS << getPSBHintName();
2658 break;
2659 case k_PHint:
2660 OS << getPHintName();
2661 break;
2662 case k_BTIHint:
2663 OS << getBTIHintName();
2664 break;
2665 case k_MatrixRegister:
2666 OS << "<matrix " << getMatrixReg() << ">";
2667 break;
2668 case k_MatrixTileList: {
2669 OS << "<matrixlist ";
2670 unsigned RegMask = getMatrixTileListRegMask();
2671 unsigned MaxBits = 8;
2672 for (unsigned I = MaxBits; I > 0; --I)
2673 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2674 OS << '>';
2675 break;
2676 }
2677 case k_SVCR: {
2678 OS << getSVCR();
2679 break;
2680 }
2681 case k_Register:
2682 OS << "<register " << getReg() << ">";
2683 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2684 break;
2685 [[fallthrough]];
2686 case k_ShiftExtend:
2687 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2688 << getShiftExtendAmount();
2689 if (!hasShiftExtendAmount())
2690 OS << "<imp>";
2691 OS << '>';
2692 break;
2693 }
2694}
2695
2696/// @name Auto-generated Match Functions
2697/// {
2698
2699static unsigned MatchRegisterName(StringRef Name);
2700
2701/// }
2702
2703static unsigned MatchNeonVectorRegName(StringRef Name) {
2704 return StringSwitch<unsigned>(Name.lower())
2705 .Case("v0", AArch64::Q0)
2706 .Case("v1", AArch64::Q1)
2707 .Case("v2", AArch64::Q2)
2708 .Case("v3", AArch64::Q3)
2709 .Case("v4", AArch64::Q4)
2710 .Case("v5", AArch64::Q5)
2711 .Case("v6", AArch64::Q6)
2712 .Case("v7", AArch64::Q7)
2713 .Case("v8", AArch64::Q8)
2714 .Case("v9", AArch64::Q9)
2715 .Case("v10", AArch64::Q10)
2716 .Case("v11", AArch64::Q11)
2717 .Case("v12", AArch64::Q12)
2718 .Case("v13", AArch64::Q13)
2719 .Case("v14", AArch64::Q14)
2720 .Case("v15", AArch64::Q15)
2721 .Case("v16", AArch64::Q16)
2722 .Case("v17", AArch64::Q17)
2723 .Case("v18", AArch64::Q18)
2724 .Case("v19", AArch64::Q19)
2725 .Case("v20", AArch64::Q20)
2726 .Case("v21", AArch64::Q21)
2727 .Case("v22", AArch64::Q22)
2728 .Case("v23", AArch64::Q23)
2729 .Case("v24", AArch64::Q24)
2730 .Case("v25", AArch64::Q25)
2731 .Case("v26", AArch64::Q26)
2732 .Case("v27", AArch64::Q27)
2733 .Case("v28", AArch64::Q28)
2734 .Case("v29", AArch64::Q29)
2735 .Case("v30", AArch64::Q30)
2736 .Case("v31", AArch64::Q31)
2737 .Default(0);
2738}
2739
2740/// Returns an optional pair of (#elements, element-width) if Suffix
2741/// is a valid vector kind. Where the number of elements in a vector
2742/// or the vector width is implicit or explicitly unknown (but still a
2743/// valid suffix kind), 0 is used.
2744static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2745 RegKind VectorKind) {
2746 std::pair<int, int> Res = {-1, -1};
2747
2748 switch (VectorKind) {
2749 case RegKind::NeonVector:
2750 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2751 .Case("", {0, 0})
2752 .Case(".1d", {1, 64})
2753 .Case(".1q", {1, 128})
2754 // '.2h' needed for fp16 scalar pairwise reductions
2755 .Case(".2h", {2, 16})
2756 .Case(".2b", {2, 8})
2757 .Case(".2s", {2, 32})
2758 .Case(".2d", {2, 64})
2759 // '.4b' is another special case for the ARMv8.2a dot product
2760 // operand
2761 .Case(".4b", {4, 8})
2762 .Case(".4h", {4, 16})
2763 .Case(".4s", {4, 32})
2764 .Case(".8b", {8, 8})
2765 .Case(".8h", {8, 16})
2766 .Case(".16b", {16, 8})
2767 // Accept the width neutral ones, too, for verbose syntax. If
2768 // those aren't used in the right places, the token operand won't
2769 // match so all will work out.
2770 .Case(".b", {0, 8})
2771 .Case(".h", {0, 16})
2772 .Case(".s", {0, 32})
2773 .Case(".d", {0, 64})
2774 .Default({-1, -1});
2775 break;
2776 case RegKind::SVEPredicateAsCounter:
2777 case RegKind::SVEPredicateVector:
2778 case RegKind::SVEDataVector:
2779 case RegKind::Matrix:
2780 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2781 .Case("", {0, 0})
2782 .Case(".b", {0, 8})
2783 .Case(".h", {0, 16})
2784 .Case(".s", {0, 32})
2785 .Case(".d", {0, 64})
2786 .Case(".q", {0, 128})
2787 .Default({-1, -1});
2788 break;
2789 default:
2790 llvm_unreachable("Unsupported RegKind");
2791 }
2792
2793 if (Res == std::make_pair(-1, -1))
2794 return std::nullopt;
2795
2796 return std::optional<std::pair<int, int>>(Res);
2797}
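// Example: parseVectorKind(".4s", RegKind::NeonVector) yields {4, 32},
// ".b" yields the width-neutral {0, 8}, and an unrecognised suffix such as
// ".3s" yields std::nullopt.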
2798
2799static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2800 return parseVectorKind(Suffix, VectorKind).has_value();
2801}
2802
2803static unsigned matchSVEDataVectorRegName(StringRef Name) {
2804 return StringSwitch<unsigned>(Name.lower())
2805 .Case("z0", AArch64::Z0)
2806 .Case("z1", AArch64::Z1)
2807 .Case("z2", AArch64::Z2)
2808 .Case("z3", AArch64::Z3)
2809 .Case("z4", AArch64::Z4)
2810 .Case("z5", AArch64::Z5)
2811 .Case("z6", AArch64::Z6)
2812 .Case("z7", AArch64::Z7)
2813 .Case("z8", AArch64::Z8)
2814 .Case("z9", AArch64::Z9)
2815 .Case("z10", AArch64::Z10)
2816 .Case("z11", AArch64::Z11)
2817 .Case("z12", AArch64::Z12)
2818 .Case("z13", AArch64::Z13)
2819 .Case("z14", AArch64::Z14)
2820 .Case("z15", AArch64::Z15)
2821 .Case("z16", AArch64::Z16)
2822 .Case("z17", AArch64::Z17)
2823 .Case("z18", AArch64::Z18)
2824 .Case("z19", AArch64::Z19)
2825 .Case("z20", AArch64::Z20)
2826 .Case("z21", AArch64::Z21)
2827 .Case("z22", AArch64::Z22)
2828 .Case("z23", AArch64::Z23)
2829 .Case("z24", AArch64::Z24)
2830 .Case("z25", AArch64::Z25)
2831 .Case("z26", AArch64::Z26)
2832 .Case("z27", AArch64::Z27)
2833 .Case("z28", AArch64::Z28)
2834 .Case("z29", AArch64::Z29)
2835 .Case("z30", AArch64::Z30)
2836 .Case("z31", AArch64::Z31)
2837 .Default(0);
2838}
2839
2840static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2841 return StringSwitch<unsigned>(Name.lower())
2842 .Case("p0", AArch64::P0)
2843 .Case("p1", AArch64::P1)
2844 .Case("p2", AArch64::P2)
2845 .Case("p3", AArch64::P3)
2846 .Case("p4", AArch64::P4)
2847 .Case("p5", AArch64::P5)
2848 .Case("p6", AArch64::P6)
2849 .Case("p7", AArch64::P7)
2850 .Case("p8", AArch64::P8)
2851 .Case("p9", AArch64::P9)
2852 .Case("p10", AArch64::P10)
2853 .Case("p11", AArch64::P11)
2854 .Case("p12", AArch64::P12)
2855 .Case("p13", AArch64::P13)
2856 .Case("p14", AArch64::P14)
2857 .Case("p15", AArch64::P15)
2858 .Default(0);
2859}
2860
2861static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2862 return StringSwitch<unsigned>(Name.lower())
2863 .Case("pn0", AArch64::PN0)
2864 .Case("pn1", AArch64::PN1)
2865 .Case("pn2", AArch64::PN2)
2866 .Case("pn3", AArch64::PN3)
2867 .Case("pn4", AArch64::PN4)
2868 .Case("pn5", AArch64::PN5)
2869 .Case("pn6", AArch64::PN6)
2870 .Case("pn7", AArch64::PN7)
2871 .Case("pn8", AArch64::PN8)
2872 .Case("pn9", AArch64::PN9)
2873 .Case("pn10", AArch64::PN10)
2874 .Case("pn11", AArch64::PN11)
2875 .Case("pn12", AArch64::PN12)
2876 .Case("pn13", AArch64::PN13)
2877 .Case("pn14", AArch64::PN14)
2878 .Case("pn15", AArch64::PN15)
2879 .Default(0);
2880}
2881
2882static unsigned matchMatrixTileListRegName(StringRef Name) {
2883 return StringSwitch<unsigned>(Name.lower())
2884 .Case("za0.d", AArch64::ZAD0)
2885 .Case("za1.d", AArch64::ZAD1)
2886 .Case("za2.d", AArch64::ZAD2)
2887 .Case("za3.d", AArch64::ZAD3)
2888 .Case("za4.d", AArch64::ZAD4)
2889 .Case("za5.d", AArch64::ZAD5)
2890 .Case("za6.d", AArch64::ZAD6)
2891 .Case("za7.d", AArch64::ZAD7)
2892 .Case("za0.s", AArch64::ZAS0)
2893 .Case("za1.s", AArch64::ZAS1)
2894 .Case("za2.s", AArch64::ZAS2)
2895 .Case("za3.s", AArch64::ZAS3)
2896 .Case("za0.h", AArch64::ZAH0)
2897 .Case("za1.h", AArch64::ZAH1)
2898 .Case("za0.b", AArch64::ZAB0)
2899 .Default(0);
2900}
2901
2902static unsigned matchMatrixRegName(StringRef Name) {
2903 return StringSwitch<unsigned>(Name.lower())
2904 .Case("za", AArch64::ZA)
2905 .Case("za0.q", AArch64::ZAQ0)
2906 .Case("za1.q", AArch64::ZAQ1)
2907 .Case("za2.q", AArch64::ZAQ2)
2908 .Case("za3.q", AArch64::ZAQ3)
2909 .Case("za4.q", AArch64::ZAQ4)
2910 .Case("za5.q", AArch64::ZAQ5)
2911 .Case("za6.q", AArch64::ZAQ6)
2912 .Case("za7.q", AArch64::ZAQ7)
2913 .Case("za8.q", AArch64::ZAQ8)
2914 .Case("za9.q", AArch64::ZAQ9)
2915 .Case("za10.q", AArch64::ZAQ10)
2916 .Case("za11.q", AArch64::ZAQ11)
2917 .Case("za12.q", AArch64::ZAQ12)
2918 .Case("za13.q", AArch64::ZAQ13)
2919 .Case("za14.q", AArch64::ZAQ14)
2920 .Case("za15.q", AArch64::ZAQ15)
2921 .Case("za0.d", AArch64::ZAD0)
2922 .Case("za1.d", AArch64::ZAD1)
2923 .Case("za2.d", AArch64::ZAD2)
2924 .Case("za3.d", AArch64::ZAD3)
2925 .Case("za4.d", AArch64::ZAD4)
2926 .Case("za5.d", AArch64::ZAD5)
2927 .Case("za6.d", AArch64::ZAD6)
2928 .Case("za7.d", AArch64::ZAD7)
2929 .Case("za0.s", AArch64::ZAS0)
2930 .Case("za1.s", AArch64::ZAS1)
2931 .Case("za2.s", AArch64::ZAS2)
2932 .Case("za3.s", AArch64::ZAS3)
2933 .Case("za0.h", AArch64::ZAH0)
2934 .Case("za1.h", AArch64::ZAH1)
2935 .Case("za0.b", AArch64::ZAB0)
2936 .Case("za0h.q", AArch64::ZAQ0)
2937 .Case("za1h.q", AArch64::ZAQ1)
2938 .Case("za2h.q", AArch64::ZAQ2)
2939 .Case("za3h.q", AArch64::ZAQ3)
2940 .Case("za4h.q", AArch64::ZAQ4)
2941 .Case("za5h.q", AArch64::ZAQ5)
2942 .Case("za6h.q", AArch64::ZAQ6)
2943 .Case("za7h.q", AArch64::ZAQ7)
2944 .Case("za8h.q", AArch64::ZAQ8)
2945 .Case("za9h.q", AArch64::ZAQ9)
2946 .Case("za10h.q", AArch64::ZAQ10)
2947 .Case("za11h.q", AArch64::ZAQ11)
2948 .Case("za12h.q", AArch64::ZAQ12)
2949 .Case("za13h.q", AArch64::ZAQ13)
2950 .Case("za14h.q", AArch64::ZAQ14)
2951 .Case("za15h.q", AArch64::ZAQ15)
2952 .Case("za0h.d", AArch64::ZAD0)
2953 .Case("za1h.d", AArch64::ZAD1)
2954 .Case("za2h.d", AArch64::ZAD2)
2955 .Case("za3h.d", AArch64::ZAD3)
2956 .Case("za4h.d", AArch64::ZAD4)
2957 .Case("za5h.d", AArch64::ZAD5)
2958 .Case("za6h.d", AArch64::ZAD6)
2959 .Case("za7h.d", AArch64::ZAD7)
2960 .Case("za0h.s", AArch64::ZAS0)
2961 .Case("za1h.s", AArch64::ZAS1)
2962 .Case("za2h.s", AArch64::ZAS2)
2963 .Case("za3h.s", AArch64::ZAS3)
2964 .Case("za0h.h", AArch64::ZAH0)
2965 .Case("za1h.h", AArch64::ZAH1)
2966 .Case("za0h.b", AArch64::ZAB0)
2967 .Case("za0v.q", AArch64::ZAQ0)
2968 .Case("za1v.q", AArch64::ZAQ1)
2969 .Case("za2v.q", AArch64::ZAQ2)
2970 .Case("za3v.q", AArch64::ZAQ3)
2971 .Case("za4v.q", AArch64::ZAQ4)
2972 .Case("za5v.q", AArch64::ZAQ5)
2973 .Case("za6v.q", AArch64::ZAQ6)
2974 .Case("za7v.q", AArch64::ZAQ7)
2975 .Case("za8v.q", AArch64::ZAQ8)
2976 .Case("za9v.q", AArch64::ZAQ9)
2977 .Case("za10v.q", AArch64::ZAQ10)
2978 .Case("za11v.q", AArch64::ZAQ11)
2979 .Case("za12v.q", AArch64::ZAQ12)
2980 .Case("za13v.q", AArch64::ZAQ13)
2981 .Case("za14v.q", AArch64::ZAQ14)
2982 .Case("za15v.q", AArch64::ZAQ15)
2983 .Case("za0v.d", AArch64::ZAD0)
2984 .Case("za1v.d", AArch64::ZAD1)
2985 .Case("za2v.d", AArch64::ZAD2)
2986 .Case("za3v.d", AArch64::ZAD3)
2987 .Case("za4v.d", AArch64::ZAD4)
2988 .Case("za5v.d", AArch64::ZAD5)
2989 .Case("za6v.d", AArch64::ZAD6)
2990 .Case("za7v.d", AArch64::ZAD7)
2991 .Case("za0v.s", AArch64::ZAS0)
2992 .Case("za1v.s", AArch64::ZAS1)
2993 .Case("za2v.s", AArch64::ZAS2)
2994 .Case("za3v.s", AArch64::ZAS3)
2995 .Case("za0v.h", AArch64::ZAH0)
2996 .Case("za1v.h", AArch64::ZAH1)
2997 .Case("za0v.b", AArch64::ZAB0)
2998 .Default(0);
2999}
3000
3001bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3002 SMLoc &EndLoc) {
3003 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3004}
3005
3006ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3007 SMLoc &EndLoc) {
3008 StartLoc = getLoc();
3009 ParseStatus Res = tryParseScalarRegister(Reg);
3010 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 return Res;
3012}
3013
3014// Matches a register name or register alias previously defined by '.req'
3015unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3016 RegKind Kind) {
3017 unsigned RegNum = 0;
3018 if ((RegNum = matchSVEDataVectorRegName(Name)))
3019 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3020
3021 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3022 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3023
3024 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3025 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3026
3027 if ((RegNum = MatchNeonVectorRegName(Name)))
3028 return Kind == RegKind::NeonVector ? RegNum : 0;
3029
3030 if ((RegNum = matchMatrixRegName(Name)))
3031 return Kind == RegKind::Matrix ? RegNum : 0;
3032
3033 if (Name.equals_insensitive("zt0"))
3034 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3035
3036 // The parsed register must be of RegKind Scalar
3037 if ((RegNum = MatchRegisterName(Name)))
3038 return (Kind == RegKind::Scalar) ? RegNum : 0;
3039
3040 if (!RegNum) {
3041 // Handle a few common aliases of registers.
3042 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3043 .Case("fp", AArch64::FP)
3044 .Case("lr", AArch64::LR)
3045 .Case("x31", AArch64::XZR)
3046 .Case("w31", AArch64::WZR)
3047 .Default(0))
3048 return Kind == RegKind::Scalar ? RegNum : 0;
3049
3050 // Check for aliases registered via .req. Canonicalize to lower case.
3051 // That's more consistent since register names are case insensitive, and
3052 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3053 auto Entry = RegisterReqs.find(Name.lower());
3054 if (Entry == RegisterReqs.end())
3055 return 0;
3056
3057 // set RegNum if the match is the right kind of register
3058 if (Kind == Entry->getValue().first)
3059 RegNum = Entry->getValue().second;
3060 }
3061 return RegNum;
3062}
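// Example: for RegKind::Scalar, "fp" and "x31" resolve to AArch64::FP and
// AArch64::XZR respectively, while a ".req" alias is only returned when the
// RegKind recorded at its definition matches the requested kind.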
3063
3064unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3065 switch (K) {
3066 case RegKind::Scalar:
3067 case RegKind::NeonVector:
3068 case RegKind::SVEDataVector:
3069 return 32;
3070 case RegKind::Matrix:
3071 case RegKind::SVEPredicateVector:
3072 case RegKind::SVEPredicateAsCounter:
3073 return 16;
3074 case RegKind::LookupTable:
3075 return 1;
3076 }
3077 llvm_unreachable("Unsupported RegKind");
3078}
3079
3080/// tryParseScalarRegister - Try to parse a register name. The token must be an
3081/// Identifier when called, and if it is a register name the token is eaten and
3082/// the matched register number is returned via RegNum.
3083ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3084 const AsmToken &Tok = getTok();
3085 if (Tok.isNot(AsmToken::Identifier))
3086 return ParseStatus::NoMatch;
3087
3088 std::string lowerCase = Tok.getString().lower();
3089 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3090 if (Reg == 0)
3091 return ParseStatus::NoMatch;
3092
3093 RegNum = Reg;
3094 Lex(); // Eat identifier token.
3095 return ParseStatus::Success;
3096}
3097
3098/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3099ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3100 SMLoc S = getLoc();
3101
3102 if (getTok().isNot(AsmToken::Identifier))
3103 return Error(S, "Expected cN operand where 0 <= N <= 15");
3104
3105 StringRef Tok = getTok().getIdentifier();
3106 if (Tok[0] != 'c' && Tok[0] != 'C')
3107 return Error(S, "Expected cN operand where 0 <= N <= 15");
3108
3109 uint32_t CRNum;
3110 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3111 if (BadNum || CRNum > 15)
3112 return Error(S, "Expected cN operand where 0 <= N <= 15");
3113
3114 Lex(); // Eat identifier token.
3115 Operands.push_back(
3116 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3117 return ParseStatus::Success;
3118}
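// Example: the token "c7" (or "C7") becomes a SysCR operand with value 7;
// anything outside c0..c15 is rejected with the "Expected cN operand where
// 0 <= N <= 15" diagnostic.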
3119
3120// Either an identifier for named values or a 6-bit immediate.
3121ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3122 SMLoc S = getLoc();
3123 const AsmToken &Tok = getTok();
3124
3125 unsigned MaxVal = 63;
3126
3127 // Immediate case, with optional leading hash:
3128 if (parseOptionalToken(AsmToken::Hash) ||
3129 Tok.is(AsmToken::Integer)) {
3130 const MCExpr *ImmVal;
3131 if (getParser().parseExpression(ImmVal))
3132 return ParseStatus::Failure;
3133
3134 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3135 if (!MCE)
3136 return TokError("immediate value expected for prefetch operand");
3137 unsigned prfop = MCE->getValue();
3138 if (prfop > MaxVal)
3139 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3140 "] expected");
3141
3142 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3143 Operands.push_back(AArch64Operand::CreatePrefetch(
3144 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3145 return ParseStatus::Success;
3146 }
3147
3148 if (Tok.isNot(AsmToken::Identifier))
3149 return TokError("prefetch hint expected");
3150
3151 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3152 if (!RPRFM)
3153 return TokError("prefetch hint expected");
3154
3155 Operands.push_back(AArch64Operand::CreatePrefetch(
3156 RPRFM->Encoding, Tok.getString(), S, getContext()));
3157 Lex(); // Eat identifier token.
3158 return ParseStatus::Success;
3159}
3160
3161/// tryParsePrefetch - Try to parse a prefetch operand.
3162template <bool IsSVEPrefetch>
3163ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3164 SMLoc S = getLoc();
3165 const AsmToken &Tok = getTok();
3166
3167 auto LookupByName = [](StringRef N) {
3168 if (IsSVEPrefetch) {
3169 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3170 return std::optional<unsigned>(Res->Encoding);
3171 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3172 return std::optional<unsigned>(Res->Encoding);
3173 return std::optional<unsigned>();
3174 };
3175
3176 auto LookupByEncoding = [](unsigned E) {
3177 if (IsSVEPrefetch) {
3178 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3179 return std::optional<StringRef>(Res->Name);
3180 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3181 return std::optional<StringRef>(Res->Name);
3182 return std::optional<StringRef>();
3183 };
3184 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3185
3186 // Either an identifier for named values or a 5-bit immediate.
3187 // Eat optional hash.
3188 if (parseOptionalToken(AsmToken::Hash) ||
3189 Tok.is(AsmToken::Integer)) {
3190 const MCExpr *ImmVal;
3191 if (getParser().parseExpression(ImmVal))
3192 return ParseStatus::Failure;
3193
3194 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3195 if (!MCE)
3196 return TokError("immediate value expected for prefetch operand");
3197 unsigned prfop = MCE->getValue();
3198 if (prfop > MaxVal)
3199 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3200 "] expected");
3201
3202 auto PRFM = LookupByEncoding(MCE->getValue());
3203 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3204 S, getContext()));
3205 return ParseStatus::Success;
3206 }
3207
3208 if (Tok.isNot(AsmToken::Identifier))
3209 return TokError("prefetch hint expected");
3210
3211 auto PRFM = LookupByName(Tok.getString());
3212 if (!PRFM)
3213 return TokError("prefetch hint expected");
3214
3215 Operands.push_back(AArch64Operand::CreatePrefetch(
3216 *PRFM, Tok.getString(), S, getContext()));
3217 Lex(); // Eat identifier token.
3218 return ParseStatus::Success;
3219}
3220
3221/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3222ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3223 SMLoc S = getLoc();
3224 const AsmToken &Tok = getTok();
3225 if (Tok.isNot(AsmToken::Identifier))
3226 return TokError("invalid operand for instruction");
3227
3228 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3229 if (!PSB)
3230 return TokError("invalid operand for instruction");
3231
3232 Operands.push_back(AArch64Operand::CreatePSBHint(
3233 PSB->Encoding, Tok.getString(), S, getContext()));
3234 Lex(); // Eat identifier token.
3235 return ParseStatus::Success;
3236}
3237
3238ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3239 SMLoc StartLoc = getLoc();
3240
3241 MCRegister RegNum;
3242
3243 // The case where xzr, xzr is not present is handled by an InstAlias.
3244
3245 auto RegTok = getTok(); // in case we need to backtrack
3246 if (!tryParseScalarRegister(RegNum).isSuccess())
3247 return ParseStatus::NoMatch;
3248
3249 if (RegNum != AArch64::XZR) {
3250 getLexer().UnLex(RegTok);
3251 return ParseStatus::NoMatch;
3252 }
3253
3254 if (parseComma())
3255 return ParseStatus::Failure;
3256
3257 if (!tryParseScalarRegister(RegNum).isSuccess())
3258 return TokError("expected register operand");
3259
3260 if (RegNum != AArch64::XZR)
3261 return TokError("xzr must be followed by xzr");
3262
3263 // We need to push something, since we claim this is an operand in .td.
3264 // See also AArch64AsmParser::parseKeywordOperand.
3265 Operands.push_back(AArch64Operand::CreateReg(
3266 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3267
3268 return ParseStatus::Success;
3269}
3270
3271/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3272ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3273 SMLoc S = getLoc();
3274 const AsmToken &Tok = getTok();
3275 if (Tok.isNot(AsmToken::Identifier))
3276 return TokError("invalid operand for instruction");
3277
3278 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3279 if (!BTI)
3280 return TokError("invalid operand for instruction");
3281
3282 Operands.push_back(AArch64Operand::CreateBTIHint(
3283 BTI->Encoding, Tok.getString(), S, getContext()));
3284 Lex(); // Eat identifier token.
3285 return ParseStatus::Success;
3286}
3287
3288/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3289/// instruction.
3290ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3291 SMLoc S = getLoc();
3292 const MCExpr *Expr = nullptr;
3293
3294 if (getTok().is(AsmToken::Hash)) {
3295 Lex(); // Eat hash token.
3296 }
3297
3298 if (parseSymbolicImmVal(Expr))
3299 return ParseStatus::Failure;
3300
3301 AArch64MCExpr::VariantKind ELFRefKind;
3302 MCSymbolRefExpr::VariantKind DarwinRefKind;
3303 int64_t Addend;
3304 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3305 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3306 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3307 // No modifier was specified at all; this is the syntax for an ELF basic
3308 // ADRP relocation (unfortunately).
3309 Expr =
3311 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3312 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3313 Addend != 0) {
3314 return Error(S, "gotpage label reference not allowed an addend");
3315 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3316 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3317 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3318 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3319 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3320 ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
3321 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3322 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3323 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE &&
3324 ELFRefKind != AArch64MCExpr::VK_TLSDESC_AUTH_PAGE) {
3325 // The operand must be an @page or @gotpage qualified symbolref.
3326 return Error(S, "page or gotpage label reference expected");
3327 }
3328 }
3329
3330 // We have either a label reference possibly with addend or an immediate. The
3331 // addend is a raw value here. The linker will adjust it to only reference the
3332 // page.
3333 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3334 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3335
3336 return ParseStatus::Success;
3337}
3338
3339/// tryParseAdrLabel - Parse and validate a source label for the ADR
3340/// instruction.
3341ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3342 SMLoc S = getLoc();
3343 const MCExpr *Expr = nullptr;
3344
3345 // Leave anything with a bracket to the default for SVE
3346 if (getTok().is(AsmToken::LBrac))
3347 return ParseStatus::NoMatch;
3348
3349 if (getTok().is(AsmToken::Hash))
3350 Lex(); // Eat hash token.
3351
3352 if (parseSymbolicImmVal(Expr))
3353 return ParseStatus::Failure;
3354
3355 AArch64MCExpr::VariantKind ELFRefKind;
3356 MCSymbolRefExpr::VariantKind DarwinRefKind;
3357 int64_t Addend;
3358 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3359 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3360 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3361 // No modifier was specified at all; this is the syntax for an ELF basic
3362 // ADR relocation (unfortunately).
3363 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3364 } else if (ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE) {
3365 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3366 // adr. It's not actually GOT entry page address but the GOT address
3367 // itself - we just share the same variant kind with :got_auth: operator
3368 // applied for adrp.
3369 // TODO: can we somehow get current TargetMachine object to call
3370 // getCodeModel() on it to ensure we are using tiny code model?
3371 return Error(S, "unexpected adr label");
3372 }
3373 }
3374
3375 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3376 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseFPImm - A floating point immediate expression operand.
3381template <bool AddFPZeroAsLiteral>
3382ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384
3385 bool Hash = parseOptionalToken(AsmToken::Hash);
3386
3387 // Handle negation, as that still comes through as a separate token.
3388 bool isNegative = parseOptionalToken(AsmToken::Minus);
3389
3390 const AsmToken &Tok = getTok();
3391 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3392 if (!Hash)
3393 return ParseStatus::NoMatch;
3394 return TokError("invalid floating point immediate");
3395 }
3396
3397 // Parse hexadecimal representation.
3398 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3399 if (Tok.getIntVal() > 255 || isNegative)
3400 return TokError("encoded floating point value out of range");
3401
3402 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3403 Operands.push_back(
3404 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3405 } else {
3406 // Parse FP representation.
3407 APFloat RealVal(APFloat::IEEEdouble());
3408 auto StatusOrErr =
3409 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3410 if (errorToBool(StatusOrErr.takeError()))
3411 return TokError("invalid floating point representation");
3412
3413 if (isNegative)
3414 RealVal.changeSign();
3415
3416 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3417 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3418 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3419 } else
3420 Operands.push_back(AArch64Operand::CreateFPImm(
3421 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3422 }
3423
3424 Lex(); // Eat the token.
3425
3426 return ParseStatus::Success;
3427}
3428
3429/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3430/// a shift suffix, for example '#1, lsl #12'.
3431ParseStatus
3432AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3433 SMLoc S = getLoc();
3434
3435 if (getTok().is(AsmToken::Hash))
3436 Lex(); // Eat '#'
3437 else if (getTok().isNot(AsmToken::Integer))
3438 // Operand should start from # or should be integer, emit error otherwise.
3439 return ParseStatus::NoMatch;
3440
3441 if (getTok().is(AsmToken::Integer) &&
3442 getLexer().peekTok().is(AsmToken::Colon))
3443 return tryParseImmRange(Operands);
3444
3445 const MCExpr *Imm = nullptr;
3446 if (parseSymbolicImmVal(Imm))
3447 return ParseStatus::Failure;
3448 else if (getTok().isNot(AsmToken::Comma)) {
3449 Operands.push_back(
3450 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3451 return ParseStatus::Success;
3452 }
3453
3454 // Eat ','
3455 Lex();
3456 StringRef VecGroup;
3457 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3458 Operands.push_back(
3459 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3460 Operands.push_back(
3461 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3462 return ParseStatus::Success;
3463 }
3464
3465 // The optional operand must be "lsl #N" where N is non-negative.
3466 if (!getTok().is(AsmToken::Identifier) ||
3467 !getTok().getIdentifier().equals_insensitive("lsl"))
3468 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3469
3470 // Eat 'lsl'
3471 Lex();
3472
3473 parseOptionalToken(AsmToken::Hash);
3474
3475 if (getTok().isNot(AsmToken::Integer))
3476 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3477
3478 int64_t ShiftAmount = getTok().getIntVal();
3479
3480 if (ShiftAmount < 0)
3481 return Error(getLoc(), "positive shift amount required");
3482 Lex(); // Eat the number
3483
3484 // Just in case the optional lsl #0 is used for immediates other than zero.
3485 if (ShiftAmount == 0 && Imm != nullptr) {
3486 Operands.push_back(
3487 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3488 return ParseStatus::Success;
3489 }
3490
3491 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3492 getLoc(), getContext()));
3493 return ParseStatus::Success;
3494}
3495
3496/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3497/// suggestion to help common typos.
3498 AArch64CC::CondCode
3499 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3500 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3501 .Case("eq", AArch64CC::EQ)
3502 .Case("ne", AArch64CC::NE)
3503 .Case("cs", AArch64CC::HS)
3504 .Case("hs", AArch64CC::HS)
3505 .Case("cc", AArch64CC::LO)
3506 .Case("lo", AArch64CC::LO)
3507 .Case("mi", AArch64CC::MI)
3508 .Case("pl", AArch64CC::PL)
3509 .Case("vs", AArch64CC::VS)
3510 .Case("vc", AArch64CC::VC)
3511 .Case("hi", AArch64CC::HI)
3512 .Case("ls", AArch64CC::LS)
3513 .Case("ge", AArch64CC::GE)
3514 .Case("lt", AArch64CC::LT)
3515 .Case("gt", AArch64CC::GT)
3516 .Case("le", AArch64CC::LE)
3517 .Case("al", AArch64CC::AL)
3518 .Case("nv", AArch64CC::NV)
3519 .Default(AArch64CC::Invalid);
3520 
3521 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3522 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3523 .Case("none", AArch64CC::EQ)
3524 .Case("any", AArch64CC::NE)
3525 .Case("nlast", AArch64CC::HS)
3526 .Case("last", AArch64CC::LO)
3527 .Case("first", AArch64CC::MI)
3528 .Case("nfrst", AArch64CC::PL)
3529 .Case("pmore", AArch64CC::HI)
3530 .Case("plast", AArch64CC::LS)
3531 .Case("tcont", AArch64CC::GE)
3532 .Case("tstop", AArch64CC::LT)
3533 .Default(AArch64CC::Invalid);
3534 
3535 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3536 Suggestion = "nfrst";
3537 }
3538 return CC;
3539}
3540
3541/// parseCondCode - Parse a Condition Code operand.
3542bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3543 bool invertCondCode) {
3544 SMLoc S = getLoc();
3545 const AsmToken &Tok = getTok();
3546 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3547
3548 StringRef Cond = Tok.getString();
3549 std::string Suggestion;
3550 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3551 if (CC == AArch64CC::Invalid) {
3552 std::string Msg = "invalid condition code";
3553 if (!Suggestion.empty())
3554 Msg += ", did you mean " + Suggestion + "?";
3555 return TokError(Msg);
3556 }
3557 Lex(); // Eat identifier token.
3558
3559 if (invertCondCode) {
3560 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3561 return TokError("condition codes AL and NV are invalid for this instruction");
3562 CC = AArch64CC::getInvertedCondCode(CC);
3563 }
3564
3565 Operands.push_back(
3566 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3567 return false;
3568}
3569
3570ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3571 const AsmToken &Tok = getTok();
3572 SMLoc S = getLoc();
3573
3574 if (Tok.isNot(AsmToken::Identifier))
3575 return TokError("invalid operand for instruction");
3576
3577 unsigned PStateImm = -1;
3578 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3579 if (!SVCR)
3580 return ParseStatus::NoMatch;
3581 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3582 PStateImm = SVCR->Encoding;
3583
3584 Operands.push_back(
3585 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3586 Lex(); // Eat identifier token.
3587 return ParseStatus::Success;
3588}
3589
3590ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3591 const AsmToken &Tok = getTok();
3592 SMLoc S = getLoc();
3593
3594 StringRef Name = Tok.getString();
3595
3596 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3597 Lex(); // eat "za[.(b|h|s|d)]"
3598 unsigned ElementWidth = 0;
3599 auto DotPosition = Name.find('.');
3600 if (DotPosition != StringRef::npos) {
3601 const auto &KindRes =
3602 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3603 if (!KindRes)
3604 return TokError(
3605 "Expected the register to be followed by element width suffix");
3606 ElementWidth = KindRes->second;
3607 }
3608 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3609 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3610 getContext()));
3611 if (getLexer().is(AsmToken::LBrac)) {
3612 // There's no comma after a matrix operand, so we can parse the next
3613 // operand immediately.
3614 if (parseOperand(Operands, false, false))
3615 return ParseStatus::NoMatch;
3616 }
3617 return ParseStatus::Success;
3618 }
3619
3620 // Try to parse matrix register.
3621 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3622 if (!Reg)
3623 return ParseStatus::NoMatch;
3624
3625 size_t DotPosition = Name.find('.');
3626 assert(DotPosition != StringRef::npos && "Unexpected register");
3627
3628 StringRef Head = Name.take_front(DotPosition);
3629 StringRef Tail = Name.drop_front(DotPosition);
3630 StringRef RowOrColumn = Head.take_back();
3631
3632 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3633 .Case("h", MatrixKind::Row)
3634 .Case("v", MatrixKind::Col)
3635 .Default(MatrixKind::Tile);
3636
3637 // Next up, parsing the suffix
3638 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3639 if (!KindRes)
3640 return TokError(
3641 "Expected the register to be followed by element width suffix");
3642 unsigned ElementWidth = KindRes->second;
3643
3644 Lex();
3645
3646 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3647 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3648
3649 if (getLexer().is(AsmToken::LBrac)) {
3650 // There's no comma after a matrix operand, so we can parse the next
3651 // operand immediately.
3652 if (parseOperand(Operands, false, false))
3653 return ParseStatus::NoMatch;
3654 }
3655 return ParseStatus::Success;
3656}
3657
3658 /// tryParseOptionalShiftExtend - Some operands take an optional shift or extend argument. Parse
3659/// them if present.
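/// For example: "add x0, x1, x2, lsl #3" or "ldr w0, [x1, w2, uxtw #2]".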
3660 ParseStatus
3661 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3662 const AsmToken &Tok = getTok();
3663 std::string LowerID = Tok.getString().lower();
3664 AArch64_AM::ShiftExtendType ShOp =
3665 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3666 .Case("lsl", AArch64_AM::LSL)
3667 .Case("lsr", AArch64_AM::LSR)
3668 .Case("asr", AArch64_AM::ASR)
3669 .Case("ror", AArch64_AM::ROR)
3670 .Case("msl", AArch64_AM::MSL)
3671 .Case("uxtb", AArch64_AM::UXTB)
3672 .Case("uxth", AArch64_AM::UXTH)
3673 .Case("uxtw", AArch64_AM::UXTW)
3674 .Case("uxtx", AArch64_AM::UXTX)
3675 .Case("sxtb", AArch64_AM::SXTB)
3676 .Case("sxth", AArch64_AM::SXTH)
3677 .Case("sxtw", AArch64_AM::SXTW)
3678 .Case("sxtx", AArch64_AM::SXTX)
3679 .Default(AArch64_AM::InvalidShiftExtend);
3680 
3681 if (ShOp == AArch64_AM::InvalidShiftExtend)
3682 return ParseStatus::NoMatch;
3683
3684 SMLoc S = Tok.getLoc();
3685 Lex();
3686
3687 bool Hash = parseOptionalToken(AsmToken::Hash);
3688
3689 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3690 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3691 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3692 ShOp == AArch64_AM::MSL) {
3693 // We expect a number here.
3694 return TokError("expected #imm after shift specifier");
3695 }
3696
3697 // "extend" type operations don't need an immediate, #0 is implicit.
3698 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3699 Operands.push_back(
3700 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3701 return ParseStatus::Success;
3702 }
3703
3704 // Make sure we do actually have a number, identifier or a parenthesized
3705 // expression.
3706 SMLoc E = getLoc();
3707 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3708 !getTok().is(AsmToken::Identifier))
3709 return Error(E, "expected integer shift amount");
3710
3711 const MCExpr *ImmVal;
3712 if (getParser().parseExpression(ImmVal))
3713 return ParseStatus::Failure;
3714
3715 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3716 if (!MCE)
3717 return Error(E, "expected constant '#imm' after shift specifier");
3718
3719 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3720 Operands.push_back(AArch64Operand::CreateShiftExtend(
3721 ShOp, MCE->getValue(), true, S, E, getContext()));
3722 return ParseStatus::Success;
3723}
3724
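// Architecture extension names accepted after '+' by the .arch,
// .arch_extension and .cpu directives, together with the subtarget
// features each name enables.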
3725static const struct Extension {
3726 const char *Name;
3727 const FeatureBitset Features;
3728 } ExtensionMap[] = {
3729 {"crc", {AArch64::FeatureCRC}},
3730 {"sm4", {AArch64::FeatureSM4}},
3731 {"sha3", {AArch64::FeatureSHA3}},
3732 {"sha2", {AArch64::FeatureSHA2}},
3733 {"aes", {AArch64::FeatureAES}},
3734 {"crypto", {AArch64::FeatureCrypto}},
3735 {"fp", {AArch64::FeatureFPARMv8}},
3736 {"simd", {AArch64::FeatureNEON}},
3737 {"ras", {AArch64::FeatureRAS}},
3738 {"rasv2", {AArch64::FeatureRASv2}},
3739 {"lse", {AArch64::FeatureLSE}},
3740 {"predres", {AArch64::FeaturePredRes}},
3741 {"predres2", {AArch64::FeatureSPECRES2}},
3742 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3743 {"mte", {AArch64::FeatureMTE}},
3744 {"memtag", {AArch64::FeatureMTE}},
3745 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3746 {"pan", {AArch64::FeaturePAN}},
3747 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3748 {"ccpp", {AArch64::FeatureCCPP}},
3749 {"rcpc", {AArch64::FeatureRCPC}},
3750 {"rng", {AArch64::FeatureRandGen}},
3751 {"sve", {AArch64::FeatureSVE}},
3752 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3753 {"sve2", {AArch64::FeatureSVE2}},
3754 {"sve-aes", {AArch64::FeatureSVEAES}},
3755 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3756 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3757 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3758 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3759 {"sve2-bitperm",
3760 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3761 AArch64::FeatureSVE2}},
3762 {"sve2p1", {AArch64::FeatureSVE2p1}},
3763 {"ls64", {AArch64::FeatureLS64}},
3764 {"xs", {AArch64::FeatureXS}},
3765 {"pauth", {AArch64::FeaturePAuth}},
3766 {"flagm", {AArch64::FeatureFlagM}},
3767 {"rme", {AArch64::FeatureRME}},
3768 {"sme", {AArch64::FeatureSME}},
3769 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3770 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3771 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3772 {"sme2", {AArch64::FeatureSME2}},
3773 {"sme2p1", {AArch64::FeatureSME2p1}},
3774 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3775 {"hbc", {AArch64::FeatureHBC}},
3776 {"mops", {AArch64::FeatureMOPS}},
3777 {"mec", {AArch64::FeatureMEC}},
3778 {"the", {AArch64::FeatureTHE}},
3779 {"d128", {AArch64::FeatureD128}},
3780 {"lse128", {AArch64::FeatureLSE128}},
3781 {"ite", {AArch64::FeatureITE}},
3782 {"cssc", {AArch64::FeatureCSSC}},
3783 {"rcpc3", {AArch64::FeatureRCPC3}},
3784 {"gcs", {AArch64::FeatureGCS}},
3785 {"bf16", {AArch64::FeatureBF16}},
3786 {"compnum", {AArch64::FeatureComplxNum}},
3787 {"dotprod", {AArch64::FeatureDotProd}},
3788 {"f32mm", {AArch64::FeatureMatMulFP32}},
3789 {"f64mm", {AArch64::FeatureMatMulFP64}},
3790 {"fp16", {AArch64::FeatureFullFP16}},
3791 {"fp16fml", {AArch64::FeatureFP16FML}},
3792 {"i8mm", {AArch64::FeatureMatMulInt8}},
3793 {"lor", {AArch64::FeatureLOR}},
3794 {"profile", {AArch64::FeatureSPE}},
3795 // "rdma" is the name documented by binutils for the feature, but
3796 // binutils also accepts incomplete prefixes of features, so "rdm"
3797 // works too. Support both spellings here.
3798 {"rdm", {AArch64::FeatureRDM}},
3799 {"rdma", {AArch64::FeatureRDM}},
3800 {"sb", {AArch64::FeatureSB}},
3801 {"ssbs", {AArch64::FeatureSSBS}},
3802 {"tme", {AArch64::FeatureTME}},
3803 {"fp8", {AArch64::FeatureFP8}},
3804 {"faminmax", {AArch64::FeatureFAMINMAX}},
3805 {"fp8fma", {AArch64::FeatureFP8FMA}},
3806 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3807 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3808 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3809 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3810 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3811 {"lut", {AArch64::FeatureLUT}},
3812 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3813 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3814 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3815 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3816 {"cpa", {AArch64::FeatureCPA}},
3817 {"tlbiw", {AArch64::FeatureTLBIW}},
3818 {"pops", {AArch64::FeaturePoPS}},
3819 {"cmpbr", {AArch64::FeatureCMPBR}},
3820 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3821 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3822 {"fprcvt", {AArch64::FeatureFPRCVT}},
3823 {"lsfe", {AArch64::FeatureLSFE}},
3824 {"sme2p2", {AArch64::FeatureSME2p2}},
3825 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3826 {"sve2p2", {AArch64::FeatureSVE2p2}},
3827 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3828 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3829 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3830 {"lsui", {AArch64::FeatureLSUI}},
3831 {"occmo", {AArch64::FeatureOCCMO}},
3832 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3833 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3834 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3835 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3836 };
3837 
3838static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3839 if (FBS[AArch64::HasV8_0aOps])
3840 Str += "ARMv8a";
3841 if (FBS[AArch64::HasV8_1aOps])
3842 Str += "ARMv8.1a";
3843 else if (FBS[AArch64::HasV8_2aOps])
3844 Str += "ARMv8.2a";
3845 else if (FBS[AArch64::HasV8_3aOps])
3846 Str += "ARMv8.3a";
3847 else if (FBS[AArch64::HasV8_4aOps])
3848 Str += "ARMv8.4a";
3849 else if (FBS[AArch64::HasV8_5aOps])
3850 Str += "ARMv8.5a";
3851 else if (FBS[AArch64::HasV8_6aOps])
3852 Str += "ARMv8.6a";
3853 else if (FBS[AArch64::HasV8_7aOps])
3854 Str += "ARMv8.7a";
3855 else if (FBS[AArch64::HasV8_8aOps])
3856 Str += "ARMv8.8a";
3857 else if (FBS[AArch64::HasV8_9aOps])
3858 Str += "ARMv8.9a";
3859 else if (FBS[AArch64::HasV9_0aOps])
3860 Str += "ARMv9-a";
3861 else if (FBS[AArch64::HasV9_1aOps])
3862 Str += "ARMv9.1a";
3863 else if (FBS[AArch64::HasV9_2aOps])
3864 Str += "ARMv9.2a";
3865 else if (FBS[AArch64::HasV9_3aOps])
3866 Str += "ARMv9.3a";
3867 else if (FBS[AArch64::HasV9_4aOps])
3868 Str += "ARMv9.4a";
3869 else if (FBS[AArch64::HasV9_5aOps])
3870 Str += "ARMv9.5a";
3871 else if (FBS[AArch64::HasV9_6aOps])
3872 Str += "ARMv9.6a";
3873 else if (FBS[AArch64::HasV8_0rOps])
3874 Str += "ARMv8r";
3875 else {
3876 SmallVector<std::string, 2> ExtMatches;
3877 for (const auto& Ext : ExtensionMap) {
3878 // Use & in case multiple features are enabled
3879 if ((FBS & Ext.Features) != FeatureBitset())
3880 ExtMatches.push_back(Ext.Name);
3881 }
3882 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3883 }
3884}
3885
3886void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3887 SMLoc S) {
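 // The alias encoding packs op1:Cn:Cm:op2 into the low 14 bits:
 // op2 = Encoding[2:0], Cm = Encoding[6:3], Cn = Encoding[10:7],
 // op1 = Encoding[13:11]. These become the immediate and CR operands
 // of the underlying SYS instruction.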
3888 const uint16_t Op2 = Encoding & 7;
3889 const uint16_t Cm = (Encoding & 0x78) >> 3;
3890 const uint16_t Cn = (Encoding & 0x780) >> 7;
3891 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3892
3893 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3894
3895 Operands.push_back(
3896 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3897 Operands.push_back(
3898 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3899 Operands.push_back(
3900 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3901 Expr = MCConstantExpr::create(Op2, getContext());
3902 Operands.push_back(
3903 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3904}
3905
3906/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3907/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
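/// For example, "dc civac, x0" is equivalent to "sys #3, c7, c14, #1, x0".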
3908bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3909 OperandVector &Operands) {
3910 if (Name.contains('.'))
3911 return TokError("invalid operand");
3912
3913 Mnemonic = Name;
3914 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3915
3916 const AsmToken &Tok = getTok();
3917 StringRef Op = Tok.getString();
3918 SMLoc S = Tok.getLoc();
3919
3920 if (Mnemonic == "ic") {
3921 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3922 if (!IC)
3923 return TokError("invalid operand for IC instruction");
3924 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3925 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3926 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3927 return TokError(Str);
3928 }
3929 createSysAlias(IC->Encoding, Operands, S);
3930 } else if (Mnemonic == "dc") {
3931 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3932 if (!DC)
3933 return TokError("invalid operand for DC instruction");
3934 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3935 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3936 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3937 return TokError(Str);
3938 }
3939 createSysAlias(DC->Encoding, Operands, S);
3940 } else if (Mnemonic == "at") {
3941 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3942 if (!AT)
3943 return TokError("invalid operand for AT instruction");
3944 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3945 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3946 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3947 return TokError(Str);
3948 }
3949 createSysAlias(AT->Encoding, Operands, S);
3950 } else if (Mnemonic == "tlbi") {
3951 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3952 if (!TLBI)
3953 return TokError("invalid operand for TLBI instruction");
3954 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3955 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3956 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3957 return TokError(Str);
3958 }
3959 createSysAlias(TLBI->Encoding, Operands, S);
3960 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3961
3962 if (Op.lower() != "rctx")
3963 return TokError("invalid operand for prediction restriction instruction");
3964
3965 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3966 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3967 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3968
3969 if (Mnemonic == "cosp" && !hasSpecres2)
3970 return TokError("COSP requires: predres2");
3971 if (!hasPredres)
3972 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3973
3974 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3975 : Mnemonic == "dvp" ? 0b101
3976 : Mnemonic == "cosp" ? 0b110
3977 : Mnemonic == "cpp" ? 0b111
3978 : 0;
3979 assert(PRCTX_Op2 &&
3980 "Invalid mnemonic for prediction restriction instruction");
3981 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3982 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3983
3984 createSysAlias(Encoding, Operands, S);
3985 }
3986
3987 Lex(); // Eat operand.
3988
3989 bool ExpectRegister = !Op.contains_insensitive("all");
3990 bool HasRegister = false;
3991
3992 // Check for the optional register operand.
3993 if (parseOptionalToken(AsmToken::Comma)) {
3994 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3995 return TokError("expected register operand");
3996 HasRegister = true;
3997 }
3998
3999 if (ExpectRegister && !HasRegister)
4000 return TokError("specified " + Mnemonic + " op requires a register");
4001 else if (!ExpectRegister && HasRegister)
4002 return TokError("specified " + Mnemonic + " op does not use a register");
4003
4004 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4005 return true;
4006
4007 return false;
4008}
4009
4010/// parseSyspAlias - The TLBIP instructions are simple aliases for
4011/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4012bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4013 OperandVector &Operands) {
4014 if (Name.contains('.'))
4015 return TokError("invalid operand");
4016
4017 Mnemonic = Name;
4018 Operands.push_back(
4019 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4020
4021 const AsmToken &Tok = getTok();
4022 StringRef Op = Tok.getString();
4023 SMLoc S = Tok.getLoc();
4024
4025 if (Mnemonic == "tlbip") {
4026 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4027 if (HasnXSQualifier) {
4028 Op = Op.drop_back(3);
4029 }
4030 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4031 if (!TLBIorig)
4032 return TokError("invalid operand for TLBIP instruction");
4033 const AArch64TLBI::TLBI TLBI(
4034 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4035 TLBIorig->NeedsReg,
4036 HasnXSQualifier
4037 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4038 : TLBIorig->FeaturesRequired);
4039 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4040 std::string Name =
4041 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4042 std::string Str("TLBIP " + Name + " requires: ");
4043 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
4044 return TokError(Str);
4045 }
4046 createSysAlias(TLBI.Encoding, Operands, S);
4047 }
4048
4049 Lex(); // Eat operand.
4050
4051 if (parseComma())
4052 return true;
4053
4054 if (Tok.isNot(AsmToken::Identifier))
4055 return TokError("expected register identifier");
4056 auto Result = tryParseSyspXzrPair(Operands);
4057 if (Result.isNoMatch())
4058 Result = tryParseGPRSeqPair(Operands);
4059 if (!Result.isSuccess())
4060 return TokError("specified " + Mnemonic +
4061 " op requires a pair of registers");
4062
4063 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4064 return true;
4065
4066 return false;
4067}
4068
4069ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4070 MCAsmParser &Parser = getParser();
4071 const AsmToken &Tok = getTok();
4072
4073 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4074 return TokError("'csync' operand expected");
4075 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4076 // Immediate operand.
4077 const MCExpr *ImmVal;
4078 SMLoc ExprLoc = getLoc();
4079 AsmToken IntTok = Tok;
4080 if (getParser().parseExpression(ImmVal))
4081 return ParseStatus::Failure;
4082 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4083 if (!MCE)
4084 return Error(ExprLoc, "immediate value expected for barrier operand");
4085 int64_t Value = MCE->getValue();
4086 if (Mnemonic == "dsb" && Value > 15) {
4087 // This case is a no match here, but it might be matched by the nXS
4088 // variant. We deliberately do not unlex the optional '#', as it is not
4089 // needed to characterize an integer immediate.
4090 Parser.getLexer().UnLex(IntTok);
4091 return ParseStatus::NoMatch;
4092 }
4093 if (Value < 0 || Value > 15)
4094 return Error(ExprLoc, "barrier operand out of range");
4095 auto DB = AArch64DB::lookupDBByEncoding(Value);
4096 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4097 ExprLoc, getContext(),
4098 false /*hasnXSModifier*/));
4099 return ParseStatus::Success;
4100 }
4101
4102 if (Tok.isNot(AsmToken::Identifier))
4103 return TokError("invalid operand for instruction");
4104
4105 StringRef Operand = Tok.getString();
4106 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4107 auto DB = AArch64DB::lookupDBByName(Operand);
4108 // The only valid named option for ISB is 'sy'
4109 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4110 return TokError("'sy' or #imm operand expected");
4111 // The only valid named option for TSB is 'csync'
4112 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4113 return TokError("'csync' operand expected");
4114 if (!DB && !TSB) {
4115 if (Mnemonic == "dsb") {
4116 // This case is a no match here, but it might be matched by the nXS
4117 // variant.
4118 return ParseStatus::NoMatch;
4119 }
4120 return TokError("invalid barrier option name");
4121 }
4122
4123 Operands.push_back(AArch64Operand::CreateBarrier(
4124 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4125 getContext(), false /*hasnXSModifier*/));
4126 Lex(); // Consume the option
4127
4128 return ParseStatus::Success;
4129}
4130
4131 ParseStatus
4132 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4133 const AsmToken &Tok = getTok();
4134
4135 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4136 if (Mnemonic != "dsb")
4137 return ParseStatus::Failure;
4138
4139 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4140 // Immediate operand.
4141 const MCExpr *ImmVal;
4142 SMLoc ExprLoc = getLoc();
4143 if (getParser().parseExpression(ImmVal))
4144 return ParseStatus::Failure;
4145 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4146 if (!MCE)
4147 return Error(ExprLoc, "immediate value expected for barrier operand");
4148 int64_t Value = MCE->getValue();
4149 // v8.7-A DSB in the nXS variant accepts only the following immediate
4150 // values: 16, 20, 24, 28.
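 // i.e. the osh/nsh/ish/sy barrier domains with the nXS qualifier applied.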
4151 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4152 return Error(ExprLoc, "barrier operand out of range");
4153 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4154 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4155 ExprLoc, getContext(),
4156 true /*hasnXSModifier*/));
4157 return ParseStatus::Success;
4158 }
4159
4160 if (Tok.isNot(AsmToken::Identifier))
4161 return TokError("invalid operand for instruction");
4162
4163 StringRef Operand = Tok.getString();
4164 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4165
4166 if (!DB)
4167 return TokError("invalid barrier option name");
4168
4169 Operands.push_back(
4170 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4171 getContext(), true /*hasnXSModifier*/));
4172 Lex(); // Consume the option
4173
4174 return ParseStatus::Success;
4175}
4176
4177ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4178 const AsmToken &Tok = getTok();
4179
4180 if (Tok.isNot(AsmToken::Identifier))
4181 return ParseStatus::NoMatch;
4182
4183 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4184 return ParseStatus::NoMatch;
4185
4186 int MRSReg, MSRReg;
4187 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4188 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4189 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4190 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4191 } else
4192 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4193
4194 unsigned PStateImm = -1;
4195 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4196 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4197 PStateImm = PState15->Encoding;
4198 if (!PState15) {
4199 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4200 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4201 PStateImm = PState1->Encoding;
4202 }
4203
4204 Operands.push_back(
4205 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4206 PStateImm, getContext()));
4207 Lex(); // Eat identifier
4208
4209 return ParseStatus::Success;
4210}
4211
4212 ParseStatus
4213 AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4214 SMLoc S = getLoc();
4215 const AsmToken &Tok = getTok();
4216 if (Tok.isNot(AsmToken::Identifier))
4217 return TokError("invalid operand for instruction");
4218
4219 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4220 if (!PH)
4221 return TokError("invalid operand for instruction");
4222
4223 Operands.push_back(AArch64Operand::CreatePHintInst(
4224 PH->Encoding, Tok.getString(), S, getContext()));
4225 Lex(); // Eat identifier token.
4226 return ParseStatus::Success;
4227}
4228
4229/// tryParseNeonVectorRegister - Parse a vector register operand.
4230bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4231 if (getTok().isNot(AsmToken::Identifier))
4232 return true;
4233
4234 SMLoc S = getLoc();
4235 // Check for a vector register specifier first.
4236 StringRef Kind;
4237 MCRegister Reg;
4238 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4239 if (!Res.isSuccess())
4240 return true;
4241
4242 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4243 if (!KindRes)
4244 return true;
4245
4246 unsigned ElementWidth = KindRes->second;
4247 Operands.push_back(
4248 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4249 S, getLoc(), getContext()));
4250
4251 // If there was an explicit qualifier, that goes on as a literal text
4252 // operand.
4253 if (!Kind.empty())
4254 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4255
4256 return tryParseVectorIndex(Operands).isFailure();
4257}
4258
4259ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4260 SMLoc SIdx = getLoc();
4261 if (parseOptionalToken(AsmToken::LBrac)) {
4262 const MCExpr *ImmVal;
4263 if (getParser().parseExpression(ImmVal))
4264 return ParseStatus::NoMatch;
4265 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4266 if (!MCE)
4267 return TokError("immediate value expected for vector index");
4268
4269 SMLoc E = getLoc();
4270
4271 if (parseToken(AsmToken::RBrac, "']' expected"))
4272 return ParseStatus::Failure;
4273
4274 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4275 E, getContext()));
4276 return ParseStatus::Success;
4277 }
4278
4279 return ParseStatus::NoMatch;
4280}
4281
4282// tryParseVectorRegister - Try to parse a vector register name with
4283// optional kind specifier. If it is a register specifier, eat the token
4284// and return it.
4285ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4286 StringRef &Kind,
4287 RegKind MatchKind) {
4288 const AsmToken &Tok = getTok();
4289
4290 if (Tok.isNot(AsmToken::Identifier))
4291 return ParseStatus::NoMatch;
4292
4293 StringRef Name = Tok.getString();
4294 // If there is a kind specifier, it's separated from the register name by
4295 // a '.'.
4296 size_t Start = 0, Next = Name.find('.');
4297 StringRef Head = Name.slice(Start, Next);
4298 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4299
4300 if (RegNum) {
4301 if (Next != StringRef::npos) {
4302 Kind = Name.substr(Next);
4303 if (!isValidVectorKind(Kind, MatchKind))
4304 return TokError("invalid vector kind qualifier");
4305 }
4306 Lex(); // Eat the register token.
4307
4308 Reg = RegNum;
4309 return ParseStatus::Success;
4310 }
4311
4312 return ParseStatus::NoMatch;
4313}
4314
4315ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4316 OperandVector &Operands) {
4317 ParseStatus Status =
4318 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4319 if (!Status.isSuccess())
4320 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4321 return Status;
4322}
4323
4324/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4325template <RegKind RK>
4326 ParseStatus
4327 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4328 // Check for a SVE predicate register specifier first.
4329 const SMLoc S = getLoc();
4330 StringRef Kind;
4331 MCRegister RegNum;
4332 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4333 if (!Res.isSuccess())
4334 return Res;
4335
4336 const auto &KindRes = parseVectorKind(Kind, RK);
4337 if (!KindRes)
4338 return ParseStatus::NoMatch;
4339
4340 unsigned ElementWidth = KindRes->second;
4341 Operands.push_back(AArch64Operand::CreateVectorReg(
4342 RegNum, RK, ElementWidth, S,
4343 getLoc(), getContext()));
4344
4345 if (getLexer().is(AsmToken::LBrac)) {
4346 if (RK == RegKind::SVEPredicateAsCounter) {
4347 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4348 if (ResIndex.isSuccess())
4349 return ParseStatus::Success;
4350 } else {
4351 // Indexed predicate, there's no comma so try to parse the next operand
4352 // immediately.
4353 if (parseOperand(Operands, false, false))
4354 return ParseStatus::NoMatch;
4355 }
4356 }
4357
4358 // Not all predicates are followed by a '/m' or '/z'.
4359 if (getTok().isNot(AsmToken::Slash))
4360 return ParseStatus::Success;
4361
4362 // But when they do they shouldn't have an element type suffix.
4363 if (!Kind.empty())
4364 return Error(S, "not expecting size suffix");
4365
4366 // Add a literal slash as operand
4367 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4368
4369 Lex(); // Eat the slash.
4370
4371 // Zeroing or merging?
4372 auto Pred = getTok().getString().lower();
4373 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4374 return Error(getLoc(), "expecting 'z' predication");
4375
4376 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4377 return Error(getLoc(), "expecting 'm' or 'z' predication");
4378
4379 // Add zero/merge token.
4380 const char *ZM = Pred == "z" ? "z" : "m";
4381 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4382
4383 Lex(); // Eat zero/merge token.
4384 return ParseStatus::Success;
4385}
4386
4387/// parseRegister - Parse a register operand.
4388bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4389 // Try for a Neon vector register.
4390 if (!tryParseNeonVectorRegister(Operands))
4391 return false;
4392
4393 if (tryParseZTOperand(Operands).isSuccess())
4394 return false;
4395
4396 // Otherwise try for a scalar register.
4397 if (tryParseGPROperand<false>(Operands).isSuccess())
4398 return false;
4399
4400 return true;
4401}
4402
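/// parseSymbolicImmVal - Parse an immediate expression that may carry a
/// relocation specifier prefix, e.g. ":lo12:symbol" or ":got:symbol".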
4403bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4404 bool HasELFModifier = false;
4405 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4406 
4407 if (parseOptionalToken(AsmToken::Colon)) {
4408 HasELFModifier = true;
4409
4410 if (getTok().isNot(AsmToken::Identifier))
4411 return TokError("expect relocation specifier in operand after ':'");
4412
4413 std::string LowerCase = getTok().getIdentifier().lower();
4414 RefKind =
4415 StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4416 .Case("lo12", AArch64MCExpr::VK_LO12)
4417 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4418 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4419 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4420 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4421 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4422 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4423 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4424 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4425 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4426 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4427 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4428 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4429 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4430 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4431 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4432 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4433 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4434 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4435 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4436 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4437 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4438 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4439 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4440 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4441 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4442 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4443 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4444 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4445 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4446 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4447 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4448 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4449 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4450 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4451 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4452 .Case("tlsdesc_auth_lo12", AArch64MCExpr::VK_TLSDESC_AUTH_LO12)
4453 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4454 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4455 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4456 .Case("got_auth", AArch64MCExpr::VK_GOT_AUTH_PAGE)
4457 .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
4458 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4459 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4460 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4461 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4462 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4463 .Case("tlsdesc_auth", AArch64MCExpr::VK_TLSDESC_AUTH_PAGE)
4464 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4465 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4466 .Default(AArch64MCExpr::VK_INVALID);
4467 
4468 if (RefKind == AArch64MCExpr::VK_INVALID)
4469 return TokError("expect relocation specifier in operand after ':'");
4470
4471 Lex(); // Eat identifier
4472
4473 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4474 return true;
4475 }
4476
4477 if (getParser().parseExpression(ImmVal))
4478 return true;
4479
4480 if (HasELFModifier)
4481 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4482
4483 return false;
4484}
4485
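/// tryParseMatrixTileList - Parse an SME tile list such as "{ za0.d, za2.d }",
/// or the "{ za }" shorthand that selects the whole array.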
4486ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4487 if (getTok().isNot(AsmToken::LCurly))
4488 return ParseStatus::NoMatch;
4489
4490 auto ParseMatrixTile = [this](unsigned &Reg,
4491 unsigned &ElementWidth) -> ParseStatus {
4492 StringRef Name = getTok().getString();
4493 size_t DotPosition = Name.find('.');
4494 if (DotPosition == StringRef::npos)
4495 return ParseStatus::NoMatch;
4496
4497 unsigned RegNum = matchMatrixTileListRegName(Name);
4498 if (!RegNum)
4499 return ParseStatus::NoMatch;
4500
4501 StringRef Tail = Name.drop_front(DotPosition);
4502 const std::optional<std::pair<int, int>> &KindRes =
4503 parseVectorKind(Tail, RegKind::Matrix);
4504 if (!KindRes)
4505 return TokError(
4506 "Expected the register to be followed by element width suffix");
4507 ElementWidth = KindRes->second;
4508 Reg = RegNum;
4509 Lex(); // Eat the register.
4510 return ParseStatus::Success;
4511 };
4512
4513 SMLoc S = getLoc();
4514 auto LCurly = getTok();
4515 Lex(); // Eat left bracket token.
4516
4517 // Empty matrix list
4518 if (parseOptionalToken(AsmToken::RCurly)) {
4519 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4520 /*RegMask=*/0, S, getLoc(), getContext()));
4521 return ParseStatus::Success;
4522 }
4523
4524 // Try parse {za} alias early
4525 if (getTok().getString().equals_insensitive("za")) {
4526 Lex(); // Eat 'za'
4527
4528 if (parseToken(AsmToken::RCurly, "'}' expected"))
4529 return ParseStatus::Failure;
4530
4531 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4532 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4533 return ParseStatus::Success;
4534 }
4535
4536 SMLoc TileLoc = getLoc();
4537
4538 unsigned FirstReg, ElementWidth;
4539 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4540 if (!ParseRes.isSuccess()) {
4541 getLexer().UnLex(LCurly);
4542 return ParseRes;
4543 }
4544
4545 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4546
4547 unsigned PrevReg = FirstReg;
4548
4550 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4551
4552 SmallSet<unsigned, 8> SeenRegs;
4553 SeenRegs.insert(FirstReg);
4554
4555 while (parseOptionalToken(AsmToken::Comma)) {
4556 TileLoc = getLoc();
4557 unsigned Reg, NextElementWidth;
4558 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4559 if (!ParseRes.isSuccess())
4560 return ParseRes;
4561
4562 // Element size must match on all regs in the list.
4563 if (ElementWidth != NextElementWidth)
4564 return Error(TileLoc, "mismatched register size suffix");
4565
4566 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4567 Warning(TileLoc, "tile list not in ascending order");
4568
4569 if (SeenRegs.contains(Reg))
4570 Warning(TileLoc, "duplicate tile in list");
4571 else {
4572 SeenRegs.insert(Reg);
4573 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4574 }
4575
4576 PrevReg = Reg;
4577 }
4578
4579 if (parseToken(AsmToken::RCurly, "'}' expected"))
4580 return ParseStatus::Failure;
4581
4582 unsigned RegMask = 0;
4583 for (auto Reg : DRegs)
4584 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4585 RI->getEncodingValue(AArch64::ZAD0));
4586 Operands.push_back(
4587 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4588
4589 return ParseStatus::Success;
4590}
4591
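/// tryParseVectorList - Parse a brace-enclosed vector register list, e.g.
/// "{ v0.8b, v1.8b }", a dash range such as "{ z0.d - z3.d }", or an SME2
/// strided list such as "{ z0.d, z8.d }".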
4592template <RegKind VectorKind>
4593ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4594 bool ExpectMatch) {
4595 MCAsmParser &Parser = getParser();
4596 if (!getTok().is(AsmToken::LCurly))
4597 return ParseStatus::NoMatch;
4598
4599 // Wrapper around parse function
4600 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4601 bool NoMatchIsError) -> ParseStatus {
4602 auto RegTok = getTok();
4603 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4604 if (ParseRes.isSuccess()) {
4605 if (parseVectorKind(Kind, VectorKind))
4606 return ParseRes;
4607 llvm_unreachable("Expected a valid vector kind");
4608 }
4609
4610 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4611 RegTok.getString().equals_insensitive("zt0"))
4612 return ParseStatus::NoMatch;
4613
4614 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4615 (ParseRes.isNoMatch() && NoMatchIsError &&
4616 !RegTok.getString().starts_with_insensitive("za")))
4617 return Error(Loc, "vector register expected");
4618
4619 return ParseStatus::NoMatch;
4620 };
4621
4622 int NumRegs = getNumRegsForRegKind(VectorKind);
4623 SMLoc S = getLoc();
4624 auto LCurly = getTok();
4625 Lex(); // Eat left bracket token.
4626
4627 StringRef Kind;
4628 MCRegister FirstReg;
4629 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4630
4631 // Put back the original left bracket if there was no match, so that
4632 // different types of list-operands can be matched (e.g. SVE, Neon).
4633 if (ParseRes.isNoMatch())
4634 Parser.getLexer().UnLex(LCurly);
4635
4636 if (!ParseRes.isSuccess())
4637 return ParseRes;
4638
4639 int64_t PrevReg = FirstReg;
4640 unsigned Count = 1;
4641
4642 int Stride = 1;
4643 if (parseOptionalToken(AsmToken::Minus)) {
4644 SMLoc Loc = getLoc();
4645 StringRef NextKind;
4646
4647 MCRegister Reg;
4648 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4649 if (!ParseRes.isSuccess())
4650 return ParseRes;
4651
4652 // Any kind suffixes must match on all regs in the list.
4653 if (Kind != NextKind)
4654 return Error(Loc, "mismatched register size suffix");
4655
4656 unsigned Space =
4657 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4658
4659 if (Space == 0 || Space > 3)
4660 return Error(Loc, "invalid number of vectors");
4661
4662 Count += Space;
4663 }
4664 else {
4665 bool HasCalculatedStride = false;
4666 while (parseOptionalToken(AsmToken::Comma)) {
4667 SMLoc Loc = getLoc();
4668 StringRef NextKind;
4669 MCRegister Reg;
4670 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4671 if (!ParseRes.isSuccess())
4672 return ParseRes;
4673
4674 // Any kind suffixes must match on all regs in the list.
4675 if (Kind != NextKind)
4676 return Error(Loc, "mismatched register size suffix");
4677
4678 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4679 unsigned PrevRegVal =
4680 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4681 if (!HasCalculatedStride) {
4682 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4683 : (RegVal + NumRegs - PrevRegVal);
4684 HasCalculatedStride = true;
4685 }
4686
4687 // Register must be incremental (with a wraparound at last register).
4688 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4689 return Error(Loc, "registers must have the same sequential stride");
4690
4691 PrevReg = Reg;
4692 ++Count;
4693 }
4694 }
4695
4696 if (parseToken(AsmToken::RCurly, "'}' expected"))
4697 return ParseStatus::Failure;
4698
4699 if (Count > 4)
4700 return Error(S, "invalid number of vectors");
4701
4702 unsigned NumElements = 0;
4703 unsigned ElementWidth = 0;
4704 if (!Kind.empty()) {
4705 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4706 std::tie(NumElements, ElementWidth) = *VK;
4707 }
4708
4709 Operands.push_back(AArch64Operand::CreateVectorList(
4710 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4711 getLoc(), getContext()));
4712
4713 return ParseStatus::Success;
4714}
4715
4716/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4717bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4718 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4719 if (!ParseRes.isSuccess())
4720 return true;
4721
4722 return tryParseVectorIndex(Operands).isFailure();
4723}
4724
4725ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4726 SMLoc StartLoc = getLoc();
4727
4728 MCRegister RegNum;
4729 ParseStatus Res = tryParseScalarRegister(RegNum);
4730 if (!Res.isSuccess())
4731 return Res;
4732
4733 if (!parseOptionalToken(AsmToken::Comma)) {
4734 Operands.push_back(AArch64Operand::CreateReg(
4735 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4736 return ParseStatus::Success;
4737 }
4738
4739 parseOptionalToken(AsmToken::Hash);
4740
4741 if (getTok().isNot(AsmToken::Integer))
4742 return Error(getLoc(), "index must be absent or #0");
4743
4744 const MCExpr *ImmVal;
4745 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4746 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4747 return Error(getLoc(), "index must be absent or #0");
4748
4749 Operands.push_back(AArch64Operand::CreateReg(
4750 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4751 return ParseStatus::Success;
4752}
4753
4754ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4755 SMLoc StartLoc = getLoc();
4756 const AsmToken &Tok = getTok();
4757 std::string Name = Tok.getString().lower();
4758
4759 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4760
4761 if (RegNum == 0)
4762 return ParseStatus::NoMatch;
4763
4764 Operands.push_back(AArch64Operand::CreateReg(
4765 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4766 Lex(); // Eat register.
4767
4768 // Check if register is followed by an index
4769 if (parseOptionalToken(AsmToken::LBrac)) {
4770 Operands.push_back(
4771 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4772 const MCExpr *ImmVal;
4773 if (getParser().parseExpression(ImmVal))
4774 return ParseStatus::NoMatch;
4775 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4776 if (!MCE)
4777 return TokError("immediate value expected for vector index");
4778 Operands.push_back(AArch64Operand::CreateImm(
4779 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4780 getLoc(), getContext()));
4781 if (parseOptionalToken(AsmToken::Comma))
4782 if (parseOptionalMulOperand(Operands))
4783 return ParseStatus::Failure;
4784 if (parseToken(AsmToken::RBrac, "']' expected"))
4785 return ParseStatus::Failure;
4786 Operands.push_back(
4787 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4788 }
4789 return ParseStatus::Success;
4790}
4791
4792template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4793ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4794 SMLoc StartLoc = getLoc();
4795
4796 MCRegister RegNum;
4797 ParseStatus Res = tryParseScalarRegister(RegNum);
4798 if (!Res.isSuccess())
4799 return Res;
4800
4801 // No shift/extend is the default.
4802 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4803 Operands.push_back(AArch64Operand::CreateReg(
4804 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4805 return ParseStatus::Success;
4806 }
4807
4808 // Eat the comma
4809 Lex();
4810
4811 // Match the shift
4812 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4813 Res = tryParseOptionalShiftExtend(ExtOpnd);
4814 if (!Res.isSuccess())
4815 return Res;
4816
4817 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4818 Operands.push_back(AArch64Operand::CreateReg(
4819 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4820 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4821 Ext->hasShiftExtendAmount()));
4822
4823 return ParseStatus::Success;
4824}
4825
4826bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4827 MCAsmParser &Parser = getParser();
4828
4829 // Some SVE instructions have a decoration after the immediate, i.e.
4830 // "mul vl". We parse them here and add tokens, which must be present in the
4831 // asm string in the tablegen instruction.
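 // For example: "ldr z0, [x0, #1, mul vl]" or "cntb x0, all, mul #4".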
4832 bool NextIsVL =
4833 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4834 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4835 if (!getTok().getString().equals_insensitive("mul") ||
4836 !(NextIsVL || NextIsHash))
4837 return true;
4838
4839 Operands.push_back(
4840 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4841 Lex(); // Eat the "mul"
4842
4843 if (NextIsVL) {
4844 Operands.push_back(
4845 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4846 Lex(); // Eat the "vl"
4847 return false;
4848 }
4849
4850 if (NextIsHash) {
4851 Lex(); // Eat the #
4852 SMLoc S = getLoc();
4853
4854 // Parse immediate operand.
4855 const MCExpr *ImmVal;
4856 if (!Parser.parseExpression(ImmVal))
4857 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4858 Operands.push_back(AArch64Operand::CreateImm(
4859 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4860 getContext()));
4861 return false;
4862 }
4863 }
4864
4865 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4866}
4867
4868bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4869 StringRef &VecGroup) {
4870 MCAsmParser &Parser = getParser();
4871 auto Tok = Parser.getTok();
4872 if (Tok.isNot(AsmToken::Identifier))
4873 return true;
4874
4875 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4876 .Case("vgx2", "vgx2")
4877 .Case("vgx4", "vgx4")
4878 .Default("");
4879
4880 if (VG.empty())
4881 return true;
4882
4883 VecGroup = VG;
4884 Parser.Lex(); // Eat vgx[2|4]
4885 return false;
4886}
4887
4888bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4889 auto Tok = getTok();
4890 if (Tok.isNot(AsmToken::Identifier))
4891 return true;
4892
4893 auto Keyword = Tok.getString();
4894 Keyword = StringSwitch<StringRef>(Keyword.lower())
4895 .Case("sm", "sm")
4896 .Case("za", "za")
4897 .Default(Keyword);
4898 Operands.push_back(
4899 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4900
4901 Lex();
4902 return false;
4903}
4904
4905 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4906/// operand regardless of the mnemonic.
4907bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4908 bool invertCondCode) {
4909 MCAsmParser &Parser = getParser();
4910
4911 ParseStatus ResTy =
4912 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4913
4914 // Check if the current operand has a custom associated parser, if so, try to
4915 // custom parse the operand, or fallback to the general approach.
4916 if (ResTy.isSuccess())
4917 return false;
4918 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4919 // there was a match, but an error occurred, in which case, just return that
4920 // the operand parsing failed.
4921 if (ResTy.isFailure())
4922 return true;
4923
4924 // Nothing custom, so do general case parsing.
4925 SMLoc S, E;
4926 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4927 if (parseOptionalToken(AsmToken::Comma)) {
4928 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4929 if (!Res.isNoMatch())
4930 return Res.isFailure();
4931 getLexer().UnLex(SavedTok);
4932 }
4933 return false;
4934 };
4935 switch (getLexer().getKind()) {
4936 default: {
4937 SMLoc S = getLoc();
4938 const MCExpr *Expr;
4939 if (parseSymbolicImmVal(Expr))
4940 return Error(S, "invalid operand");
4941
4942 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4943 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4944 return parseOptionalShiftExtend(getTok());
4945 }
4946 case AsmToken::LBrac: {
4947 Operands.push_back(
4948 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4949 Lex(); // Eat '['
4950
4951 // There's no comma after a '[', so we can parse the next operand
4952 // immediately.
4953 return parseOperand(Operands, false, false);
4954 }
4955 case AsmToken::LCurly: {
4956 if (!parseNeonVectorList(Operands))
4957 return false;
4958
4959 Operands.push_back(
4960 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4961 Lex(); // Eat '{'
4962
4963 // There's no comma after a '{', so we can parse the next operand
4964 // immediately.
4965 return parseOperand(Operands, false, false);
4966 }
4967 case AsmToken::Identifier: {
4968 // See if this is a "VG" decoration used by SME instructions.
4969 StringRef VecGroup;
4970 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4971 Operands.push_back(
4972 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4973 return false;
4974 }
4975 // If we're expecting a Condition Code operand, then just parse that.
4976 if (isCondCode)
4977 return parseCondCode(Operands, invertCondCode);
4978
4979 // If it's a register name, parse it.
4980 if (!parseRegister(Operands)) {
4981 // Parse an optional shift/extend modifier.
4982 AsmToken SavedTok = getTok();
4983 if (parseOptionalToken(AsmToken::Comma)) {
4984 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4985 // such cases and don't report an error when <label> happens to match a
4986 // shift/extend modifier.
4987 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4988 /*ParseForAllFeatures=*/true);
4989 if (!Res.isNoMatch())
4990 return Res.isFailure();
4991 Res = tryParseOptionalShiftExtend(Operands);
4992 if (!Res.isNoMatch())
4993 return Res.isFailure();
4994 getLexer().UnLex(SavedTok);
4995 }
4996 return false;
4997 }
4998
4999 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5000 // by SVE instructions.
5001 if (!parseOptionalMulOperand(Operands))
5002 return false;
5003
5004 // If this is a two-word mnemonic, parse its special keyword
5005 // operand as an identifier.
5006 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5007 Mnemonic == "gcsb")
5008 return parseKeywordOperand(Operands);
5009
5010 // This was not a register so parse other operands that start with an
5011 // identifier (like labels) as expressions and create them as immediates.
5012 const MCExpr *IdVal;
5013 S = getLoc();
5014 if (getParser().parseExpression(IdVal))
5015 return true;
5016 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5017 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5018 return false;
5019 }
5020 case AsmToken::Integer:
5021 case AsmToken::Real:
5022 case AsmToken::Hash: {
5023 // #42 -> immediate.
5024 S = getLoc();
5025
5026 parseOptionalToken(AsmToken::Hash);
5027
5028 // Parse a negative sign
5029 bool isNegative = false;
5030 if (getTok().is(AsmToken::Minus)) {
5031 isNegative = true;
5032 // We need to consume this token only when we have a Real, otherwise
5033 // we let parseSymbolicImmVal take care of it
5034 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5035 Lex();
5036 }
5037
5038 // The only Real that should come through here is a literal #0.0 for
5039 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5040 // so convert the value.
5041 const AsmToken &Tok = getTok();
5042 if (Tok.is(AsmToken::Real)) {
5043 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5044 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5045 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5046 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5047 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5048 return TokError("unexpected floating point literal");
5049 else if (IntVal != 0 || isNegative)
5050 return TokError("expected floating-point constant #0.0");
5051 Lex(); // Eat the token.
5052
5053 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5054 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5055 return false;
5056 }
5057
5058 const MCExpr *ImmVal;
5059 if (parseSymbolicImmVal(ImmVal))
5060 return true;
5061
5062 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5063 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5064
5065 // Parse an optional shift/extend modifier.
5066 return parseOptionalShiftExtend(Tok);
5067 }
5068 case AsmToken::Equal: {
5069 SMLoc Loc = getLoc();
5070 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5071 return TokError("unexpected token in operand");
5072 Lex(); // Eat '='
5073 const MCExpr *SubExprVal;
5074 if (getParser().parseExpression(SubExprVal))
5075 return true;
5076
5077 if (Operands.size() < 2 ||
5078 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5079 return Error(Loc, "Only valid when first operand is register");
5080
5081 bool IsXReg =
5082 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5083 Operands[1]->getReg());
5084
5085 MCContext& Ctx = getContext();
5086 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5087 // If the op is an imm that fits in a mov, replace the ldr with a mov.
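 // e.g. "ldr x0, =0x20000" becomes "movz x0, #2, lsl #16"; values that cannot
 // be materialized with a single movz fall through to the constant pool below.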
5088 if (isa<MCConstantExpr>(SubExprVal)) {
5089 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5090 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5091 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5092 ShiftAmt += 16;
5093 Imm >>= 16;
5094 }
5095 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5096 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5097 Operands.push_back(AArch64Operand::CreateImm(
5098 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5099 if (ShiftAmt)
5100 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5101 ShiftAmt, true, S, E, Ctx));
5102 return false;
5103 }
5104 APInt Simm = APInt(64, Imm << ShiftAmt);
5105 // check if the immediate is an unsigned or signed 32-bit int for W regs
5106 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5107 return Error(Loc, "Immediate too large for register");
5108 }
5109 // If it is a label or an imm that cannot fit in a movz, put it into CP.
5110 const MCExpr *CPLoc =
5111 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5112 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5113 return false;
5114 }
5115 }
5116}
5117
5118bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5119 const MCExpr *Expr = nullptr;
5120 SMLoc L = getLoc();
5121 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5122 return true;
5123 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5124 if (check(!Value, L, "expected constant expression"))
5125 return true;
5126 Out = Value->getValue();
5127 return false;
5128}
5129
5130bool AArch64AsmParser::parseComma() {
5131 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5132 return true;
5133 // Eat the comma
5134 Lex();
5135 return false;
5136}
5137
5138bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5139 unsigned First, unsigned Last) {
5140 MCRegister Reg;
5141 SMLoc Start, End;
5142 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5143 return true;
5144
5145 // Special handling for FP and LR; they aren't linearly after x28 in
5146 // the registers enum.
5147 unsigned RangeEnd = Last;
5148 if (Base == AArch64::X0) {
5149 if (Last == AArch64::FP) {
5150 RangeEnd = AArch64::X28;
5151 if (Reg == AArch64::FP) {
5152 Out = 29;
5153 return false;
5154 }
5155 }
5156 if (Last == AArch64::LR) {
5157 RangeEnd = AArch64::X28;
5158 if (Reg == AArch64::FP) {
5159 Out = 29;
5160 return false;
5161 } else if (Reg == AArch64::LR) {
5162 Out = 30;
5163 return false;
5164 }
5165 }
5166 }
5167
5168 if (check(Reg < First || Reg > RangeEnd, Start,
5169 Twine("expected register in range ") +
5170 AArch64InstPrinter::getRegisterName(First) + " to " +
5171 AArch64InstPrinter::getRegisterName(Last)))
5172 return true;
5173 Out = Reg - Base;
5174 return false;
5175}
5176
5177bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5178 const MCParsedAsmOperand &Op2) const {
5179 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5180 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5181
5182 if (AOp1.isVectorList() && AOp2.isVectorList())
5183 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5184 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5185 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5186
5187 if (!AOp1.isReg() || !AOp2.isReg())
5188 return false;
5189
5190 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5191 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5192 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5193
5194 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5195 "Testing equality of non-scalar registers not supported");
5196
5197 // Check if the registers match their sub/super register classes.
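// Illustrative example: with an EqualsSuperReg constraint on the first
// operand, w3 matches an x3 destination operand.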
5198 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5199 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5200 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5201 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5202 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5203 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5204 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5205 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5206
5207 return false;
5208}
5209
5210/// Parse an AArch64 instruction mnemonic followed by its operands.
5211bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5212 StringRef Name, SMLoc NameLoc,
5215 .Case("beq", "b.eq")
5216 .Case("bne", "b.ne")
5217 .Case("bhs", "b.hs")
5218 .Case("bcs", "b.cs")
5219 .Case("blo", "b.lo")
5220 .Case("bcc", "b.cc")
5221 .Case("bmi", "b.mi")
5222 .Case("bpl", "b.pl")
5223 .Case("bvs", "b.vs")
5224 .Case("bvc", "b.vc")
5225 .Case("bhi", "b.hi")
5226 .Case("bls", "b.ls")
5227 .Case("bge", "b.ge")
5228 .Case("blt", "b.lt")
5229 .Case("bgt", "b.gt")
5230 .Case("ble", "b.le")
5231 .Case("bal", "b.al")
5232 .Case("bnv", "b.nv")
5233 .Default(Name);
5234
5235 // First check for the AArch64-specific .req directive.
5236 if (getTok().is(AsmToken::Identifier) &&
5237 getTok().getIdentifier().lower() == ".req") {
5238 parseDirectiveReq(Name, NameLoc);
5239 // We always return 'error' for this, as we're done with this
5240 // statement and don't need to match the instruction.
5241 return true;
5242 }
5243
5244 // Create the leading tokens for the mnemonic, split by '.' characters.
5245 size_t Start = 0, Next = Name.find('.');
5246 StringRef Head = Name.slice(Start, Next);
5247
5248 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5249 // the SYS instruction.
5250 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5251 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5252 return parseSysAlias(Head, NameLoc, Operands);
5253
5254 // TLBIP instructions are aliases for the SYSP instruction.
5255 if (Head == "tlbip")
5256 return parseSyspAlias(Head, NameLoc, Operands);
5257
5258 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5259 Mnemonic = Head;
5260
5261 // Handle condition codes for a branch mnemonic
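// Illustrative example: "b.ne" (or its alias "bne", normalised above) is
// split into the token "b", a "." suffix token and an NE condition-code
// operand.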
5262 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5263 Start = Next;
5264 Next = Name.find('.', Start + 1);
5265 Head = Name.slice(Start + 1, Next);
5266
5267 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5268 (Head.data() - Name.data()));
5269 std::string Suggestion;
5270 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5271 if (CC == AArch64CC::Invalid) {
5272 std::string Msg = "invalid condition code";
5273 if (!Suggestion.empty())
5274 Msg += ", did you mean " + Suggestion + "?";
5275 return Error(SuffixLoc, Msg);
5276 }
5277 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5278 /*IsSuffix=*/true));
5279 Operands.push_back(
5280 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5281 }
5282
5283 // Add the remaining tokens in the mnemonic.
5284 while (Next != StringRef::npos) {
5285 Start = Next;
5286 Next = Name.find('.', Start + 1);
5287 Head = Name.slice(Start, Next);
5288 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5289 (Head.data() - Name.data()) + 1);
5290 Operands.push_back(AArch64Operand::CreateToken(
5291 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5292 }
5293
5294 // Conditional compare instructions have a Condition Code operand, which needs
5295 // to be parsed and an immediate operand created.
5296 bool condCodeFourthOperand =
5297 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5298 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5299 Head == "csinc" || Head == "csinv" || Head == "csneg");
5300
5301 // These instructions are aliases to some of the conditional select
5302 // instructions. However, the condition code is inverted in the aliased
5303 // instruction.
5304 //
5305 // FIXME: Is this the correct way to handle these? Or should the parser
5306 // generate the aliased instructions directly?
5307 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5308 bool condCodeThirdOperand =
5309 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5310
5311 // Read the remaining operands.
5312 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5313
5314 unsigned N = 1;
5315 do {
5316 // Parse and remember the operand.
5317 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5318 (N == 3 && condCodeThirdOperand) ||
5319 (N == 2 && condCodeSecondOperand),
5320 condCodeSecondOperand || condCodeThirdOperand)) {
5321 return true;
5322 }
5323
5324 // After successfully parsing some operands there are three special cases
5325 // to consider (i.e. notional operands not separated by commas). Two are
5326 // due to memory specifiers:
5327 // + An RBrac will end an address for load/store/prefetch
5328 // + An '!' will indicate a pre-indexed operation.
5329 //
5330 // And a further case is '}', which ends a group of tokens specifying the
5331 // SME accumulator array 'ZA' or tile vector, i.e.
5332 //
5333 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5334 //
5335 // It's someone else's responsibility to make sure these tokens are sane
5336 // in the given context!
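// Illustrative examples: "ldr x0, [x1, #8]!" (']' closes the address, '!'
// marks pre-indexing), "ldr x0, [x1], #8" (post-indexed) and
// "zero { za }" ('}' closes the ZA group).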
5337
5338 if (parseOptionalToken(AsmToken::RBrac))
5339 Operands.push_back(
5340 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5341 if (parseOptionalToken(AsmToken::Exclaim))
5342 Operands.push_back(
5343 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5344 if (parseOptionalToken(AsmToken::RCurly))
5345 Operands.push_back(
5346 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5347
5348 ++N;
5349 } while (parseOptionalToken(AsmToken::Comma));
5350 }
5351
5352 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5353 return true;
5354
5355 return false;
5356}
5357
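// Helper for the movprfx checks below: a Z register "matches or aliases"
// another register when that register is the B/H/S/D/Q view of the same
// SVE register number, e.g. (illustrative) z3 aliases d3 and q3 but not d4.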
5358static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5359 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5360 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5361 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5362 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5363 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5364 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5365 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5366}
5367
5368// FIXME: This entire function is a giant hack to provide us with decent
5369// operand range validation/diagnostics until TableGen/MC can be extended
5370// to support autogeneration of this kind of validation.
5371bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5372 SmallVectorImpl<SMLoc> &Loc) {
5373 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5374 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5375
5376 // A prefix only applies to the instruction following it. Here we extract
5377 // prefix information for the next instruction before validating the current
5378 // one so that in the case of failure we don't erronously continue using the
5379 // current prefix.
5380 PrefixInfo Prefix = NextPrefix;
5381 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5382
5383 // Before validating the instruction in isolation we run through the rules
5384 // applicable when it follows a prefix instruction.
5385 // NOTE: brk & hlt can be prefixed but require no additional validation.
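// Illustrative example of the constraints checked here:
//   movprfx z0, z1
//   add     z0.s, p0/m, z0.s, z2.s   // OK: destructive, same destination
//   add     z3.s, p0/m, z3.s, z2.s   // rejected: different destination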
5386 if (Prefix.isActive() &&
5387 (Inst.getOpcode() != AArch64::BRK) &&
5388 (Inst.getOpcode() != AArch64::HLT)) {
5389
5390 // Prefixed instructions must have a destructive operand.
5391 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5392 AArch64::NotDestructive)
5393 return Error(IDLoc, "instruction is unpredictable when following a"
5394 " movprfx, suggest replacing movprfx with mov");
5395
5396 // Destination operands must match.
5397 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5398 return Error(Loc[0], "instruction is unpredictable when following a"
5399 " movprfx writing to a different destination");
5400
5401 // Destination operand must not be used in any other location.
5402 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5403 if (Inst.getOperand(i).isReg() &&
5404 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5405 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5406 return Error(Loc[0], "instruction is unpredictable when following a"
5407 " movprfx and destination also used as non-destructive"
5408 " source");
5409 }
5410
5411 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5412 if (Prefix.isPredicated()) {
5413 int PgIdx = -1;
5414
5415 // Find the instruction's general predicate.
5416 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5417 if (Inst.getOperand(i).isReg() &&
5418 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5419 PgIdx = i;
5420 break;
5421 }
5422
5423 // Instruction must be predicated if the movprfx is predicated.
5424 if (PgIdx == -1 ||
5425 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5426 return Error(IDLoc, "instruction is unpredictable when following a"
5427 " predicated movprfx, suggest using unpredicated movprfx");
5428
5429 // Instruction must use same general predicate as the movprfx.
5430 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5431 return Error(IDLoc, "instruction is unpredictable when following a"
5432 " predicated movprfx using a different general predicate");
5433
5434 // Instruction element type must match the movprfx.
5435 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5436 return Error(IDLoc, "instruction is unpredictable when following a"
5437 " predicated movprfx with a different element size");
5438 }
5439 }
5440
5441 // On ARM64EC, only valid registers may be used. Warn against using
5442 // explicitly disallowed registers.
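// Illustrative example: "mov x13, #1" assembles but triggers the warning
// below when targeting ARM64EC.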
5443 if (IsWindowsArm64EC) {
5444 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5445 if (Inst.getOperand(i).isReg()) {
5446 MCRegister Reg = Inst.getOperand(i).getReg();
5447 // At this point, vector registers are matched to their
5448 // appropriately sized alias.
5449 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5450 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5451 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5452 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5453 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5454 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5455 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5456 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5457 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5458 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5459 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5460 " is disallowed on ARM64EC.");
5461 }
5462 }
5463 }
5464 }
5465
5466 // Check for indexed addressing modes with the base register being the
5467 // same as a destination/source register, or a pair load where
5468 // Rt == Rt2. All of those are unpredictable.
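// Illustrative examples of the cases rejected below:
//   ldp x0, x1, [x0, #16]!   // writeback base is also a destination
//   ldp x2, x2, [x3]         // Rt == Rt2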
5469 switch (Inst.getOpcode()) {
5470 case AArch64::LDPSWpre:
5471 case AArch64::LDPWpost:
5472 case AArch64::LDPWpre:
5473 case AArch64::LDPXpost:
5474 case AArch64::LDPXpre: {
5475 MCRegister Rt = Inst.getOperand(1).getReg();
5476 MCRegister Rt2 = Inst.getOperand(2).getReg();
5477 MCRegister Rn = Inst.getOperand(3).getReg();
5478 if (RI->isSubRegisterEq(Rn, Rt))
5479 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5480 "is also a destination");
5481 if (RI->isSubRegisterEq(Rn, Rt2))
5482 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5483 "is also a destination");
5484 [[fallthrough]];
5485 }
5486 case AArch64::LDR_ZA:
5487 case AArch64::STR_ZA: {
5488 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5489 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5490 return Error(Loc[1],
5491 "unpredictable instruction, immediate and offset mismatch.");
5492 break;
5493 }
5494 case AArch64::LDPDi:
5495 case AArch64::LDPQi:
5496 case AArch64::LDPSi:
5497 case AArch64::LDPSWi:
5498 case AArch64::LDPWi:
5499 case AArch64::LDPXi: {
5500 MCRegister Rt = Inst.getOperand(0).getReg();
5501 MCRegister Rt2 = Inst.getOperand(1).getReg();
5502 if (Rt == Rt2)
5503 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5504 break;
5505 }
5506 case AArch64::LDPDpost:
5507 case AArch64::LDPDpre:
5508 case AArch64::LDPQpost:
5509 case AArch64::LDPQpre:
5510 case AArch64::LDPSpost:
5511 case AArch64::LDPSpre:
5512 case AArch64::LDPSWpost: {
5513 MCRegister Rt = Inst.getOperand(1).getReg();
5514 MCRegister Rt2 = Inst.getOperand(2).getReg();
5515 if (Rt == Rt2)
5516 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5517 break;
5518 }
5519 case AArch64::STPDpost:
5520 case AArch64::STPDpre:
5521 case AArch64::STPQpost:
5522 case AArch64::STPQpre:
5523 case AArch64::STPSpost:
5524 case AArch64::STPSpre:
5525 case AArch64::STPWpost:
5526 case AArch64::STPWpre:
5527 case AArch64::STPXpost:
5528 case AArch64::STPXpre: {
5529 MCRegister Rt = Inst.getOperand(1).getReg();
5530 MCRegister Rt2 = Inst.getOperand(2).getReg();
5531 MCRegister Rn = Inst.getOperand(3).getReg();
5532 if (RI->isSubRegisterEq(Rn, Rt))
5533 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5534 "is also a source");
5535 if (RI->isSubRegisterEq(Rn, Rt2))
5536 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5537 "is also a source");
5538 break;
5539 }
5540 case AArch64::LDRBBpre:
5541 case AArch64::LDRBpre:
5542 case AArch64::LDRHHpre:
5543 case AArch64::LDRHpre:
5544 case AArch64::LDRSBWpre:
5545 case AArch64::LDRSBXpre:
5546 case AArch64::LDRSHWpre:
5547 case AArch64::LDRSHXpre:
5548 case AArch64::LDRSWpre:
5549 case AArch64::LDRWpre:
5550 case AArch64::LDRXpre:
5551 case AArch64::LDRBBpost:
5552 case AArch64::LDRBpost:
5553 case AArch64::LDRHHpost:
5554 case AArch64::LDRHpost:
5555 case AArch64::LDRSBWpost:
5556 case AArch64::LDRSBXpost:
5557 case AArch64::LDRSHWpost:
5558 case AArch64::LDRSHXpost:
5559 case AArch64::LDRSWpost:
5560 case AArch64::LDRWpost:
5561 case AArch64::LDRXpost: {
5562 MCRegister Rt = Inst.getOperand(1).getReg();
5563 MCRegister Rn = Inst.getOperand(2).getReg();
5564 if (RI->isSubRegisterEq(Rn, Rt))
5565 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5566 "is also a source");
5567 break;
5568 }
5569 case AArch64::STRBBpost:
5570 case AArch64::STRBpost:
5571 case AArch64::STRHHpost:
5572 case AArch64::STRHpost:
5573 case AArch64::STRWpost:
5574 case AArch64::STRXpost:
5575 case AArch64::STRBBpre:
5576 case AArch64::STRBpre:
5577 case AArch64::STRHHpre:
5578 case AArch64::STRHpre:
5579 case AArch64::STRWpre:
5580 case AArch64::STRXpre: {
5581 MCRegister Rt = Inst.getOperand(1).getReg();
5582 MCRegister Rn = Inst.getOperand(2).getReg();
5583 if (RI->isSubRegisterEq(Rn, Rt))
5584 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5585 "is also a source");
5586 break;
5587 }
5588 case AArch64::STXRB:
5589 case AArch64::STXRH:
5590 case AArch64::STXRW:
5591 case AArch64::STXRX:
5592 case AArch64::STLXRB:
5593 case AArch64::STLXRH:
5594 case AArch64::STLXRW:
5595 case AArch64::STLXRX: {
5596 MCRegister Rs = Inst.getOperand(0).getReg();
5597 MCRegister Rt = Inst.getOperand(1).getReg();
5598 MCRegister Rn = Inst.getOperand(2).getReg();
5599 if (RI->isSubRegisterEq(Rt, Rs) ||
5600 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5601 return Error(Loc[0],
5602 "unpredictable STXR instruction, status is also a source");
5603 break;
5604 }
5605 case AArch64::STXPW:
5606 case AArch64::STXPX:
5607 case AArch64::STLXPW:
5608 case AArch64::STLXPX: {
5609 MCRegister Rs = Inst.getOperand(0).getReg();
5610 MCRegister Rt1 = Inst.getOperand(1).getReg();
5611 MCRegister Rt2 = Inst.getOperand(2).getReg();
5612 MCRegister Rn = Inst.getOperand(3).getReg();
5613 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5614 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5615 return Error(Loc[0],
5616 "unpredictable STXP instruction, status is also a source");
5617 break;
5618 }
5619 case AArch64::LDRABwriteback:
5620 case AArch64::LDRAAwriteback: {
5621 MCRegister Xt = Inst.getOperand(0).getReg();
5622 MCRegister Xn = Inst.getOperand(1).getReg();
5623 if (Xt == Xn)
5624 return Error(Loc[0],
5625 "unpredictable LDRA instruction, writeback base"
5626 " is also a destination");
5627 break;
5628 }
5629 }
5630
5631 // Check v8.8-A memops instructions.
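// These memcpy/memset forms take writeback copies of their operands, e.g.
// "cpyfp [x0]!, [x1]!, x2!"; the checks below reject (illustratively)
// "cpyfp [x0]!, [x0]!, x2!", where destination and source coincide.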
5632 switch (Inst.getOpcode()) {
5633 case AArch64::CPYFP:
5634 case AArch64::CPYFPWN:
5635 case AArch64::CPYFPRN:
5636 case AArch64::CPYFPN:
5637 case AArch64::CPYFPWT:
5638 case AArch64::CPYFPWTWN:
5639 case AArch64::CPYFPWTRN:
5640 case AArch64::CPYFPWTN:
5641 case AArch64::CPYFPRT:
5642 case AArch64::CPYFPRTWN:
5643 case AArch64::CPYFPRTRN:
5644 case AArch64::CPYFPRTN:
5645 case AArch64::CPYFPT:
5646 case AArch64::CPYFPTWN:
5647 case AArch64::CPYFPTRN:
5648 case AArch64::CPYFPTN:
5649 case AArch64::CPYFM:
5650 case AArch64::CPYFMWN:
5651 case AArch64::CPYFMRN:
5652 case AArch64::CPYFMN:
5653 case AArch64::CPYFMWT:
5654 case AArch64::CPYFMWTWN:
5655 case AArch64::CPYFMWTRN:
5656 case AArch64::CPYFMWTN:
5657 case AArch64::CPYFMRT:
5658 case AArch64::CPYFMRTWN:
5659 case AArch64::CPYFMRTRN:
5660 case AArch64::CPYFMRTN:
5661 case AArch64::CPYFMT:
5662 case AArch64::CPYFMTWN:
5663 case AArch64::CPYFMTRN:
5664 case AArch64::CPYFMTN:
5665 case AArch64::CPYFE:
5666 case AArch64::CPYFEWN:
5667 case AArch64::CPYFERN:
5668 case AArch64::CPYFEN:
5669 case AArch64::CPYFEWT:
5670 case AArch64::CPYFEWTWN:
5671 case AArch64::CPYFEWTRN:
5672 case AArch64::CPYFEWTN:
5673 case AArch64::CPYFERT:
5674 case AArch64::CPYFERTWN:
5675 case AArch64::CPYFERTRN:
5676 case AArch64::CPYFERTN:
5677 case AArch64::CPYFET:
5678 case AArch64::CPYFETWN:
5679 case AArch64::CPYFETRN:
5680 case AArch64::CPYFETN:
5681 case AArch64::CPYP:
5682 case AArch64::CPYPWN:
5683 case AArch64::CPYPRN:
5684 case AArch64::CPYPN:
5685 case AArch64::CPYPWT:
5686 case AArch64::CPYPWTWN:
5687 case AArch64::CPYPWTRN:
5688 case AArch64::CPYPWTN:
5689 case AArch64::CPYPRT:
5690 case AArch64::CPYPRTWN:
5691 case AArch64::CPYPRTRN:
5692 case AArch64::CPYPRTN:
5693 case AArch64::CPYPT:
5694 case AArch64::CPYPTWN:
5695 case AArch64::CPYPTRN:
5696 case AArch64::CPYPTN:
5697 case AArch64::CPYM:
5698 case AArch64::CPYMWN:
5699 case AArch64::CPYMRN:
5700 case AArch64::CPYMN:
5701 case AArch64::CPYMWT:
5702 case AArch64::CPYMWTWN:
5703 case AArch64::CPYMWTRN:
5704 case AArch64::CPYMWTN:
5705 case AArch64::CPYMRT:
5706 case AArch64::CPYMRTWN:
5707 case AArch64::CPYMRTRN:
5708 case AArch64::CPYMRTN:
5709 case AArch64::CPYMT:
5710 case AArch64::CPYMTWN:
5711 case AArch64::CPYMTRN:
5712 case AArch64::CPYMTN:
5713 case AArch64::CPYE:
5714 case AArch64::CPYEWN:
5715 case AArch64::CPYERN:
5716 case AArch64::CPYEN:
5717 case AArch64::CPYEWT:
5718 case AArch64::CPYEWTWN:
5719 case AArch64::CPYEWTRN:
5720 case AArch64::CPYEWTN:
5721 case AArch64::CPYERT:
5722 case AArch64::CPYERTWN:
5723 case AArch64::CPYERTRN:
5724 case AArch64::CPYERTN:
5725 case AArch64::CPYET:
5726 case AArch64::CPYETWN:
5727 case AArch64::CPYETRN:
5728 case AArch64::CPYETN: {
5729 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5730 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5731 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5732 MCRegister Xd = Inst.getOperand(3).getReg();
5733 MCRegister Xs = Inst.getOperand(4).getReg();
5734 MCRegister Xn = Inst.getOperand(5).getReg();
5735 if (Xd_wb != Xd)
5736 return Error(Loc[0],
5737 "invalid CPY instruction, Xd_wb and Xd do not match");
5738 if (Xs_wb != Xs)
5739 return Error(Loc[0],
5740 "invalid CPY instruction, Xs_wb and Xs do not match");
5741 if (Xn_wb != Xn)
5742 return Error(Loc[0],
5743 "invalid CPY instruction, Xn_wb and Xn do not match");
5744 if (Xd == Xs)
5745 return Error(Loc[0], "invalid CPY instruction, destination and source"
5746 " registers are the same");
5747 if (Xd == Xn)
5748 return Error(Loc[0], "invalid CPY instruction, destination and size"
5749 " registers are the same");
5750 if (Xs == Xn)
5751 return Error(Loc[0], "invalid CPY instruction, source and size"
5752 " registers are the same");
5753 break;
5754 }
5755 case AArch64::SETP:
5756 case AArch64::SETPT:
5757 case AArch64::SETPN:
5758 case AArch64::SETPTN:
5759 case AArch64::SETM:
5760 case AArch64::SETMT:
5761 case AArch64::SETMN:
5762 case AArch64::SETMTN:
5763 case AArch64::SETE:
5764 case AArch64::SETET:
5765 case AArch64::SETEN:
5766 case AArch64::SETETN:
5767 case AArch64::SETGP:
5768 case AArch64::SETGPT:
5769 case AArch64::SETGPN:
5770 case AArch64::SETGPTN:
5771 case AArch64::SETGM:
5772 case AArch64::SETGMT:
5773 case AArch64::SETGMN:
5774 case AArch64::SETGMTN:
5775 case AArch64::MOPSSETGE:
5776 case AArch64::MOPSSETGET:
5777 case AArch64::MOPSSETGEN:
5778 case AArch64::MOPSSETGETN: {
5779 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5780 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5781 MCRegister Xd = Inst.getOperand(2).getReg();
5782 MCRegister Xn = Inst.getOperand(3).getReg();
5783 MCRegister Xm = Inst.getOperand(4).getReg();
5784 if (Xd_wb != Xd)
5785 return Error(Loc[0],
5786 "invalid SET instruction, Xd_wb and Xd do not match");
5787 if (Xn_wb != Xn)
5788 return Error(Loc[0],
5789 "invalid SET instruction, Xn_wb and Xn do not match");
5790 if (Xd == Xn)
5791 return Error(Loc[0], "invalid SET instruction, destination and size"
5792 " registers are the same");
5793 if (Xd == Xm)
5794 return Error(Loc[0], "invalid SET instruction, destination and source"
5795 " registers are the same");
5796 if (Xn == Xm)
5797 return Error(Loc[0], "invalid SET instruction, source and size"
5798 " registers are the same");
5799 break;
5800 }
5801 }
5802
5803 // Now check immediate ranges. Separate from the above as there is overlap
5804 // in the instructions being checked and this keeps the nested conditionals
5805 // to a minimum.
5806 switch (Inst.getOpcode()) {
5807 case AArch64::ADDSWri:
5808 case AArch64::ADDSXri:
5809 case AArch64::ADDWri:
5810 case AArch64::ADDXri:
5811 case AArch64::SUBSWri:
5812 case AArch64::SUBSXri:
5813 case AArch64::SUBWri:
5814 case AArch64::SUBXri: {
5815 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5816 // some slight duplication here.
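// Illustrative examples: "add x0, x1, :lo12:sym" is accepted for ADDXri,
// while a bare symbol such as "add w0, w1, borked" falls through to the
// "invalid immediate expression" error below.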
5817 if (Inst.getOperand(2).isExpr()) {
5818 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5819 AArch64MCExpr::VariantKind ELFRefKind;
5820 MCSymbolRefExpr::VariantKind DarwinRefKind;
5821 int64_t Addend;
5822 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5823
5824 // Only allow these with ADDXri.
5825 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5826 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5827 Inst.getOpcode() == AArch64::ADDXri)
5828 return false;
5829
5830 // Only allow these with ADDXri/ADDWri
5831 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5832 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
5833 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5834 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5835 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5836 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5837 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5838 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5839 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5840 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
5841 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5842 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5843 (Inst.getOpcode() == AArch64::ADDXri ||
5844 Inst.getOpcode() == AArch64::ADDWri))
5845 return false;
5846
5847 // Otherwise, don't allow symbol refs in the immediate field.
5848 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5849 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5850 // 'cmp w0, 'borked')
5851 return Error(Loc.back(), "invalid immediate expression");
5852 }
5853 // We don't validate more complex expressions here
5854 }
5855 return false;
5856 }
5857 default:
5858 return false;
5859 }
5860}
5861
5862 static std::string AArch64MnemonicSpellCheck(StringRef S,
5863 const FeatureBitset &FBS,
5864 unsigned VariantID = 0);
5865
5866bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5867 uint64_t ErrorInfo,
5868 OperandVector &Operands) {
5869 switch (ErrCode) {
5870 case Match_InvalidTiedOperand: {
5871 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5872 if (Op.isVectorList())
5873 return Error(Loc, "operand must match destination register list");
5874
5875 assert(Op.isReg() && "Unexpected operand type");
5876 switch (Op.getRegEqualityTy()) {
5877 case RegConstraintEqualityTy::EqualsSubReg:
5878 return Error(Loc, "operand must be 64-bit form of destination register");
5879 case RegConstraintEqualityTy::EqualsSuperReg:
5880 return Error(Loc, "operand must be 32-bit form of destination register");
5881 case RegConstraintEqualityTy::EqualsReg:
5882 return Error(Loc, "operand must match destination register");
5883 }
5884 llvm_unreachable("Unknown RegConstraintEqualityTy");
5885 }
5886 case Match_MissingFeature:
5887 return Error(Loc,
5888 "instruction requires a CPU feature not currently enabled");
5889 case Match_InvalidOperand:
5890 return Error(Loc, "invalid operand for instruction");
5891 case Match_InvalidSuffix:
5892 return Error(Loc, "invalid type suffix for instruction");
5893 case Match_InvalidCondCode:
5894 return Error(Loc, "expected AArch64 condition code");
5895 case Match_AddSubRegExtendSmall:
5896 return Error(Loc,
5897 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5898 case Match_AddSubRegExtendLarge:
5899 return Error(Loc,
5900 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5901 case Match_AddSubSecondSource:
5902 return Error(Loc,
5903 "expected compatible register, symbol or integer in range [0, 4095]");
5904 case Match_LogicalSecondSource:
5905 return Error(Loc, "expected compatible register or logical immediate");
5906 case Match_InvalidMovImm32Shift:
5907 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5908 case Match_InvalidMovImm64Shift:
5909 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5910 case Match_AddSubRegShift32:
5911 return Error(Loc,
5912 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5913 case Match_AddSubRegShift64:
5914 return Error(Loc,
5915 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5916 case Match_InvalidFPImm:
5917 return Error(Loc,
5918 "expected compatible register or floating-point constant");
5919 case Match_InvalidMemoryIndexedSImm6:
5920 return Error(Loc, "index must be an integer in range [-32, 31].");
5921 case Match_InvalidMemoryIndexedSImm5:
5922 return Error(Loc, "index must be an integer in range [-16, 15].");
5923 case Match_InvalidMemoryIndexed1SImm4:
5924 return Error(Loc, "index must be an integer in range [-8, 7].");
5925 case Match_InvalidMemoryIndexed2SImm4:
5926 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5927 case Match_InvalidMemoryIndexed3SImm4:
5928 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5929 case Match_InvalidMemoryIndexed4SImm4:
5930 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5931 case Match_InvalidMemoryIndexed16SImm4:
5932 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5933 case Match_InvalidMemoryIndexed32SImm4:
5934 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5935 case Match_InvalidMemoryIndexed1SImm6:
5936 return Error(Loc, "index must be an integer in range [-32, 31].");
5937 case Match_InvalidMemoryIndexedSImm8:
5938 return Error(Loc, "index must be an integer in range [-128, 127].");
5939 case Match_InvalidMemoryIndexedSImm9:
5940 return Error(Loc, "index must be an integer in range [-256, 255].");
5941 case Match_InvalidMemoryIndexed16SImm9:
5942 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5943 case Match_InvalidMemoryIndexed8SImm10:
5944 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5945 case Match_InvalidMemoryIndexed4SImm7:
5946 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5947 case Match_InvalidMemoryIndexed8SImm7:
5948 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5949 case Match_InvalidMemoryIndexed16SImm7:
5950 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5951 case Match_InvalidMemoryIndexed8UImm5:
5952 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5953 case Match_InvalidMemoryIndexed8UImm3:
5954 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5955 case Match_InvalidMemoryIndexed4UImm5:
5956 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5957 case Match_InvalidMemoryIndexed2UImm5:
5958 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5959 case Match_InvalidMemoryIndexed8UImm6:
5960 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5961 case Match_InvalidMemoryIndexed16UImm6:
5962 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5963 case Match_InvalidMemoryIndexed4UImm6:
5964 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5965 case Match_InvalidMemoryIndexed2UImm6:
5966 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5967 case Match_InvalidMemoryIndexed1UImm6:
5968 return Error(Loc, "index must be in range [0, 63].");
5969 case Match_InvalidMemoryWExtend8:
5970 return Error(Loc,
5971 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5972 case Match_InvalidMemoryWExtend16:
5973 return Error(Loc,
5974 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5975 case Match_InvalidMemoryWExtend32:
5976 return Error(Loc,
5977 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5978 case Match_InvalidMemoryWExtend64:
5979 return Error(Loc,
5980 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5981 case Match_InvalidMemoryWExtend128:
5982 return Error(Loc,
5983 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5984 case Match_InvalidMemoryXExtend8:
5985 return Error(Loc,
5986 "expected 'lsl' or 'sxtx' with optional shift of #0");
5987 case Match_InvalidMemoryXExtend16:
5988 return Error(Loc,
5989 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5990 case Match_InvalidMemoryXExtend32:
5991 return Error(Loc,
5992 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5993 case Match_InvalidMemoryXExtend64:
5994 return Error(Loc,
5995 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5996 case Match_InvalidMemoryXExtend128:
5997 return Error(Loc,
5998 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5999 case Match_InvalidMemoryIndexed1:
6000 return Error(Loc, "index must be an integer in range [0, 4095].");
6001 case Match_InvalidMemoryIndexed2:
6002 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6003 case Match_InvalidMemoryIndexed4:
6004 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6005 case Match_InvalidMemoryIndexed8:
6006 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6007 case Match_InvalidMemoryIndexed16:
6008 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6009 case Match_InvalidImm0_0:
6010 return Error(Loc, "immediate must be 0.");
6011 case Match_InvalidImm0_1:
6012 return Error(Loc, "immediate must be an integer in range [0, 1].");
6013 case Match_InvalidImm0_3:
6014 return Error(Loc, "immediate must be an integer in range [0, 3].");
6015 case Match_InvalidImm0_7:
6016 return Error(Loc, "immediate must be an integer in range [0, 7].");
6017 case Match_InvalidImm0_15:
6018 return Error(Loc, "immediate must be an integer in range [0, 15].");
6019 case Match_InvalidImm0_31:
6020 return Error(Loc, "immediate must be an integer in range [0, 31].");
6021 case Match_InvalidImm0_63:
6022 return Error(Loc, "immediate must be an integer in range [0, 63].");
6023 case Match_InvalidImm0_127:
6024 return Error(Loc, "immediate must be an integer in range [0, 127].");
6025 case Match_InvalidImm0_255:
6026 return Error(Loc, "immediate must be an integer in range [0, 255].");
6027 case Match_InvalidImm0_65535:
6028 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6029 case Match_InvalidImm1_8:
6030 return Error(Loc, "immediate must be an integer in range [1, 8].");
6031 case Match_InvalidImm1_16:
6032 return Error(Loc, "immediate must be an integer in range [1, 16].");
6033 case Match_InvalidImm1_32:
6034 return Error(Loc, "immediate must be an integer in range [1, 32].");
6035 case Match_InvalidImm1_64:
6036 return Error(Loc, "immediate must be an integer in range [1, 64].");
6037 case Match_InvalidImmM1_62:
6038 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6039 case Match_InvalidMemoryIndexedRange2UImm0:
6040 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6041 case Match_InvalidMemoryIndexedRange2UImm1:
6042 return Error(Loc, "vector select offset must be an immediate range of the "
6043 "form <immf>:<imml>, where the first "
6044 "immediate is a multiple of 2 in the range [0, 2], and "
6045 "the second immediate is immf + 1.");
6046 case Match_InvalidMemoryIndexedRange2UImm2:
6047 case Match_InvalidMemoryIndexedRange2UImm3:
6048 return Error(
6049 Loc,
6050 "vector select offset must be an immediate range of the form "
6051 "<immf>:<imml>, "
6052 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6053 "[0, 14] "
6054 "depending on the instruction, and the second immediate is immf + 1.");
6055 case Match_InvalidMemoryIndexedRange4UImm0:
6056 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6057 case Match_InvalidMemoryIndexedRange4UImm1:
6058 case Match_InvalidMemoryIndexedRange4UImm2:
6059 return Error(
6060 Loc,
6061 "vector select offset must be an immediate range of the form "
6062 "<immf>:<imml>, "
6063 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6064 "[0, 12] "
6065 "depending on the instruction, and the second immediate is immf + 3.");
6066 case Match_InvalidSVEAddSubImm8:
6067 return Error(Loc, "immediate must be an integer in range [0, 255]"
6068 " with a shift amount of 0");
6069 case Match_InvalidSVEAddSubImm16:
6070 case Match_InvalidSVEAddSubImm32:
6071 case Match_InvalidSVEAddSubImm64:
6072 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6073 "multiple of 256 in range [256, 65280]");
6074 case Match_InvalidSVECpyImm8:
6075 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6076 " with a shift amount of 0");
6077 case Match_InvalidSVECpyImm16:
6078 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6079 "multiple of 256 in range [-32768, 65280]");
6080 case Match_InvalidSVECpyImm32:
6081 case Match_InvalidSVECpyImm64:
6082 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6083 "multiple of 256 in range [-32768, 32512]");
6084 case Match_InvalidIndexRange0_0:
6085 return Error(Loc, "expected lane specifier '[0]'");
6086 case Match_InvalidIndexRange1_1:
6087 return Error(Loc, "expected lane specifier '[1]'");
6088 case Match_InvalidIndexRange0_15:
6089 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6090 case Match_InvalidIndexRange0_7:
6091 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6092 case Match_InvalidIndexRange0_3:
6093 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6094 case Match_InvalidIndexRange0_1:
6095 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6096 case Match_InvalidSVEIndexRange0_63:
6097 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6098 case Match_InvalidSVEIndexRange0_31:
6099 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6100 case Match_InvalidSVEIndexRange0_15:
6101 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6102 case Match_InvalidSVEIndexRange0_7:
6103 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6104 case Match_InvalidSVEIndexRange0_3:
6105 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6106 case Match_InvalidLabel:
6107 return Error(Loc, "expected label or encodable integer pc offset");
6108 case Match_MRS:
6109 return Error(Loc, "expected readable system register");
6110 case Match_MSR:
6111 case Match_InvalidSVCR:
6112 return Error(Loc, "expected writable system register or pstate");
6113 case Match_InvalidComplexRotationEven:
6114 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6115 case Match_InvalidComplexRotationOdd:
6116 return Error(Loc, "complex rotation must be 90 or 270.");
6117 case Match_MnemonicFail: {
6118 std::string Suggestion = AArch64MnemonicSpellCheck(
6119 ((AArch64Operand &)*Operands[0]).getToken(),
6120 ComputeAvailableFeatures(STI->getFeatureBits()));
6121 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6122 }
6123 case Match_InvalidGPR64shifted8:
6124 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6125 case Match_InvalidGPR64shifted16:
6126 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6127 case Match_InvalidGPR64shifted32:
6128 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6129 case Match_InvalidGPR64shifted64:
6130 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6131 case Match_InvalidGPR64shifted128:
6132 return Error(
6133 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6134 case Match_InvalidGPR64NoXZRshifted8:
6135 return Error(Loc, "register must be x0..x30 without shift");
6136 case Match_InvalidGPR64NoXZRshifted16:
6137 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6138 case Match_InvalidGPR64NoXZRshifted32:
6139 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6140 case Match_InvalidGPR64NoXZRshifted64:
6141 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6142 case Match_InvalidGPR64NoXZRshifted128:
6143 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6144 case Match_InvalidZPR32UXTW8:
6145 case Match_InvalidZPR32SXTW8:
6146 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6147 case Match_InvalidZPR32UXTW16:
6148 case Match_InvalidZPR32SXTW16:
6149 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6150 case Match_InvalidZPR32UXTW32:
6151 case Match_InvalidZPR32SXTW32:
6152 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6153 case Match_InvalidZPR32UXTW64:
6154 case Match_InvalidZPR32SXTW64:
6155 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6156 case Match_InvalidZPR64UXTW8:
6157 case Match_InvalidZPR64SXTW8:
6158 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6159 case Match_InvalidZPR64UXTW16:
6160 case Match_InvalidZPR64SXTW16:
6161 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6162 case Match_InvalidZPR64UXTW32:
6163 case Match_InvalidZPR64SXTW32:
6164 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6165 case Match_InvalidZPR64UXTW64:
6166 case Match_InvalidZPR64SXTW64:
6167 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6168 case Match_InvalidZPR32LSL8:
6169 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6170 case Match_InvalidZPR32LSL16:
6171 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6172 case Match_InvalidZPR32LSL32:
6173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6174 case Match_InvalidZPR32LSL64:
6175 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6176 case Match_InvalidZPR64LSL8:
6177 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6178 case Match_InvalidZPR64LSL16:
6179 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6180 case Match_InvalidZPR64LSL32:
6181 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6182 case Match_InvalidZPR64LSL64:
6183 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6184 case Match_InvalidZPR0:
6185 return Error(Loc, "expected register without element width suffix");
6186 case Match_InvalidZPR8:
6187 case Match_InvalidZPR16:
6188 case Match_InvalidZPR32:
6189 case Match_InvalidZPR64:
6190 case Match_InvalidZPR128:
6191 return Error(Loc, "invalid element width");
6192 case Match_InvalidZPR_3b8:
6193 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6194 case Match_InvalidZPR_3b16:
6195 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6196 case Match_InvalidZPR_3b32:
6197 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6198 case Match_InvalidZPR_4b8:
6199 return Error(Loc,
6200 "Invalid restricted vector register, expected z0.b..z15.b");
6201 case Match_InvalidZPR_4b16:
6202 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6203 case Match_InvalidZPR_4b32:
6204 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6205 case Match_InvalidZPR_4b64:
6206 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6207 case Match_InvalidZPRMul2_Lo8:
6208 return Error(Loc, "Invalid restricted vector register, expected even "
6209 "register in z0.b..z14.b");
6210 case Match_InvalidZPRMul2_Hi8:
6211 return Error(Loc, "Invalid restricted vector register, expected even "
6212 "register in z16.b..z30.b");
6213 case Match_InvalidZPRMul2_Lo16:
6214 return Error(Loc, "Invalid restricted vector register, expected even "
6215 "register in z0.h..z14.h");
6216 case Match_InvalidZPRMul2_Hi16:
6217 return Error(Loc, "Invalid restricted vector register, expected even "
6218 "register in z16.h..z30.h");
6219 case Match_InvalidZPRMul2_Lo32:
6220 return Error(Loc, "Invalid restricted vector register, expected even "
6221 "register in z0.s..z14.s");
6222 case Match_InvalidZPRMul2_Hi32:
6223 return Error(Loc, "Invalid restricted vector register, expected even "
6224 "register in z16.s..z30.s");
6225 case Match_InvalidZPRMul2_Lo64:
6226 return Error(Loc, "Invalid restricted vector register, expected even "
6227 "register in z0.d..z14.d");
6228 case Match_InvalidZPRMul2_Hi64:
6229 return Error(Loc, "Invalid restricted vector register, expected even "
6230 "register in z16.d..z30.d");
6231 case Match_InvalidZPR_K0:
6232 return Error(Loc, "invalid restricted vector register, expected register "
6233 "in z20..z23 or z28..z31");
6234 case Match_InvalidSVEPattern:
6235 return Error(Loc, "invalid predicate pattern");
6236 case Match_InvalidSVEPPRorPNRAnyReg:
6237 case Match_InvalidSVEPPRorPNRBReg:
6238 case Match_InvalidSVEPredicateAnyReg:
6239 case Match_InvalidSVEPredicateBReg:
6240 case Match_InvalidSVEPredicateHReg:
6241 case Match_InvalidSVEPredicateSReg:
6242 case Match_InvalidSVEPredicateDReg:
6243 return Error(Loc, "invalid predicate register.");
6244 case Match_InvalidSVEPredicate3bAnyReg:
6245 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6246 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6247 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6248 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6249 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6250 return Error(Loc, "Invalid predicate register, expected PN in range "
6251 "pn8..pn15 with element suffix.");
6252 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6253 return Error(Loc, "invalid restricted predicate-as-counter register "
6254 "expected pn8..pn15");
6255 case Match_InvalidSVEPNPredicateBReg:
6256 case Match_InvalidSVEPNPredicateHReg:
6257 case Match_InvalidSVEPNPredicateSReg:
6258 case Match_InvalidSVEPNPredicateDReg:
6259 return Error(Loc, "Invalid predicate register, expected PN in range "
6260 "pn0..pn15 with element suffix.");
6261 case Match_InvalidSVEVecLenSpecifier:
6262 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6263 case Match_InvalidSVEPredicateListMul2x8:
6264 case Match_InvalidSVEPredicateListMul2x16:
6265 case Match_InvalidSVEPredicateListMul2x32:
6266 case Match_InvalidSVEPredicateListMul2x64:
6267 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6268 "predicate registers, where the first vector is a multiple of 2 "
6269 "and with correct element type");
6270 case Match_InvalidSVEExactFPImmOperandHalfOne:
6271 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6272 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6273 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6274 case Match_InvalidSVEExactFPImmOperandZeroOne:
6275 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6276 case Match_InvalidMatrixTileVectorH8:
6277 case Match_InvalidMatrixTileVectorV8:
6278 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6279 case Match_InvalidMatrixTileVectorH16:
6280 case Match_InvalidMatrixTileVectorV16:
6281 return Error(Loc,
6282 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6283 case Match_InvalidMatrixTileVectorH32:
6284 case Match_InvalidMatrixTileVectorV32:
6285 return Error(Loc,
6286 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6287 case Match_InvalidMatrixTileVectorH64:
6288 case Match_InvalidMatrixTileVectorV64:
6289 return Error(Loc,
6290 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6291 case Match_InvalidMatrixTileVectorH128:
6292 case Match_InvalidMatrixTileVectorV128:
6293 return Error(Loc,
6294 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6295 case Match_InvalidMatrixTile16:
6296 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6297 case Match_InvalidMatrixTile32:
6298 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6299 case Match_InvalidMatrixTile64:
6300 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6301 case Match_InvalidMatrix:
6302 return Error(Loc, "invalid matrix operand, expected za");
6303 case Match_InvalidMatrix8:
6304 return Error(Loc, "invalid matrix operand, expected suffix .b");
6305 case Match_InvalidMatrix16:
6306 return Error(Loc, "invalid matrix operand, expected suffix .h");
6307 case Match_InvalidMatrix32:
6308 return Error(Loc, "invalid matrix operand, expected suffix .s");
6309 case Match_InvalidMatrix64:
6310 return Error(Loc, "invalid matrix operand, expected suffix .d");
6311 case Match_InvalidMatrixIndexGPR32_12_15:
6312 return Error(Loc, "operand must be a register in range [w12, w15]");
6313 case Match_InvalidMatrixIndexGPR32_8_11:
6314 return Error(Loc, "operand must be a register in range [w8, w11]");
6315 case Match_InvalidSVEVectorList2x8Mul2:
6316 case Match_InvalidSVEVectorList2x16Mul2:
6317 case Match_InvalidSVEVectorList2x32Mul2:
6318 case Match_InvalidSVEVectorList2x64Mul2:
6319 case Match_InvalidSVEVectorList2x128Mul2:
6320 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6321 "SVE vectors, where the first vector is a multiple of 2 "
6322 "and with matching element types");
6323 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6324 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6325 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6326 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6327 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6328 "SVE vectors in the range z0-z14, where the first vector "
6329 "is a multiple of 2 "
6330 "and with matching element types");
6331 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6332 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6333 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6334 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6335 return Error(Loc,
6336 "Invalid vector list, expected list with 2 consecutive "
6337 "SVE vectors in the range z16-z30, where the first vector "
6338 "is a multiple of 2 "
6339 "and with matching element types");
6340 case Match_InvalidSVEVectorList4x8Mul4:
6341 case Match_InvalidSVEVectorList4x16Mul4:
6342 case Match_InvalidSVEVectorList4x32Mul4:
6343 case Match_InvalidSVEVectorList4x64Mul4:
6344 case Match_InvalidSVEVectorList4x128Mul4:
6345 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6346 "SVE vectors, where the first vector is a multiple of 4 "
6347 "and with matching element types");
6348 case Match_InvalidLookupTable:
6349 return Error(Loc, "Invalid lookup table, expected zt0");
6350 case Match_InvalidSVEVectorListStrided2x8:
6351 case Match_InvalidSVEVectorListStrided2x16:
6352 case Match_InvalidSVEVectorListStrided2x32:
6353 case Match_InvalidSVEVectorListStrided2x64:
6354 return Error(
6355 Loc,
6356 "Invalid vector list, expected list with each SVE vector in the list "
6357 "8 registers apart, and the first register in the range [z0, z7] or "
6358 "[z16, z23] and with correct element type");
6359 case Match_InvalidSVEVectorListStrided4x8:
6360 case Match_InvalidSVEVectorListStrided4x16:
6361 case Match_InvalidSVEVectorListStrided4x32:
6362 case Match_InvalidSVEVectorListStrided4x64:
6363 return Error(
6364 Loc,
6365 "Invalid vector list, expected list with each SVE vector in the list "
6366 "4 registers apart, and the first register in the range [z0, z3] or "
6367 "[z16, z19] and with correct element type");
6368 case Match_AddSubLSLImm3ShiftLarge:
6369 return Error(Loc,
6370 "expected 'lsl' with optional integer in range [0, 7]");
6371 default:
6372 llvm_unreachable("unexpected error code!");
6373 }
6374}
6375
6376static const char *getSubtargetFeatureName(uint64_t Val);
6377
6378bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6379 OperandVector &Operands,
6380 MCStreamer &Out,
6381 uint64_t &ErrorInfo,
6382 bool MatchingInlineAsm) {
6383 assert(!Operands.empty() && "Unexpected empty operand list!");
6384 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6385 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6386
6387 StringRef Tok = Op.getToken();
6388 unsigned NumOperands = Operands.size();
6389
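// The "lsl Rd, Rn, #shift" alias is rewritten to UBFM below, e.g.
// (illustrative) "lsl w0, w1, #4" becomes "ubfm w0, w1, #28, #27".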
6390 if (NumOperands == 4 && Tok == "lsl") {
6391 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6392 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6393 if (Op2.isScalarReg() && Op3.isImm()) {
6394 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6395 if (Op3CE) {
6396 uint64_t Op3Val = Op3CE->getValue();
6397 uint64_t NewOp3Val = 0;
6398 uint64_t NewOp4Val = 0;
6399 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6400 Op2.getReg())) {
6401 NewOp3Val = (32 - Op3Val) & 0x1f;
6402 NewOp4Val = 31 - Op3Val;
6403 } else {
6404 NewOp3Val = (64 - Op3Val) & 0x3f;
6405 NewOp4Val = 63 - Op3Val;
6406 }
6407
6408 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6409 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6410
6411 Operands[0] =
6412 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6413 Operands.push_back(AArch64Operand::CreateImm(
6414 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6415 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6416 Op3.getEndLoc(), getContext());
6417 }
6418 }
6419 } else if (NumOperands == 4 && Tok == "bfc") {
6420 // FIXME: Horrible hack to handle BFC->BFM alias.
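// Illustrative rewrite performed below: "bfc w0, #3, #4" becomes
// "bfm w0, wzr, #29, #3" (ImmR = (32 - lsb) % 32, ImmS = width - 1).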
6421 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6422 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6423 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6424
6425 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6426 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6427 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6428
6429 if (LSBCE && WidthCE) {
6430 uint64_t LSB = LSBCE->getValue();
6431 uint64_t Width = WidthCE->getValue();
6432
6433 uint64_t RegWidth = 0;
6434 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6435 Op1.getReg()))
6436 RegWidth = 64;
6437 else
6438 RegWidth = 32;
6439
6440 if (LSB >= RegWidth)
6441 return Error(LSBOp.getStartLoc(),
6442 "expected integer in range [0, 31]");
6443 if (Width < 1 || Width > RegWidth)
6444 return Error(WidthOp.getStartLoc(),
6445 "expected integer in range [1, 32]");
6446
6447 uint64_t ImmR = 0;
6448 if (RegWidth == 32)
6449 ImmR = (32 - LSB) & 0x1f;
6450 else
6451 ImmR = (64 - LSB) & 0x3f;
6452
6453 uint64_t ImmS = Width - 1;
6454
6455 if (ImmR != 0 && ImmS >= ImmR)
6456 return Error(WidthOp.getStartLoc(),
6457 "requested insert overflows register");
6458
6459 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6460 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6461 Operands[0] =
6462 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6463 Operands[2] = AArch64Operand::CreateReg(
6464 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6465 SMLoc(), SMLoc(), getContext());
6466 Operands[3] = AArch64Operand::CreateImm(
6467 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6468 Operands.emplace_back(
6469 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6470 WidthOp.getEndLoc(), getContext()));
6471 }
6472 }
6473 } else if (NumOperands == 5) {
6474 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6475 // UBFIZ -> UBFM aliases.
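// For example (illustrative), "bfi x0, x1, #8, #4" becomes
// "bfm x0, x1, #56, #3": immr = (regwidth - lsb) % regwidth, imms = width - 1.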
6476 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6477 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6478 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6479 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6480
6481 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6482 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6483 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6484
6485 if (Op3CE && Op4CE) {
6486 uint64_t Op3Val = Op3CE->getValue();
6487 uint64_t Op4Val = Op4CE->getValue();
6488
6489 uint64_t RegWidth = 0;
6490 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6491 Op1.getReg()))
6492 RegWidth = 64;
6493 else
6494 RegWidth = 32;
6495
6496 if (Op3Val >= RegWidth)
6497 return Error(Op3.getStartLoc(),
6498 "expected integer in range [0, 31]");
6499 if (Op4Val < 1 || Op4Val > RegWidth)
6500 return Error(Op4.getStartLoc(),
6501 "expected integer in range [1, 32]");
6502
6503 uint64_t NewOp3Val = 0;
6504 if (RegWidth == 32)
6505 NewOp3Val = (32 - Op3Val) & 0x1f;
6506 else
6507 NewOp3Val = (64 - Op3Val) & 0x3f;
6508
6509 uint64_t NewOp4Val = Op4Val - 1;
6510
6511 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6512 return Error(Op4.getStartLoc(),
6513 "requested insert overflows register");
6514
6515 const MCExpr *NewOp3 =
6516 MCConstantExpr::create(NewOp3Val, getContext());
6517 const MCExpr *NewOp4 =
6518 MCConstantExpr::create(NewOp4Val, getContext());
6519 Operands[3] = AArch64Operand::CreateImm(
6520 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6521 Operands[4] = AArch64Operand::CreateImm(
6522 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6523 if (Tok == "bfi")
6524 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6525 getContext());
6526 else if (Tok == "sbfiz")
6527 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6528 getContext());
6529 else if (Tok == "ubfiz")
6530 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6531 getContext());
6532 else
6533 llvm_unreachable("No valid mnemonic for alias?");
6534 }
6535 }
6536
6537 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6538 // UBFX -> UBFM aliases.
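// For example (illustrative), "ubfx x0, x1, #8, #4" becomes
// "ubfm x0, x1, #8, #11": immr stays at lsb and imms = lsb + width - 1.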
6539 } else if (NumOperands == 5 &&
6540 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6541 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6542 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6543 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6544
6545 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6546 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6547 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6548
6549 if (Op3CE && Op4CE) {
6550 uint64_t Op3Val = Op3CE->getValue();
6551 uint64_t Op4Val = Op4CE->getValue();
6552
6553 uint64_t RegWidth = 0;
6554 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6555 Op1.getReg()))
6556 RegWidth = 64;
6557 else
6558 RegWidth = 32;
6559
6560 if (Op3Val >= RegWidth)
6561 return Error(Op3.getStartLoc(),
6562 "expected integer in range [0, 31]");
6563 if (Op4Val < 1 || Op4Val > RegWidth)
6564 return Error(Op4.getStartLoc(),
6565 "expected integer in range [1, 32]");
6566
6567 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6568
6569 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6570 return Error(Op4.getStartLoc(),
6571 "requested extract overflows register");
6572
6573 const MCExpr *NewOp4 =
6574 MCConstantExpr::create(NewOp4Val, getContext());
6575 Operands[4] = AArch64Operand::CreateImm(
6576 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6577 if (Tok == "bfxil")
6578 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6579 getContext());
6580 else if (Tok == "sbfx")
6581 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6582 getContext());
6583 else if (Tok == "ubfx")
6584 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6585 getContext());
6586 else
6587 llvm_unreachable("No valid mnemonic for alias?");
6588 }
6589 }
6590 }
6591 }
6592
6593 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6594 // instruction for FP registers correctly in some rare circumstances. Convert
6595 // it to a safe instruction and warn (because silently changing someone's
6596 // assembly is rude).
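// For example (illustrative), "movi.2d v0, #0" is emitted as "movi.16b v0, #0",
// which writes the same 128-bit zero value.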
6597 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6598 NumOperands == 4 && Tok == "movi") {
6599 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6600 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6601 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6602 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6603 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6604 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6605 if (Suffix.lower() == ".2d" &&
6606 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6607 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6608 " correctly on this CPU, converting to equivalent movi.16b");
6609 // Switch the suffix to .16b.
6610 unsigned Idx = Op1.isToken() ? 1 : 2;
6611 Operands[Idx] =
6612 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6613 }
6614 }
6615 }
6616
6617 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6618 // InstAlias can't quite handle this since the reg classes aren't
6619 // subclasses.
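// For example (illustrative), in "sxtw x0, w1" the source w1 is rewritten to
// x1 before matching, since the matcher pattern expects a GPR64 source.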
6620 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6621 // The source register can be Wn here, but the matcher expects a
6622 // GPR64. Twiddle it here if necessary.
6623 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6624 if (Op.isScalarReg()) {
6625 MCRegister Reg = getXRegFromWReg(Op.getReg());
6626 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6627 Op.getStartLoc(), Op.getEndLoc(),
6628 getContext());
6629 }
6630 }
6631 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6632 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6633 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6634 if (Op.isScalarReg() &&
6635 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6636 Op.getReg())) {
6637 // The source register can be Wn here, but the matcher expects a
6638 // GPR64. Twiddle it here if necessary.
6639 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6640 if (Op.isScalarReg()) {
6641 MCRegister Reg = getXRegFromWReg(Op.getReg());
6642 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6643 Op.getStartLoc(),
6644 Op.getEndLoc(), getContext());
6645 }
6646 }
6647 }
6648 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6649 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6650 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6651 if (Op.isScalarReg() &&
6652 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6653 Op.getReg())) {
6654 // The source register can be Wn here, but the matcher expects a
6655 // GPR32. Twiddle it here if necessary.
6656 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6657 if (Op.isScalarReg()) {
6658 MCRegister Reg = getWRegFromXReg(Op.getReg());
6659 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6660 Op.getStartLoc(),
6661 Op.getEndLoc(), getContext());
6662 }
6663 }
6664 }
6665
6666 MCInst Inst;
6667 FeatureBitset MissingFeatures;
6668 // First try to match against the secondary set of tables containing the
6669 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6670 unsigned MatchResult =
6671 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6672 MatchingInlineAsm, 1);
6673
6674 // If that fails, try against the alternate table containing long-form NEON:
6675 // "fadd v0.2s, v1.2s, v2.2s"
6676 if (MatchResult != Match_Success) {
6677 // But first, save the short-form match result: we can use it in case the
6678 // long-form match also fails.
6679 auto ShortFormNEONErrorInfo = ErrorInfo;
6680 auto ShortFormNEONMatchResult = MatchResult;
6681 auto ShortFormNEONMissingFeatures = MissingFeatures;
6682
6683 MatchResult =
6684 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6685 MatchingInlineAsm, 0);
6686
6687 // Now, both matches failed, and the long-form match failed on the mnemonic
6688 // suffix token operand. The short-form match failure is probably more
6689 // relevant: use it instead.
6690 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6691 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6692 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6693 MatchResult = ShortFormNEONMatchResult;
6694 ErrorInfo = ShortFormNEONErrorInfo;
6695 MissingFeatures = ShortFormNEONMissingFeatures;
6696 }
6697 }
6698
6699 switch (MatchResult) {
6700 case Match_Success: {
6701 // Perform range checking and other semantic validations
6702 SmallVector<SMLoc, 8> OperandLocs;
6703 NumOperands = Operands.size();
6704 for (unsigned i = 1; i < NumOperands; ++i)
6705 OperandLocs.push_back(Operands[i]->getStartLoc());
6706 if (validateInstruction(Inst, IDLoc, OperandLocs))
6707 return true;
6708
6709 Inst.setLoc(IDLoc);
6710 Out.emitInstruction(Inst, getSTI());
6711 return false;
6712 }
6713 case Match_MissingFeature: {
6714 assert(MissingFeatures.any() && "Unknown missing feature!");
6715 // Special case the error message for the very common case where only
6716 // a single subtarget feature is missing (neon, e.g.).
6717 std::string Msg = "instruction requires:";
6718 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6719 if (MissingFeatures[i]) {
6720 Msg += " ";
6721 Msg += getSubtargetFeatureName(i);
6722 }
6723 }
6724 return Error(IDLoc, Msg);
6725 }
6726 case Match_MnemonicFail:
6727 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6728 case Match_InvalidOperand: {
6729 SMLoc ErrorLoc = IDLoc;
6730
6731 if (ErrorInfo != ~0ULL) {
6732 if (ErrorInfo >= Operands.size())
6733 return Error(IDLoc, "too few operands for instruction",
6734 SMRange(IDLoc, getTok().getLoc()));
6735
6736 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6737 if (ErrorLoc == SMLoc())
6738 ErrorLoc = IDLoc;
6739 }
6740 // If the match failed on a suffix token operand, tweak the diagnostic
6741 // accordingly.
6742 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6743 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6744 MatchResult = Match_InvalidSuffix;
6745
6746 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6747 }
6748 case Match_InvalidTiedOperand:
6749 case Match_InvalidMemoryIndexed1:
6750 case Match_InvalidMemoryIndexed2:
6751 case Match_InvalidMemoryIndexed4:
6752 case Match_InvalidMemoryIndexed8:
6753 case Match_InvalidMemoryIndexed16:
6754 case Match_InvalidCondCode:
6755 case Match_AddSubLSLImm3ShiftLarge:
6756 case Match_AddSubRegExtendSmall:
6757 case Match_AddSubRegExtendLarge:
6758 case Match_AddSubSecondSource:
6759 case Match_LogicalSecondSource:
6760 case Match_AddSubRegShift32:
6761 case Match_AddSubRegShift64:
6762 case Match_InvalidMovImm32Shift:
6763 case Match_InvalidMovImm64Shift:
6764 case Match_InvalidFPImm:
6765 case Match_InvalidMemoryWExtend8:
6766 case Match_InvalidMemoryWExtend16:
6767 case Match_InvalidMemoryWExtend32:
6768 case Match_InvalidMemoryWExtend64:
6769 case Match_InvalidMemoryWExtend128:
6770 case Match_InvalidMemoryXExtend8:
6771 case Match_InvalidMemoryXExtend16:
6772 case Match_InvalidMemoryXExtend32:
6773 case Match_InvalidMemoryXExtend64:
6774 case Match_InvalidMemoryXExtend128:
6775 case Match_InvalidMemoryIndexed1SImm4:
6776 case Match_InvalidMemoryIndexed2SImm4:
6777 case Match_InvalidMemoryIndexed3SImm4:
6778 case Match_InvalidMemoryIndexed4SImm4:
6779 case Match_InvalidMemoryIndexed1SImm6:
6780 case Match_InvalidMemoryIndexed16SImm4:
6781 case Match_InvalidMemoryIndexed32SImm4:
6782 case Match_InvalidMemoryIndexed4SImm7:
6783 case Match_InvalidMemoryIndexed8SImm7:
6784 case Match_InvalidMemoryIndexed16SImm7:
6785 case Match_InvalidMemoryIndexed8UImm5:
6786 case Match_InvalidMemoryIndexed8UImm3:
6787 case Match_InvalidMemoryIndexed4UImm5:
6788 case Match_InvalidMemoryIndexed2UImm5:
6789 case Match_InvalidMemoryIndexed1UImm6:
6790 case Match_InvalidMemoryIndexed2UImm6:
6791 case Match_InvalidMemoryIndexed4UImm6:
6792 case Match_InvalidMemoryIndexed8UImm6:
6793 case Match_InvalidMemoryIndexed16UImm6:
6794 case Match_InvalidMemoryIndexedSImm6:
6795 case Match_InvalidMemoryIndexedSImm5:
6796 case Match_InvalidMemoryIndexedSImm8:
6797 case Match_InvalidMemoryIndexedSImm9:
6798 case Match_InvalidMemoryIndexed16SImm9:
6799 case Match_InvalidMemoryIndexed8SImm10:
6800 case Match_InvalidImm0_0:
6801 case Match_InvalidImm0_1:
6802 case Match_InvalidImm0_3:
6803 case Match_InvalidImm0_7:
6804 case Match_InvalidImm0_15:
6805 case Match_InvalidImm0_31:
6806 case Match_InvalidImm0_63:
6807 case Match_InvalidImm0_127:
6808 case Match_InvalidImm0_255:
6809 case Match_InvalidImm0_65535:
6810 case Match_InvalidImm1_8:
6811 case Match_InvalidImm1_16:
6812 case Match_InvalidImm1_32:
6813 case Match_InvalidImm1_64:
6814 case Match_InvalidImmM1_62:
6815 case Match_InvalidMemoryIndexedRange2UImm0:
6816 case Match_InvalidMemoryIndexedRange2UImm1:
6817 case Match_InvalidMemoryIndexedRange2UImm2:
6818 case Match_InvalidMemoryIndexedRange2UImm3:
6819 case Match_InvalidMemoryIndexedRange4UImm0:
6820 case Match_InvalidMemoryIndexedRange4UImm1:
6821 case Match_InvalidMemoryIndexedRange4UImm2:
6822 case Match_InvalidSVEAddSubImm8:
6823 case Match_InvalidSVEAddSubImm16:
6824 case Match_InvalidSVEAddSubImm32:
6825 case Match_InvalidSVEAddSubImm64:
6826 case Match_InvalidSVECpyImm8:
6827 case Match_InvalidSVECpyImm16:
6828 case Match_InvalidSVECpyImm32:
6829 case Match_InvalidSVECpyImm64:
6830 case Match_InvalidIndexRange0_0:
6831 case Match_InvalidIndexRange1_1:
6832 case Match_InvalidIndexRange0_15:
6833 case Match_InvalidIndexRange0_7:
6834 case Match_InvalidIndexRange0_3:
6835 case Match_InvalidIndexRange0_1:
6836 case Match_InvalidSVEIndexRange0_63:
6837 case Match_InvalidSVEIndexRange0_31:
6838 case Match_InvalidSVEIndexRange0_15:
6839 case Match_InvalidSVEIndexRange0_7:
6840 case Match_InvalidSVEIndexRange0_3:
6841 case Match_InvalidLabel:
6842 case Match_InvalidComplexRotationEven:
6843 case Match_InvalidComplexRotationOdd:
6844 case Match_InvalidGPR64shifted8:
6845 case Match_InvalidGPR64shifted16:
6846 case Match_InvalidGPR64shifted32:
6847 case Match_InvalidGPR64shifted64:
6848 case Match_InvalidGPR64shifted128:
6849 case Match_InvalidGPR64NoXZRshifted8:
6850 case Match_InvalidGPR64NoXZRshifted16:
6851 case Match_InvalidGPR64NoXZRshifted32:
6852 case Match_InvalidGPR64NoXZRshifted64:
6853 case Match_InvalidGPR64NoXZRshifted128:
6854 case Match_InvalidZPR32UXTW8:
6855 case Match_InvalidZPR32UXTW16:
6856 case Match_InvalidZPR32UXTW32:
6857 case Match_InvalidZPR32UXTW64:
6858 case Match_InvalidZPR32SXTW8:
6859 case Match_InvalidZPR32SXTW16:
6860 case Match_InvalidZPR32SXTW32:
6861 case Match_InvalidZPR32SXTW64:
6862 case Match_InvalidZPR64UXTW8:
6863 case Match_InvalidZPR64SXTW8:
6864 case Match_InvalidZPR64UXTW16:
6865 case Match_InvalidZPR64SXTW16:
6866 case Match_InvalidZPR64UXTW32:
6867 case Match_InvalidZPR64SXTW32:
6868 case Match_InvalidZPR64UXTW64:
6869 case Match_InvalidZPR64SXTW64:
6870 case Match_InvalidZPR32LSL8:
6871 case Match_InvalidZPR32LSL16:
6872 case Match_InvalidZPR32LSL32:
6873 case Match_InvalidZPR32LSL64:
6874 case Match_InvalidZPR64LSL8:
6875 case Match_InvalidZPR64LSL16:
6876 case Match_InvalidZPR64LSL32:
6877 case Match_InvalidZPR64LSL64:
6878 case Match_InvalidZPR0:
6879 case Match_InvalidZPR8:
6880 case Match_InvalidZPR16:
6881 case Match_InvalidZPR32:
6882 case Match_InvalidZPR64:
6883 case Match_InvalidZPR128:
6884 case Match_InvalidZPR_3b8:
6885 case Match_InvalidZPR_3b16:
6886 case Match_InvalidZPR_3b32:
6887 case Match_InvalidZPR_4b8:
6888 case Match_InvalidZPR_4b16:
6889 case Match_InvalidZPR_4b32:
6890 case Match_InvalidZPR_4b64:
6891 case Match_InvalidSVEPPRorPNRAnyReg:
6892 case Match_InvalidSVEPPRorPNRBReg:
6893 case Match_InvalidSVEPredicateAnyReg:
6894 case Match_InvalidSVEPattern:
6895 case Match_InvalidSVEVecLenSpecifier:
6896 case Match_InvalidSVEPredicateBReg:
6897 case Match_InvalidSVEPredicateHReg:
6898 case Match_InvalidSVEPredicateSReg:
6899 case Match_InvalidSVEPredicateDReg:
6900 case Match_InvalidSVEPredicate3bAnyReg:
6901 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6902 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6903 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6904 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6905 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6906 case Match_InvalidSVEPNPredicateBReg:
6907 case Match_InvalidSVEPNPredicateHReg:
6908 case Match_InvalidSVEPNPredicateSReg:
6909 case Match_InvalidSVEPNPredicateDReg:
6910 case Match_InvalidSVEPredicateListMul2x8:
6911 case Match_InvalidSVEPredicateListMul2x16:
6912 case Match_InvalidSVEPredicateListMul2x32:
6913 case Match_InvalidSVEPredicateListMul2x64:
6914 case Match_InvalidSVEExactFPImmOperandHalfOne:
6915 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6916 case Match_InvalidSVEExactFPImmOperandZeroOne:
6917 case Match_InvalidMatrixTile16:
6918 case Match_InvalidMatrixTile32:
6919 case Match_InvalidMatrixTile64:
6920 case Match_InvalidMatrix:
6921 case Match_InvalidMatrix8:
6922 case Match_InvalidMatrix16:
6923 case Match_InvalidMatrix32:
6924 case Match_InvalidMatrix64:
6925 case Match_InvalidMatrixTileVectorH8:
6926 case Match_InvalidMatrixTileVectorH16:
6927 case Match_InvalidMatrixTileVectorH32:
6928 case Match_InvalidMatrixTileVectorH64:
6929 case Match_InvalidMatrixTileVectorH128:
6930 case Match_InvalidMatrixTileVectorV8:
6931 case Match_InvalidMatrixTileVectorV16:
6932 case Match_InvalidMatrixTileVectorV32:
6933 case Match_InvalidMatrixTileVectorV64:
6934 case Match_InvalidMatrixTileVectorV128:
6935 case Match_InvalidSVCR:
6936 case Match_InvalidMatrixIndexGPR32_12_15:
6937 case Match_InvalidMatrixIndexGPR32_8_11:
6938 case Match_InvalidLookupTable:
6939 case Match_InvalidZPRMul2_Lo8:
6940 case Match_InvalidZPRMul2_Hi8:
6941 case Match_InvalidZPRMul2_Lo16:
6942 case Match_InvalidZPRMul2_Hi16:
6943 case Match_InvalidZPRMul2_Lo32:
6944 case Match_InvalidZPRMul2_Hi32:
6945 case Match_InvalidZPRMul2_Lo64:
6946 case Match_InvalidZPRMul2_Hi64:
6947 case Match_InvalidZPR_K0:
6948 case Match_InvalidSVEVectorList2x8Mul2:
6949 case Match_InvalidSVEVectorList2x16Mul2:
6950 case Match_InvalidSVEVectorList2x32Mul2:
6951 case Match_InvalidSVEVectorList2x64Mul2:
6952 case Match_InvalidSVEVectorList2x128Mul2:
6953 case Match_InvalidSVEVectorList4x8Mul4:
6954 case Match_InvalidSVEVectorList4x16Mul4:
6955 case Match_InvalidSVEVectorList4x32Mul4:
6956 case Match_InvalidSVEVectorList4x64Mul4:
6957 case Match_InvalidSVEVectorList4x128Mul4:
6958 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6959 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6960 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6961 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6962 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6963 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6964 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6965 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6966 case Match_InvalidSVEVectorListStrided2x8:
6967 case Match_InvalidSVEVectorListStrided2x16:
6968 case Match_InvalidSVEVectorListStrided2x32:
6969 case Match_InvalidSVEVectorListStrided2x64:
6970 case Match_InvalidSVEVectorListStrided4x8:
6971 case Match_InvalidSVEVectorListStrided4x16:
6972 case Match_InvalidSVEVectorListStrided4x32:
6973 case Match_InvalidSVEVectorListStrided4x64:
6974 case Match_MSR:
6975 case Match_MRS: {
6976 if (ErrorInfo >= Operands.size())
6977 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6978 // Any time we get here, there's nothing fancy to do. Just get the
6979 // operand SMLoc and display the diagnostic.
6980 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6981 if (ErrorLoc == SMLoc())
6982 ErrorLoc = IDLoc;
6983 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6984 }
6985 }
6986
6987 llvm_unreachable("Implement any new match types added!");
6988}
6989
6990/// ParseDirective parses the AArch64-specific directives
6991bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6992 const MCContext::Environment Format = getContext().getObjectFileType();
6993 bool IsMachO = Format == MCContext::IsMachO;
6994 bool IsCOFF = Format == MCContext::IsCOFF;
6995
6996 auto IDVal = DirectiveID.getIdentifier().lower();
6997 SMLoc Loc = DirectiveID.getLoc();
6998 if (IDVal == ".arch")
6999 parseDirectiveArch(Loc);
7000 else if (IDVal == ".cpu")
7001 parseDirectiveCPU(Loc);
7002 else if (IDVal == ".tlsdesccall")
7003 parseDirectiveTLSDescCall(Loc);
7004 else if (IDVal == ".ltorg" || IDVal == ".pool")
7005 parseDirectiveLtorg(Loc);
7006 else if (IDVal == ".unreq")
7007 parseDirectiveUnreq(Loc);
7008 else if (IDVal == ".inst")
7009 parseDirectiveInst(Loc);
7010 else if (IDVal == ".cfi_negate_ra_state")
7011 parseDirectiveCFINegateRAState();
7012 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7013 parseDirectiveCFINegateRAStateWithPC();
7014 else if (IDVal == ".cfi_b_key_frame")
7015 parseDirectiveCFIBKeyFrame();
7016 else if (IDVal == ".cfi_mte_tagged_frame")
7017 parseDirectiveCFIMTETaggedFrame();
7018 else if (IDVal == ".arch_extension")
7019 parseDirectiveArchExtension(Loc);
7020 else if (IDVal == ".variant_pcs")
7021 parseDirectiveVariantPCS(Loc);
7022 else if (IsMachO) {
7023 if (IDVal == MCLOHDirectiveName())
7024 parseDirectiveLOH(IDVal, Loc);
7025 else
7026 return true;
7027 } else if (IsCOFF) {
7028 if (IDVal == ".seh_stackalloc")
7029 parseDirectiveSEHAllocStack(Loc);
7030 else if (IDVal == ".seh_endprologue")
7031 parseDirectiveSEHPrologEnd(Loc);
7032 else if (IDVal == ".seh_save_r19r20_x")
7033 parseDirectiveSEHSaveR19R20X(Loc);
7034 else if (IDVal == ".seh_save_fplr")
7035 parseDirectiveSEHSaveFPLR(Loc);
7036 else if (IDVal == ".seh_save_fplr_x")
7037 parseDirectiveSEHSaveFPLRX(Loc);
7038 else if (IDVal == ".seh_save_reg")
7039 parseDirectiveSEHSaveReg(Loc);
7040 else if (IDVal == ".seh_save_reg_x")
7041 parseDirectiveSEHSaveRegX(Loc);
7042 else if (IDVal == ".seh_save_regp")
7043 parseDirectiveSEHSaveRegP(Loc);
7044 else if (IDVal == ".seh_save_regp_x")
7045 parseDirectiveSEHSaveRegPX(Loc);
7046 else if (IDVal == ".seh_save_lrpair")
7047 parseDirectiveSEHSaveLRPair(Loc);
7048 else if (IDVal == ".seh_save_freg")
7049 parseDirectiveSEHSaveFReg(Loc);
7050 else if (IDVal == ".seh_save_freg_x")
7051 parseDirectiveSEHSaveFRegX(Loc);
7052 else if (IDVal == ".seh_save_fregp")
7053 parseDirectiveSEHSaveFRegP(Loc);
7054 else if (IDVal == ".seh_save_fregp_x")
7055 parseDirectiveSEHSaveFRegPX(Loc);
7056 else if (IDVal == ".seh_set_fp")
7057 parseDirectiveSEHSetFP(Loc);
7058 else if (IDVal == ".seh_add_fp")
7059 parseDirectiveSEHAddFP(Loc);
7060 else if (IDVal == ".seh_nop")
7061 parseDirectiveSEHNop(Loc);
7062 else if (IDVal == ".seh_save_next")
7063 parseDirectiveSEHSaveNext(Loc);
7064 else if (IDVal == ".seh_startepilogue")
7065 parseDirectiveSEHEpilogStart(Loc);
7066 else if (IDVal == ".seh_endepilogue")
7067 parseDirectiveSEHEpilogEnd(Loc);
7068 else if (IDVal == ".seh_trap_frame")
7069 parseDirectiveSEHTrapFrame(Loc);
7070 else if (IDVal == ".seh_pushframe")
7071 parseDirectiveSEHMachineFrame(Loc);
7072 else if (IDVal == ".seh_context")
7073 parseDirectiveSEHContext(Loc);
7074 else if (IDVal == ".seh_ec_context")
7075 parseDirectiveSEHECContext(Loc);
7076 else if (IDVal == ".seh_clear_unwound_to_call")
7077 parseDirectiveSEHClearUnwoundToCall(Loc);
7078 else if (IDVal == ".seh_pac_sign_lr")
7079 parseDirectiveSEHPACSignLR(Loc);
7080 else if (IDVal == ".seh_save_any_reg")
7081 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7082 else if (IDVal == ".seh_save_any_reg_p")
7083 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7084 else if (IDVal == ".seh_save_any_reg_x")
7085 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7086 else if (IDVal == ".seh_save_any_reg_px")
7087 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7088 else
7089 return true;
7090 } else
7091 return true;
7092 return false;
7093}
7094
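// Expand the legacy "+crypto" / "+nocrypto" extension into the concrete
// features it implies for the given architecture. For example (illustrative),
// ".arch armv8.2-a+crypto" enables sha2 and aes, while ".arch armv9-a+crypto"
// enables sm4, sha3, sha2 and aes.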
7095static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7096 SmallVector<StringRef, 4> &RequestedExtensions) {
7097 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7098 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7099
7100 if (!NoCrypto && Crypto) {
7101 // Map 'generic' (and others) to sha2 and aes, because
7102 // that was the traditional meaning of crypto.
7103 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7104 ArchInfo == AArch64::ARMV8_3A) {
7105 RequestedExtensions.push_back("sha2");
7106 RequestedExtensions.push_back("aes");
7107 }
7108 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7109 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7110 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7111 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7112 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7113 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7114 RequestedExtensions.push_back("sm4");
7115 RequestedExtensions.push_back("sha3");
7116 RequestedExtensions.push_back("sha2");
7117 RequestedExtensions.push_back("aes");
7118 }
7119 } else if (NoCrypto) {
7120 // Map 'generic' (and others) to sha2 and aes, because
7121 // that was the traditional meaning of crypto.
7122 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7123 ArchInfo == AArch64::ARMV8_3A) {
7124 RequestedExtensions.push_back("nosha2");
7125 RequestedExtensions.push_back("noaes");
7126 }
7127 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7128 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7129 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7130 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7131 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7132 ArchInfo == AArch64::ARMV9_4A) {
7133 RequestedExtensions.push_back("nosm4");
7134 RequestedExtensions.push_back("nosha3");
7135 RequestedExtensions.push_back("nosha2");
7136 RequestedExtensions.push_back("noaes");
7137 }
7138 }
7139}
7140
7141static SMLoc incrementLoc(SMLoc L, int Offset) {
7142 return SMLoc::getFromPointer(L.getPointer() + Offset);
7143}
7144
7145/// parseDirectiveArch
7146/// ::= .arch token
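/// For example (illustrative): ".arch armv8.2-a+crypto" or ".arch armv8-a+lse".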
7147bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7148 SMLoc CurLoc = getLoc();
7149
7150 StringRef Arch, ExtensionString;
7151 std::tie(Arch, ExtensionString) =
7152 getParser().parseStringToEndOfStatement().trim().split('+');
7153
7154 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7155 if (!ArchInfo)
7156 return Error(CurLoc, "unknown arch name");
7157
7158 if (parseToken(AsmToken::EndOfStatement))
7159 return true;
7160
7161 // Get the architecture and extension features.
7162 std::vector<StringRef> AArch64Features;
7163 AArch64Features.push_back(ArchInfo->ArchFeature);
7164 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7165
7166 MCSubtargetInfo &STI = copySTI();
7167 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7168 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7169 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7170
7171 SmallVector<StringRef, 4> RequestedExtensions;
7172 if (!ExtensionString.empty())
7173 ExtensionString.split(RequestedExtensions, '+');
7174
7175 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7176 CurLoc = incrementLoc(CurLoc, Arch.size());
7177
7178 for (auto Name : RequestedExtensions) {
7179 // Advance source location past '+'.
7180 CurLoc = incrementLoc(CurLoc, 1);
7181
7182 bool EnableFeature = !Name.consume_front_insensitive("no");
7183
7184 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7185 return Extension.Name == Name;
7186 });
7187
7188 if (It == std::end(ExtensionMap))
7189 return Error(CurLoc, "unsupported architectural extension: " + Name);
7190
7191 if (EnableFeature)
7192 STI.SetFeatureBitsTransitively(It->Features);
7193 else
7194 STI.ClearFeatureBitsTransitively(It->Features);
7195 CurLoc = incrementLoc(CurLoc, Name.size());
7196 }
7197 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7198 setAvailableFeatures(Features);
7199 return false;
7200}
7201
7202/// parseDirectiveArchExtension
7203/// ::= .arch_extension [no]feature
7204bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7205 SMLoc ExtLoc = getLoc();
7206
7207 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7208
7209 if (parseEOL())
7210 return true;
7211
7212 bool EnableFeature = true;
7213 if (Name.starts_with_insensitive("no")) {
7214 EnableFeature = false;
7215 Name = Name.substr(2);
7216 }
7217
7218 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7219 return Extension.Name == Name;
7220 });
7221
7222 if (It == std::end(ExtensionMap))
7223 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7224
7225 MCSubtargetInfo &STI = copySTI();
7226 if (EnableFeature)
7227 STI.SetFeatureBitsTransitively(It->Features);
7228 else
7229 STI.ClearFeatureBitsTransitively(It->Features);
7230 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7231 setAvailableFeatures(Features);
7232 return false;
7233}
7234
7235/// parseDirectiveCPU
7236/// ::= .cpu id
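/// For example (illustrative): ".cpu cortex-a76+crypto" or ".cpu generic+sve".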
7237bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7238 SMLoc CurLoc = getLoc();
7239
7240 StringRef CPU, ExtensionString;
7241 std::tie(CPU, ExtensionString) =
7242 getParser().parseStringToEndOfStatement().trim().split('+');
7243
7244 if (parseToken(AsmToken::EndOfStatement))
7245 return true;
7246
7247 SmallVector<StringRef, 4> RequestedExtensions;
7248 if (!ExtensionString.empty())
7249 ExtensionString.split(RequestedExtensions, '+');
7250
7251 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7252 if (!CpuArch) {
7253 Error(CurLoc, "unknown CPU name");
7254 return false;
7255 }
7256 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7257
7258 MCSubtargetInfo &STI = copySTI();
7259 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7260 CurLoc = incrementLoc(CurLoc, CPU.size());
7261
7262 for (auto Name : RequestedExtensions) {
7263 // Advance source location past '+'.
7264 CurLoc = incrementLoc(CurLoc, 1);
7265
7266 bool EnableFeature = !Name.consume_front_insensitive("no");
7267
7268 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7269 return Extension.Name == Name;
7270 });
7271
7272 if (It == std::end(ExtensionMap))
7273 return Error(CurLoc, "unsupported architectural extension: " + Name);
7274
7275 if (EnableFeature)
7276 STI.SetFeatureBitsTransitively(It->Features);
7277 else
7278 STI.ClearFeatureBitsTransitively(It->Features);
7279 CurLoc = incrementLoc(CurLoc, Name.size());
7280 }
7281 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7282 setAvailableFeatures(Features);
7283 return false;
7284}
7285
7286/// parseDirectiveInst
7287/// ::= .inst opcode [, ...]
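/// Each operand must evaluate to a constant; for example (illustrative),
/// ".inst 0xd503201f" emits the 32-bit NOP encoding.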
7288bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7289 if (getLexer().is(AsmToken::EndOfStatement))
7290 return Error(Loc, "expected expression following '.inst' directive");
7291
7292 auto parseOp = [&]() -> bool {
7293 SMLoc L = getLoc();
7294 const MCExpr *Expr = nullptr;
7295 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7296 return true;
7297 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7298 if (check(!Value, L, "expected constant expression"))
7299 return true;
7300 getTargetStreamer().emitInst(Value->getValue());
7301 return false;
7302 };
7303
7304 return parseMany(parseOp);
7305}
7306
7307// parseDirectiveTLSDescCall:
7308// ::= .tlsdesccall symbol
7309bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7310 StringRef Name;
7311 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7312 parseToken(AsmToken::EndOfStatement))
7313 return true;
7314
7315 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7316 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7317 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7318
7319 MCInst Inst;
7320 Inst.setOpcode(AArch64::TLSDESCCALL);
7321 Inst.addOperand(MCOperand::createExpr(Expr));
7322
7323 getParser().getStreamer().emitInstruction(Inst, getSTI());
7324 return false;
7325}
7326
7327/// ::= .loh <lohName | lohId> label1, ..., labelN
7328/// The number of arguments depends on the loh identifier.
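/// For example (illustrative): ".loh AdrpAdd Lpc0_0, Lpc0_1", where AdrpAdd
/// takes exactly two labels.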
7329bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7330 MCLOHType Kind;
7331 if (getTok().isNot(AsmToken::Identifier)) {
7332 if (getTok().isNot(AsmToken::Integer))
7333 return TokError("expected an identifier or a number in directive");
7334 // We successfully get a numeric value for the identifier.
7335 // Check if it is valid.
7336 int64_t Id = getTok().getIntVal();
7337 if (Id <= -1U && !isValidMCLOHType(Id))
7338 return TokError("invalid numeric identifier in directive");
7339 Kind = (MCLOHType)Id;
7340 } else {
7341 StringRef Name = getTok().getIdentifier();
7342 // We successfully parse an identifier.
7343 // Check if it is a recognized one.
7344 int Id = MCLOHNameToId(Name);
7345
7346 if (Id == -1)
7347 return TokError("invalid identifier in directive");
7348 Kind = (MCLOHType)Id;
7349 }
7350 // Consume the identifier.
7351 Lex();
7352 // Get the number of arguments of this LOH.
7353 int NbArgs = MCLOHIdToNbArgs(Kind);
7354
7355 assert(NbArgs != -1 && "Invalid number of arguments");
7356
7357 SmallVector<MCSymbol *, 3> Args;
7358 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7359 StringRef Name;
7360 if (getParser().parseIdentifier(Name))
7361 return TokError("expected identifier in directive");
7362 Args.push_back(getContext().getOrCreateSymbol(Name));
7363
7364 if (Idx + 1 == NbArgs)
7365 break;
7366 if (parseComma())
7367 return true;
7368 }
7369 if (parseEOL())
7370 return true;
7371
7372 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7373 return false;
7374}
7375
7376/// parseDirectiveLtorg
7377/// ::= .ltorg | .pool
7378bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7379 if (parseEOL())
7380 return true;
7381 getTargetStreamer().emitCurrentConstantPool();
7382 return false;
7383}
7384
7385/// parseDirectiveReq
7386/// ::= name .req registername
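/// For example (illustrative): "fpreg .req x29"; subsequent uses of "fpreg"
/// then refer to x29 until a matching ".unreq fpreg".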
7387bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7388 Lex(); // Eat the '.req' token.
7389 SMLoc SRegLoc = getLoc();
7390 RegKind RegisterKind = RegKind::Scalar;
7391 MCRegister RegNum;
7392 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7393
7394 if (!ParseRes.isSuccess()) {
7395 StringRef Kind;
7396 RegisterKind = RegKind::NeonVector;
7397 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7398
7399 if (ParseRes.isFailure())
7400 return true;
7401
7402 if (ParseRes.isSuccess() && !Kind.empty())
7403 return Error(SRegLoc, "vector register without type specifier expected");
7404 }
7405
7406 if (!ParseRes.isSuccess()) {
7407 StringRef Kind;
7408 RegisterKind = RegKind::SVEDataVector;
7409 ParseRes =
7410 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7411
7412 if (ParseRes.isFailure())
7413 return true;
7414
7415 if (ParseRes.isSuccess() && !Kind.empty())
7416 return Error(SRegLoc,
7417 "sve vector register without type specifier expected");
7418 }
7419
7420 if (!ParseRes.isSuccess()) {
7421 StringRef Kind;
7422 RegisterKind = RegKind::SVEPredicateVector;
7423 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7424
7425 if (ParseRes.isFailure())
7426 return true;
7427
7428 if (ParseRes.isSuccess() && !Kind.empty())
7429 return Error(SRegLoc,
7430 "sve predicate register without type specifier expected");
7431 }
7432
7433 if (!ParseRes.isSuccess())
7434 return Error(SRegLoc, "register name or alias expected");
7435
7436 // Shouldn't be anything else.
7437 if (parseEOL())
7438 return true;
7439
7440 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7441 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7442 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7443
7444 return false;
7445}
7446
7447/// parseDirectiveUnreq
7448/// ::= .unreq registername
7449bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7450 if (getTok().isNot(AsmToken::Identifier))
7451 return TokError("unexpected input in .unreq directive.");
7452 RegisterReqs.erase(getTok().getIdentifier().lower());
7453 Lex(); // Eat the identifier.
7454 return parseToken(AsmToken::EndOfStatement);
7455}
7456
7457bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7458 if (parseEOL())
7459 return true;
7460 getStreamer().emitCFINegateRAState();
7461 return false;
7462}
7463
7464bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7465 if (parseEOL())
7466 return true;
7467 getStreamer().emitCFINegateRAStateWithPC();
7468 return false;
7469}
7470
7471/// parseDirectiveCFIBKeyFrame
7472/// ::= .cfi_b_key_frame
7473bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7474 if (parseEOL())
7475 return true;
7476 getStreamer().emitCFIBKeyFrame();
7477 return false;
7478}
7479
7480/// parseDirectiveCFIMTETaggedFrame
7481/// ::= .cfi_mte_tagged_frame
7482bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7483 if (parseEOL())
7484 return true;
7485 getStreamer().emitCFIMTETaggedFrame();
7486 return false;
7487}
7488
7489/// parseDirectiveVariantPCS
7490/// ::= .variant_pcs symbolname
7491bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7492 StringRef Name;
7493 if (getParser().parseIdentifier(Name))
7494 return TokError("expected symbol name");
7495 if (parseEOL())
7496 return true;
7497 getTargetStreamer().emitDirectiveVariantPCS(
7498 getContext().getOrCreateSymbol(Name));
7499 return false;
7500}
7501
7502/// parseDirectiveSEHAllocStack
7503/// ::= .seh_stackalloc
7504bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7505 int64_t Size;
7506 if (parseImmExpr(Size))
7507 return true;
7508 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7509 return false;
7510}
7511
7512/// parseDirectiveSEHPrologEnd
7513/// ::= .seh_endprologue
7514bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7515 getTargetStreamer().emitARM64WinCFIPrologEnd();
7516 return false;
7517}
7518
7519/// parseDirectiveSEHSaveR19R20X
7520/// ::= .seh_save_r19r20_x
7521bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7522 int64_t Offset;
7523 if (parseImmExpr(Offset))
7524 return true;
7525 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7526 return false;
7527}
7528
7529/// parseDirectiveSEHSaveFPLR
7530/// ::= .seh_save_fplr
7531bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7532 int64_t Offset;
7533 if (parseImmExpr(Offset))
7534 return true;
7535 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7536 return false;
7537}
7538
7539/// parseDirectiveSEHSaveFPLRX
7540/// ::= .seh_save_fplr_x
7541bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7542 int64_t Offset;
7543 if (parseImmExpr(Offset))
7544 return true;
7545 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7546 return false;
7547}
7548
7549/// parseDirectiveSEHSaveReg
7550/// ::= .seh_save_reg
7551bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7552 unsigned Reg;
7553 int64_t Offset;
7554 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7555 parseComma() || parseImmExpr(Offset))
7556 return true;
7557 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7558 return false;
7559}
7560
7561/// parseDirectiveSEHSaveRegX
7562/// ::= .seh_save_reg_x
7563bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7564 unsigned Reg;
7565 int64_t Offset;
7566 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7567 parseComma() || parseImmExpr(Offset))
7568 return true;
7569 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7570 return false;
7571}
7572
7573/// parseDirectiveSEHSaveRegP
7574/// ::= .seh_save_regp
7575bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7576 unsigned Reg;
7577 int64_t Offset;
7578 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7579 parseComma() || parseImmExpr(Offset))
7580 return true;
7581 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7582 return false;
7583}
7584
7585/// parseDirectiveSEHSaveRegPX
7586/// ::= .seh_save_regp_x
7587bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7588 unsigned Reg;
7589 int64_t Offset;
7590 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7591 parseComma() || parseImmExpr(Offset))
7592 return true;
7593 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7594 return false;
7595}
7596
7597/// parseDirectiveSEHSaveLRPair
7598/// ::= .seh_save_lrpair
7599bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7600 unsigned Reg;
7601 int64_t Offset;
7602 L = getLoc();
7603 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7604 parseComma() || parseImmExpr(Offset))
7605 return true;
7606 if (check(((Reg - 19) % 2 != 0), L,
7607 "expected register with even offset from x19"))
7608 return true;
7609 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7610 return false;
7611}
7612
7613/// parseDirectiveSEHSaveFReg
7614/// ::= .seh_save_freg
7615bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7616 unsigned Reg;
7617 int64_t Offset;
7618 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7619 parseComma() || parseImmExpr(Offset))
7620 return true;
7621 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7622 return false;
7623}
7624
7625/// parseDirectiveSEHSaveFRegX
7626/// ::= .seh_save_freg_x
7627bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7628 unsigned Reg;
7629 int64_t Offset;
7630 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7631 parseComma() || parseImmExpr(Offset))
7632 return true;
7633 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7634 return false;
7635}
7636
7637/// parseDirectiveSEHSaveFRegP
7638/// ::= .seh_save_fregp
7639bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7640 unsigned Reg;
7641 int64_t Offset;
7642 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7643 parseComma() || parseImmExpr(Offset))
7644 return true;
7645 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7646 return false;
7647}
7648
7649/// parseDirectiveSEHSaveFRegPX
7650/// ::= .seh_save_fregp_x
7651bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7652 unsigned Reg;
7653 int64_t Offset;
7654 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7655 parseComma() || parseImmExpr(Offset))
7656 return true;
7657 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7658 return false;
7659}
7660
7661/// parseDirectiveSEHSetFP
7662/// ::= .seh_set_fp
7663bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7664 getTargetStreamer().emitARM64WinCFISetFP();
7665 return false;
7666}
7667
7668/// parseDirectiveSEHAddFP
7669/// ::= .seh_add_fp
7670bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7671 int64_t Size;
7672 if (parseImmExpr(Size))
7673 return true;
7674 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7675 return false;
7676}
7677
7678/// parseDirectiveSEHNop
7679/// ::= .seh_nop
7680bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7681 getTargetStreamer().emitARM64WinCFINop();
7682 return false;
7683}
7684
7685/// parseDirectiveSEHSaveNext
7686/// ::= .seh_save_next
7687bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7688 getTargetStreamer().emitARM64WinCFISaveNext();
7689 return false;
7690}
7691
7692/// parseDirectiveSEHEpilogStart
7693/// ::= .seh_startepilogue
7694bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7695 getTargetStreamer().emitARM64WinCFIEpilogStart();
7696 return false;
7697}
7698
7699/// parseDirectiveSEHEpilogEnd
7700/// ::= .seh_endepilogue
7701bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7702 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7703 return false;
7704}
7705
7706/// parseDirectiveSEHTrapFrame
7707/// ::= .seh_trap_frame
7708bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7709 getTargetStreamer().emitARM64WinCFITrapFrame();
7710 return false;
7711}
7712
7713/// parseDirectiveSEHMachineFrame
7714/// ::= .seh_pushframe
7715bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7716 getTargetStreamer().emitARM64WinCFIMachineFrame();
7717 return false;
7718}
7719
7720/// parseDirectiveSEHContext
7721/// ::= .seh_context
7722bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7723 getTargetStreamer().emitARM64WinCFIContext();
7724 return false;
7725}
7726
7727/// parseDirectiveSEHECContext
7728/// ::= .seh_ec_context
7729bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7730 getTargetStreamer().emitARM64WinCFIECContext();
7731 return false;
7732}
7733
7734/// parseDirectiveSEHClearUnwoundToCall
7735/// ::= .seh_clear_unwound_to_call
7736bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7737 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7738 return false;
7739}
7740
7741/// parseDirectiveSEHPACSignLR
7742/// ::= .seh_pac_sign_lr
7743bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7744 getTargetStreamer().emitARM64WinCFIPACSignLR();
7745 return false;
7746}
7747
7748/// parseDirectiveSEHSaveAnyReg
7749/// ::= .seh_save_any_reg
7750/// ::= .seh_save_any_reg_p
7751/// ::= .seh_save_any_reg_x
7752/// ::= .seh_save_any_reg_px
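/// For example (illustrative): ".seh_save_any_reg x25, 32" (offsets must be
/// non-negative and a multiple of 8, or of 16 for paired/writeback and q forms).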
7753bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7754 bool Writeback) {
7755 MCRegister Reg;
7756 SMLoc Start, End;
7757 int64_t Offset;
7758 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7759 parseComma() || parseImmExpr(Offset))
7760 return true;
7761
7762 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7763 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7764 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7765 return Error(L, "invalid save_any_reg offset");
7766 unsigned EncodedReg;
7767 if (Reg == AArch64::FP)
7768 EncodedReg = 29;
7769 else if (Reg == AArch64::LR)
7770 EncodedReg = 30;
7771 else
7772 EncodedReg = Reg - AArch64::X0;
7773 if (Paired) {
7774 if (Reg == AArch64::LR)
7775 return Error(Start, "lr cannot be paired with another register");
7776 if (Writeback)
7777 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7778 else
7779 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7780 } else {
7781 if (Writeback)
7782 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7783 else
7784 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7785 }
7786 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7787 unsigned EncodedReg = Reg - AArch64::D0;
7788 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7789 return Error(L, "invalid save_any_reg offset");
7790 if (Paired) {
7791 if (Reg == AArch64::D31)
7792 return Error(Start, "d31 cannot be paired with another register");
7793 if (Writeback)
7794 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7795 else
7796 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7797 } else {
7798 if (Writeback)
7799 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7800 else
7801 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7802 }
7803 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7804 unsigned EncodedReg = Reg - AArch64::Q0;
7805 if (Offset < 0 || Offset % 16)
7806 return Error(L, "invalid save_any_reg offset");
7807 if (Paired) {
7808 if (Reg == AArch64::Q31)
7809 return Error(Start, "q31 cannot be paired with another register");
7810 if (Writeback)
7811 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7812 else
7813 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7814 } else {
7815 if (Writeback)
7816 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7817 else
7818 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7819 }
7820 } else {
7821 return Error(Start, "save_any_reg register must be x, q or d register");
7822 }
7823 return false;
7824}
7825
7826bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7827 // Try @AUTH expressions: they're more complex than the usual symbol variants.
7828 if (!parseAuthExpr(Res, EndLoc))
7829 return false;
7830 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
7831}
7832
7833/// parseAuthExpr
7834/// ::= _sym@AUTH(ib,123[,addr])
7835/// ::= (_sym + 5)@AUTH(ib,123[,addr])
7836/// ::= (_sym - 5)@AUTH(ib,123[,addr])
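/// The key must be one of the PAC key names (ia, ib, da, db), the integer
/// discriminator must fit in 16 bits, and the optional "addr" enables address
/// diversity.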
7837bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7838 MCAsmParser &Parser = getParser();
7839 MCContext &Ctx = getContext();
7840
7841 AsmToken Tok = Parser.getTok();
7842
7843 // Look for '_sym@AUTH' ...
7844 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
7845 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
7846 if (SymName.contains('@'))
7847 return TokError(
7848 "combination of @AUTH with other modifiers not supported");
7849 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7850
7851 Parser.Lex(); // Eat the identifier.
7852 } else {
7853 // ... or look for a more complex symbol reference, such as ...
7854 SmallVector<AsmToken, 6> Tokens;
7855
7856 // ... '"_long sym"@AUTH' ...
7857 if (Tok.is(AsmToken::String))
7858 Tokens.resize(2);
7859 // ... or '(_sym + 5)@AUTH'.
7860 else if (Tok.is(AsmToken::LParen))
7861 Tokens.resize(6);
7862 else
7863 return true;
7864
7865 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
7866 return true;
7867
7868 // In either case, the expression ends with '@' 'AUTH'.
7869 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
7870 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
7871 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
7872 return true;
7873
7874 if (Tok.is(AsmToken::String)) {
7875 StringRef SymName;
7876 if (Parser.parseIdentifier(SymName))
7877 return true;
7878 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7879 } else {
7880 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
7881 return true;
7882 }
7883
7884 Parser.Lex(); // '@'
7885 Parser.Lex(); // 'AUTH'
7886 }
7887
7888 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
7889 if (parseToken(AsmToken::LParen, "expected '('"))
7890 return true;
7891
7892 if (Parser.getTok().isNot(AsmToken::Identifier))
7893 return TokError("expected key name");
7894
7895 StringRef KeyStr = Parser.getTok().getIdentifier();
7896 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
7897 if (!KeyIDOrNone)
7898 return TokError("invalid key '" + KeyStr + "'");
7899 Parser.Lex();
7900
7901 if (parseToken(AsmToken::Comma, "expected ','"))
7902 return true;
7903
7904 if (Parser.getTok().isNot(AsmToken::Integer))
7905 return TokError("expected integer discriminator");
7906 int64_t Discriminator = Parser.getTok().getIntVal();
7907
7908 if (!isUInt<16>(Discriminator))
7909 return TokError("integer discriminator " + Twine(Discriminator) +
7910 " out of range [0, 0xFFFF]");
7911 Parser.Lex();
7912
7913 bool UseAddressDiversity = false;
7914 if (Parser.getTok().is(AsmToken::Comma)) {
7915 Parser.Lex();
7916 if (Parser.getTok().isNot(AsmToken::Identifier) ||
7917 Parser.getTok().getIdentifier() != "addr")
7918 return TokError("expected 'addr'");
7919 UseAddressDiversity = true;
7920 Parser.Lex();
7921 }
7922
7923 EndLoc = Parser.getTok().getEndLoc();
7924 if (parseToken(AsmToken::RParen, "expected ')'"))
7925 return true;
7926
7927 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
7928 UseAddressDiversity, Ctx);
7929 return false;
7930}
7931
7932bool
7933AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
7934 AArch64MCExpr::VariantKind &ELFRefKind,
7935 MCSymbolRefExpr::VariantKind &DarwinRefKind,
7936 int64_t &Addend) {
7937 ELFRefKind = AArch64MCExpr::VK_INVALID;
7938 DarwinRefKind = MCSymbolRefExpr::VK_None;
7939 Addend = 0;
7940
7941 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
7942 ELFRefKind = AE->getKind();
7943 Expr = AE->getSubExpr();
7944 }
7945
7946 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
7947 if (SE) {
7948 // It's a simple symbol reference with no addend.
7949 DarwinRefKind = SE->getKind();
7950 return true;
7951 }
7952
7953 // Check that it looks like a symbol + an addend
7954 MCValue Res;
7955 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
7956 if (!Relocatable || Res.getSymB())
7957 return false;
7958
7959 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
7960 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
7961 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
7962 return false;
7963
7964 if (Res.getSymA())
7965 DarwinRefKind = Res.getSymA()->getKind();
7966 Addend = Res.getConstant();
7967
7968 // It's some symbol reference + a constant addend, but really
7969 // shouldn't use both Darwin and ELF syntax.
7970 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
7971 DarwinRefKind == MCSymbolRefExpr::VK_None;
7972}
7973
7974/// Force static initialization.
7975extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
7976 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
7977 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
7978 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
7979 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
7980 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
7981}
7982
7983#define GET_REGISTER_MATCHER
7984#define GET_SUBTARGET_FEATURE_NAME
7985#define GET_MATCHER_IMPLEMENTATION
7986#define GET_MNEMONIC_SPELL_CHECKER
7987#include "AArch64GenAsmMatcher.inc"
7988
7989// Define this matcher function after the auto-generated include so we
7990// have the match class enum definitions.
7991unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
7992 unsigned Kind) {
7993 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
7994
7995 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
7996 if (!Op.isImm())
7997 return Match_InvalidOperand;
7998 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7999 if (!CE)
8000 return Match_InvalidOperand;
8001 if (CE->getValue() == ExpectedVal)
8002 return Match_Success;
8003 return Match_InvalidOperand;
8004 };
8005
8006 switch (Kind) {
8007 default:
8008 return Match_InvalidOperand;
8009 case MCK_MPR:
8010 // If the Kind is a token for the MPR register class which has the "za"
8011 // register (SME accumulator array), check if the asm is a literal "za"
8012 // token. This is for the "smstart za" alias that defines the register
8013 // as a literal token.
8014 if (Op.isTokenEqual("za"))
8015 return Match_Success;
8016 return Match_InvalidOperand;
8017
8018 // If the kind is a token for a literal immediate, check if our asm operand
8019 // matches. This is for InstAliases which have a fixed-value immediate in
8020 // the asm string, such as hints which are parsed into a specific
8021 // instruction definition.
8022#define MATCH_HASH(N) \
8023 case MCK__HASH_##N: \
8024 return MatchesOpImmediate(N);
8025 MATCH_HASH(0)
8026 MATCH_HASH(1)
8027 MATCH_HASH(2)
8028 MATCH_HASH(3)
8029 MATCH_HASH(4)
8030 MATCH_HASH(6)
8031 MATCH_HASH(7)
8032 MATCH_HASH(8)
8033 MATCH_HASH(10)
8034 MATCH_HASH(12)
8035 MATCH_HASH(14)
8036 MATCH_HASH(16)
8037 MATCH_HASH(24)
8038 MATCH_HASH(25)
8039 MATCH_HASH(26)
8040 MATCH_HASH(27)
8041 MATCH_HASH(28)
8042 MATCH_HASH(29)
8043 MATCH_HASH(30)
8044 MATCH_HASH(31)
8045 MATCH_HASH(32)
8046 MATCH_HASH(40)
8047 MATCH_HASH(48)
8048 MATCH_HASH(64)
8049#undef MATCH_HASH
8050#define MATCH_HASH_MINUS(N) \
8051 case MCK__HASH__MINUS_##N: \
8052 return MatchesOpImmediate(-N);
8053 MATCH_HASH_MINUS(4)
8054 MATCH_HASH_MINUS(8)
8055 MATCH_HASH_MINUS(16)
8056#undef MATCH_HASH_MINUS
8057 }
8058}
8059
8060ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8061
8062 SMLoc S = getLoc();
8063
8064 if (getTok().isNot(AsmToken::Identifier))
8065 return Error(S, "expected register");
8066
8067 MCRegister FirstReg;
8068 ParseStatus Res = tryParseScalarRegister(FirstReg);
8069 if (!Res.isSuccess())
8070 return Error(S, "expected first even register of a consecutive same-size "
8071 "even/odd register pair");
8072
8073 const MCRegisterClass &WRegClass =
8074 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8075 const MCRegisterClass &XRegClass =
8076 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8077
8078 bool isXReg = XRegClass.contains(FirstReg),
8079 isWReg = WRegClass.contains(FirstReg);
8080 if (!isXReg && !isWReg)
8081 return Error(S, "expected first even register of a consecutive same-size "
8082 "even/odd register pair");
8083
8084 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8085 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8086
8087 if (FirstEncoding & 0x1)
8088 return Error(S, "expected first even register of a consecutive same-size "
8089 "even/odd register pair");
8090
8091 if (getTok().isNot(AsmToken::Comma))
8092 return Error(getLoc(), "expected comma");
8093 // Eat the comma
8094 Lex();
8095
8096 SMLoc E = getLoc();
8097 MCRegister SecondReg;
8098 Res = tryParseScalarRegister(SecondReg);
8099 if (!Res.isSuccess())
8100 return Error(E, "expected second odd register of a consecutive same-size "
8101 "even/odd register pair");
8102
8103 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8104 (isXReg && !XRegClass.contains(SecondReg)) ||
8105 (isWReg && !WRegClass.contains(SecondReg)))
8106 return Error(E, "expected second odd register of a consecutive same-size "
8107 "even/odd register pair");
8108
8109 MCRegister Pair;
8110 if (isXReg) {
8111 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8112 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8113 } else {
8114 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8115 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8116 }
8117
8118 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8119 getLoc(), getContext()));
8120
8121 return ParseStatus::Success;
8122}
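// Illustrative usage: the sequential pair operand parsed above is what the
// CASP family of compare-and-swap-pair instructions expects, e.g.
//   casp x0, x1, x2, x3, [x5]
// where x0/x1 and x2/x3 must be consecutive even/odd registers of the same
// width.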
8123
8124template <bool ParseShiftExtend, bool ParseSuffix>
8125ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8126 const SMLoc S = getLoc();
8127 // Check for an SVE vector register specifier first.
8128 MCRegister RegNum;
8129 StringRef Kind;
8130
8131 ParseStatus Res =
8132 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8133
8134 if (!Res.isSuccess())
8135 return Res;
8136
8137 if (ParseSuffix && Kind.empty())
8138 return ParseStatus::NoMatch;
8139
8140 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8141 if (!KindRes)
8142 return ParseStatus::NoMatch;
8143
8144 unsigned ElementWidth = KindRes->second;
8145
8146 // No shift/extend is the default.
8147 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8148 Operands.push_back(AArch64Operand::CreateVectorReg(
8149 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8150
8151 ParseStatus Res = tryParseVectorIndex(Operands);
8152 if (Res.isFailure())
8153 return ParseStatus::Failure;
8154 return ParseStatus::Success;
8155 }
8156
8157 // Eat the comma
8158 Lex();
8159
8160 // Match the shift
8161 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8162 Res = tryParseOptionalShiftExtend(ExtOpnd);
8163 if (!Res.isSuccess())
8164 return Res;
8165
8166 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8167 Operands.push_back(AArch64Operand::CreateVectorReg(
8168 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8169 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8170 Ext->hasShiftExtendAmount()));
8171
8172 return ParseStatus::Success;
8173}
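// Illustrative usage (assumed example syntax): with ParseShiftExtend enabled
// this accepts the shifted vector form used in gather/scatter addressing,
// e.g. the "z1.d, lsl #3" portion of
//   ld1d { z0.d }, p0/z, [x0, z1.d, lsl #3]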
8174
8175ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8176 MCAsmParser &Parser = getParser();
8177
8178 SMLoc SS = getLoc();
8179 const AsmToken &TokE = getTok();
8180 bool IsHash = TokE.is(AsmToken::Hash);
8181
8182 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8183 return ParseStatus::NoMatch;
8184
8185 int64_t Pattern;
8186 if (IsHash) {
8187 Lex(); // Eat hash
8188
8189 // Parse the immediate operand.
8190 const MCExpr *ImmVal;
8191 SS = getLoc();
8192 if (Parser.parseExpression(ImmVal))
8193 return ParseStatus::Failure;
8194
8195 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8196 if (!MCE)
8197 return TokError("invalid operand for instruction");
8198
8199 Pattern = MCE->getValue();
8200 } else {
8201 // Parse the pattern
8202 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8203 if (!Pat)
8204 return ParseStatus::NoMatch;
8205
8206 Lex();
8207 Pattern = Pat->Encoding;
8208 assert(Pattern >= 0 && Pattern < 32);
8209 }
8210
8211 Operands.push_back(
8212 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8213 SS, getLoc(), getContext()));
8214
8215 return ParseStatus::Success;
8216}
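// Illustrative usage (assumed example syntax): the pattern operand accepts
// either a named SVE predicate pattern or a plain immediate, e.g.
//   ptrue p0.s, vl64
//   ptrue p0.s, #31
// with the named forms drawn from the SVEPREDPAT table.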
8217
8218ParseStatus
8219AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8220 int64_t Pattern;
8221 SMLoc SS = getLoc();
8222 const AsmToken &TokE = getTok();
8223 // Parse the pattern
8224 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8225 TokE.getString());
8226 if (!Pat)
8227 return ParseStatus::NoMatch;
8228
8229 Lex();
8230 Pattern = Pat->Encoding;
8231 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8232
8233 Operands.push_back(
8234 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8235 SS, getLoc(), getContext()));
8236
8237 return ParseStatus::Success;
8238}
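// Illustrative usage (assumed example; the accepted names come from the
// SVEVECLENSPECIFIER table): the vector-length specifier is the trailing
// "vlx2"/"vlx4" token on some SME2 while-predicate forms, e.g.
//   whilelo pn8.h, x0, x1, vlx2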
8239
8240ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8241 SMLoc SS = getLoc();
8242
8243 MCRegister XReg;
8244 if (!tryParseScalarRegister(XReg).isSuccess())
8245 return ParseStatus::NoMatch;
8246
8247 MCContext &ctx = getContext();
8248 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8249 MCRegister X8Reg = RI->getMatchingSuperReg(
8250 XReg, AArch64::x8sub_0,
8251 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8252 if (!X8Reg)
8253 return Error(SS,
8254 "expected an even-numbered x-register in the range [x0,x22]");
8255
8256 Operands.push_back(
8257 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8258 return ParseStatus::Success;
8259}
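// Illustrative usage: the x8-tuple operand names eight consecutive registers
// with a single even-numbered base in the LS64 accelerator loads/stores, e.g.
//   ld64b x0, [x13]
// where "x0" stands for the tuple x0..x7 (base at most x22).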
8260
8261ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8262 SMLoc S = getLoc();
8263
8264 if (getTok().isNot(AsmToken::Integer))
8265 return ParseStatus::NoMatch;
8266
8267 if (getLexer().peekTok().isNot(AsmToken::Colon))
8268 return ParseStatus::NoMatch;
8269
8270 const MCExpr *ImmF;
8271 if (getParser().parseExpression(ImmF))
8272 return ParseStatus::NoMatch;
8273
8274 if (getTok().isNot(AsmToken::Colon))
8275 return ParseStatus::NoMatch;
8276
8277 Lex(); // Eat ':'
8278 if (getTok().isNot(AsmToken::Integer))
8279 return ParseStatus::NoMatch;
8280
8281 SMLoc E = getTok().getLoc();
8282 const MCExpr *ImmL;
8283 if (getParser().parseExpression(ImmL))
8284 return ParseStatus::NoMatch;
8285
8286 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8287 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8288
8289 Operands.push_back(
8290 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8291 return ParseStatus::Success;
8292}
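// Illustrative usage (assumed example syntax; only the <first>:<last> shape
// matters to the parser): immediate ranges appear in SME2 ZA-slice operands,
// e.g. the "0:1" in
//   mova { z0.d, z1.d }, za.d[w8, 0:1]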
8293
8294template <int Adj>
8295ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8296 SMLoc S = getLoc();
8297
8298 parseOptionalToken(AsmToken::Hash);
8299 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8300
8301 if (getTok().isNot(AsmToken::Integer))
8302 return ParseStatus::NoMatch;
8303
8304 const MCExpr *Ex;
8305 if (getParser().parseExpression(Ex))
8306 return ParseStatus::NoMatch;
8307
8308 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8309 if (IsNegative)
8310 Imm = -Imm;
8311
8312 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8313 // return a value that is certain to trigger an error message about invalid
8314 // immediate range instead of a non-descriptive invalid operand error.
8315 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8316 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8317 Imm = -2;
8318 else
8319 Imm += Adj;
8320
8321 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8322 Operands.push_back(AArch64Operand::CreateImm(
8323 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8324
8325 return ParseStatus::Success;
8326}
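// Worked example of the adjustment above: with Adj == -1, an input of "#1"
// yields 0 and "#64" yields 63; any input whose adjusted value falls outside
// [0, 63] is replaced by -2 so that the range diagnostic, rather than a
// generic invalid-operand error, is emitted.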