LLVM 20.0.0git
AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateAsCounter,
72 SVEPredicateVector,
73 Matrix,
74 LookupTable
75};
76
77enum class MatrixKind { Array, Tile, Row, Col };
78
79enum RegConstraintEqualityTy {
80 EqualsReg,
81 EqualsSuperReg,
82 EqualsSubReg
83};
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registered via the .req directive.
90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
91
92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 MCRegister getDstReg() const { return Dst; }
140 MCRegister getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 MCRegister Dst;
150 MCRegister Pg;
151 } NextPrefix;
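// NextPrefix records a pending MOVPRFX. For illustration, the kind of
// sequences it describes (syntax only; the checks live in validateInstruction):
//   movprfx z0, z1             // MOVPRFX_ZZ, unpredicated
//   add     z0.d, z0.d, #1
//   movprfx z0.d, p0/m, z1.d   // MOVPRFX_ZPmZ_D, predicated
//   add     z0.d, p0/m, z0.d, z2.d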
152
153 AArch64TargetStreamer &getTargetStreamer() {
154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
181 OperandVector &Operands);
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFINegateRAStateWithPC();
199 bool parseDirectiveCFIBKeyFrame();
200 bool parseDirectiveCFIMTETaggedFrame();
201
202 bool parseDirectiveVariantPCS(SMLoc L);
203
204 bool parseDirectiveSEHAllocStack(SMLoc L);
205 bool parseDirectiveSEHPrologEnd(SMLoc L);
206 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
207 bool parseDirectiveSEHSaveFPLR(SMLoc L);
208 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
209 bool parseDirectiveSEHSaveReg(SMLoc L);
210 bool parseDirectiveSEHSaveRegX(SMLoc L);
211 bool parseDirectiveSEHSaveRegP(SMLoc L);
212 bool parseDirectiveSEHSaveRegPX(SMLoc L);
213 bool parseDirectiveSEHSaveLRPair(SMLoc L);
214 bool parseDirectiveSEHSaveFReg(SMLoc L);
215 bool parseDirectiveSEHSaveFRegX(SMLoc L);
216 bool parseDirectiveSEHSaveFRegP(SMLoc L);
217 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
218 bool parseDirectiveSEHSetFP(SMLoc L);
219 bool parseDirectiveSEHAddFP(SMLoc L);
220 bool parseDirectiveSEHNop(SMLoc L);
221 bool parseDirectiveSEHSaveNext(SMLoc L);
222 bool parseDirectiveSEHEpilogStart(SMLoc L);
223 bool parseDirectiveSEHEpilogEnd(SMLoc L);
224 bool parseDirectiveSEHTrapFrame(SMLoc L);
225 bool parseDirectiveSEHMachineFrame(SMLoc L);
226 bool parseDirectiveSEHContext(SMLoc L);
227 bool parseDirectiveSEHECContext(SMLoc L);
228 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
229 bool parseDirectiveSEHPACSignLR(SMLoc L);
230 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
231
232 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
233 SmallVectorImpl<SMLoc> &Loc);
234 unsigned getNumRegsForRegKind(RegKind K);
235 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
236 OperandVector &Operands, MCStreamer &Out,
237 uint64_t &ErrorInfo,
238 bool MatchingInlineAsm) override;
239 /// @name Auto-generated Match Functions
240 /// {
241
242#define GET_ASSEMBLER_HEADER
243#include "AArch64GenAsmMatcher.inc"
244
245 /// }
246
247 ParseStatus tryParseScalarRegister(MCRegister &Reg);
248 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
249 RegKind MatchKind);
250 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
251 ParseStatus tryParseSVCR(OperandVector &Operands);
252 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
253 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
254 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
255 ParseStatus tryParseSysReg(OperandVector &Operands);
256 ParseStatus tryParseSysCROperand(OperandVector &Operands);
257 template <bool IsSVEPrefetch = false>
258 ParseStatus tryParsePrefetch(OperandVector &Operands);
259 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
260 ParseStatus tryParsePSBHint(OperandVector &Operands);
261 ParseStatus tryParseBTIHint(OperandVector &Operands);
262 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
263 ParseStatus tryParseAdrLabel(OperandVector &Operands);
264 template <bool AddFPZeroAsLiteral>
265 ParseStatus tryParseFPImm(OperandVector &Operands);
266 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
267 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
268 bool tryParseNeonVectorRegister(OperandVector &Operands);
269 ParseStatus tryParseVectorIndex(OperandVector &Operands);
270 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
271 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
272 template <bool ParseShiftExtend,
273 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
274 ParseStatus tryParseGPROperand(OperandVector &Operands);
275 ParseStatus tryParseZTOperand(OperandVector &Operands);
276 template <bool ParseShiftExtend, bool ParseSuffix>
277 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
278 template <RegKind RK>
279 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
280 ParseStatus
281 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
282 template <RegKind VectorKind>
283 ParseStatus tryParseVectorList(OperandVector &Operands,
284 bool ExpectMatch = false);
285 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
286 ParseStatus tryParseSVEPattern(OperandVector &Operands);
287 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
288 ParseStatus tryParseGPR64x8(OperandVector &Operands);
289 ParseStatus tryParseImmRange(OperandVector &Operands);
290 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
291 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
292
293public:
294 enum AArch64MatchResultTy {
295 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
296#define GET_OPERAND_DIAGNOSTIC_TYPES
297#include "AArch64GenAsmMatcher.inc"
298 };
299 bool IsILP32;
300 bool IsWindowsArm64EC;
301
302 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
303 const MCInstrInfo &MII, const MCTargetOptions &Options)
304 : MCTargetAsmParser(Options, STI, MII) {
305 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
306 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
307 MCAsmParserExtension::Initialize(Parser);
308 MCStreamer &S = getParser().getStreamer();
309 if (S.getTargetStreamer() == nullptr)
310 new AArch64TargetStreamer(S);
311
312 // Alias .hword/.word/.[dx]word to the target-independent
313 // .2byte/.4byte/.8byte directives as they have the same form and
314 // semantics:
315 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
316 Parser.addAliasForDirective(".hword", ".2byte");
317 Parser.addAliasForDirective(".word", ".4byte");
318 Parser.addAliasForDirective(".dword", ".8byte");
319 Parser.addAliasForDirective(".xword", ".8byte");
320
321 // Initialize the set of available features.
322 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
323 }
324
325 bool areEqualRegs(const MCParsedAsmOperand &Op1,
326 const MCParsedAsmOperand &Op2) const override;
327 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
328 SMLoc NameLoc, OperandVector &Operands) override;
329 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
330 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
331 SMLoc &EndLoc) override;
332 bool ParseDirective(AsmToken DirectiveID) override;
333 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
334 unsigned Kind) override;
335
336 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
337
338 static bool classifySymbolRef(const MCExpr *Expr,
339 AArch64MCExpr::VariantKind &ELFRefKind,
340 MCSymbolRefExpr::VariantKind &DarwinRefKind,
341 int64_t &Addend);
342};
343
344/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
345/// instruction.
346class AArch64Operand : public MCParsedAsmOperand {
347private:
348 enum KindTy {
349 k_Immediate,
350 k_ShiftedImm,
351 k_ImmRange,
352 k_CondCode,
353 k_Register,
354 k_MatrixRegister,
355 k_MatrixTileList,
356 k_SVCR,
357 k_VectorList,
358 k_VectorIndex,
359 k_Token,
360 k_SysReg,
361 k_SysCR,
362 k_Prefetch,
363 k_ShiftExtend,
364 k_FPImm,
365 k_Barrier,
366 k_PSBHint,
367 k_PHint,
368 k_BTIHint,
369 } Kind;
370
371 SMLoc StartLoc, EndLoc;
372
373 struct TokOp {
374 const char *Data;
375 unsigned Length;
376 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
377 };
378
379 // Separate shift/extend operand.
380 struct ShiftExtendOp {
381 AArch64_AM::ShiftExtendType Type;
382 unsigned Amount;
383 bool HasExplicitAmount;
384 };
385
386 struct RegOp {
387 unsigned RegNum;
388 RegKind Kind;
389 int ElementWidth;
390
391 // The register may be allowed as a different register class,
392 // e.g. for GPR64as32 or GPR32as64.
393 RegConstraintEqualityTy EqualityTy;
394
395 // In some cases the shift/extend needs to be explicitly parsed together
396 // with the register, rather than as a separate operand. This is needed
397 // for addressing modes where the instruction as a whole dictates the
398 // scaling/extend, rather than specific bits in the instruction.
399 // By parsing them as a single operand, we avoid the need to pass an
400 // extra operand in all CodeGen patterns (because all operands need to
401 // have an associated value), and we avoid the need to update TableGen to
402 // accept operands that have no associated bits in the instruction.
403 //
404 // An added benefit of parsing them together is that the assembler
405 // can give a sensible diagnostic if the scaling is not correct.
406 //
407 // The default is 'lsl #0' (HasExplicitAmount = false) if no
408 // ShiftExtend is specified.
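// For example, in "ldr x0, [x1, x2, lsl #3]" the "x2, lsl #3" part is parsed
// as a single register operand whose ShiftExtend is LSL with amount 3.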
409 ShiftExtendOp ShiftExtend;
410 };
411
412 struct MatrixRegOp {
413 unsigned RegNum;
414 unsigned ElementWidth;
415 MatrixKind Kind;
416 };
417
418 struct MatrixTileListOp {
419 unsigned RegMask = 0;
420 };
421
422 struct VectorListOp {
423 unsigned RegNum;
424 unsigned Count;
425 unsigned Stride;
426 unsigned NumElements;
427 unsigned ElementWidth;
428 RegKind RegisterKind;
429 };
430
431 struct VectorIndexOp {
432 int Val;
433 };
434
435 struct ImmOp {
436 const MCExpr *Val;
437 };
438
439 struct ShiftedImmOp {
440 const MCExpr *Val;
441 unsigned ShiftAmount;
442 };
443
444 struct ImmRangeOp {
445 unsigned First;
446 unsigned Last;
447 };
448
449 struct CondCodeOp {
450 AArch64CC::CondCode Code;
451 };
452
453 struct FPImmOp {
454 uint64_t Val; // APFloat value bitcasted to uint64_t.
455 bool IsExact; // describes whether parsed value was exact.
456 };
457
458 struct BarrierOp {
459 const char *Data;
460 unsigned Length;
461 unsigned Val; // Not the enum since not all values have names.
462 bool HasnXSModifier;
463 };
464
465 struct SysRegOp {
466 const char *Data;
467 unsigned Length;
468 uint32_t MRSReg;
469 uint32_t MSRReg;
470 uint32_t PStateField;
471 };
472
473 struct SysCRImmOp {
474 unsigned Val;
475 };
476
477 struct PrefetchOp {
478 const char *Data;
479 unsigned Length;
480 unsigned Val;
481 };
482
483 struct PSBHintOp {
484 const char *Data;
485 unsigned Length;
486 unsigned Val;
487 };
488 struct PHintOp {
489 const char *Data;
490 unsigned Length;
491 unsigned Val;
492 };
493 struct BTIHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498
499 struct SVCROp {
500 const char *Data;
501 unsigned Length;
502 unsigned PStateField;
503 };
504
505 union {
506 struct TokOp Tok;
507 struct RegOp Reg;
508 struct MatrixRegOp MatrixReg;
509 struct MatrixTileListOp MatrixTileList;
510 struct VectorListOp VectorList;
511 struct VectorIndexOp VectorIndex;
512 struct ImmOp Imm;
513 struct ShiftedImmOp ShiftedImm;
514 struct ImmRangeOp ImmRange;
515 struct CondCodeOp CondCode;
516 struct FPImmOp FPImm;
517 struct BarrierOp Barrier;
518 struct SysRegOp SysReg;
519 struct SysCRImmOp SysCRImm;
520 struct PrefetchOp Prefetch;
521 struct PSBHintOp PSBHint;
522 struct PHintOp PHint;
523 struct BTIHintOp BTIHint;
524 struct ShiftExtendOp ShiftExtend;
525 struct SVCROp SVCR;
526 };
527
528 // Keep the MCContext around as the MCExprs may need to be manipulated
529 // during the add<>Operands() calls.
530 MCContext &Ctx;
531
532public:
533 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
534
535 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
536 Kind = o.Kind;
537 StartLoc = o.StartLoc;
538 EndLoc = o.EndLoc;
539 switch (Kind) {
540 case k_Token:
541 Tok = o.Tok;
542 break;
543 case k_Immediate:
544 Imm = o.Imm;
545 break;
546 case k_ShiftedImm:
547 ShiftedImm = o.ShiftedImm;
548 break;
549 case k_ImmRange:
550 ImmRange = o.ImmRange;
551 break;
552 case k_CondCode:
553 CondCode = o.CondCode;
554 break;
555 case k_FPImm:
556 FPImm = o.FPImm;
557 break;
558 case k_Barrier:
559 Barrier = o.Barrier;
560 break;
561 case k_Register:
562 Reg = o.Reg;
563 break;
564 case k_MatrixRegister:
565 MatrixReg = o.MatrixReg;
566 break;
567 case k_MatrixTileList:
568 MatrixTileList = o.MatrixTileList;
569 break;
570 case k_VectorList:
571 VectorList = o.VectorList;
572 break;
573 case k_VectorIndex:
574 VectorIndex = o.VectorIndex;
575 break;
576 case k_SysReg:
577 SysReg = o.SysReg;
578 break;
579 case k_SysCR:
580 SysCRImm = o.SysCRImm;
581 break;
582 case k_Prefetch:
583 Prefetch = o.Prefetch;
584 break;
585 case k_PSBHint:
586 PSBHint = o.PSBHint;
587 break;
588 case k_PHint:
589 PHint = o.PHint;
590 break;
591 case k_BTIHint:
592 BTIHint = o.BTIHint;
593 break;
594 case k_ShiftExtend:
595 ShiftExtend = o.ShiftExtend;
596 break;
597 case k_SVCR:
598 SVCR = o.SVCR;
599 break;
600 }
601 }
602
603 /// getStartLoc - Get the location of the first token of this operand.
604 SMLoc getStartLoc() const override { return StartLoc; }
605 /// getEndLoc - Get the location of the last token of this operand.
606 SMLoc getEndLoc() const override { return EndLoc; }
607
608 StringRef getToken() const {
609 assert(Kind == k_Token && "Invalid access!");
610 return StringRef(Tok.Data, Tok.Length);
611 }
612
613 bool isTokenSuffix() const {
614 assert(Kind == k_Token && "Invalid access!");
615 return Tok.IsSuffix;
616 }
617
618 const MCExpr *getImm() const {
619 assert(Kind == k_Immediate && "Invalid access!");
620 return Imm.Val;
621 }
622
623 const MCExpr *getShiftedImmVal() const {
624 assert(Kind == k_ShiftedImm && "Invalid access!");
625 return ShiftedImm.Val;
626 }
627
628 unsigned getShiftedImmShift() const {
629 assert(Kind == k_ShiftedImm && "Invalid access!");
630 return ShiftedImm.ShiftAmount;
631 }
632
633 unsigned getFirstImmVal() const {
634 assert(Kind == k_ImmRange && "Invalid access!");
635 return ImmRange.First;
636 }
637
638 unsigned getLastImmVal() const {
639 assert(Kind == k_ImmRange && "Invalid access!");
640 return ImmRange.Last;
641 }
642
643 AArch64CC::CondCode getCondCode() const {
644 assert(Kind == k_CondCode && "Invalid access!");
645 return CondCode.Code;
646 }
647
648 APFloat getFPImm() const {
649 assert (Kind == k_FPImm && "Invalid access!");
650 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
651 }
652
653 bool getFPImmIsExact() const {
654 assert (Kind == k_FPImm && "Invalid access!");
655 return FPImm.IsExact;
656 }
657
658 unsigned getBarrier() const {
659 assert(Kind == k_Barrier && "Invalid access!");
660 return Barrier.Val;
661 }
662
663 StringRef getBarrierName() const {
664 assert(Kind == k_Barrier && "Invalid access!");
665 return StringRef(Barrier.Data, Barrier.Length);
666 }
667
668 bool getBarriernXSModifier() const {
669 assert(Kind == k_Barrier && "Invalid access!");
670 return Barrier.HasnXSModifier;
671 }
672
673 MCRegister getReg() const override {
674 assert(Kind == k_Register && "Invalid access!");
675 return Reg.RegNum;
676 }
677
678 unsigned getMatrixReg() const {
679 assert(Kind == k_MatrixRegister && "Invalid access!");
680 return MatrixReg.RegNum;
681 }
682
683 unsigned getMatrixElementWidth() const {
684 assert(Kind == k_MatrixRegister && "Invalid access!");
685 return MatrixReg.ElementWidth;
686 }
687
688 MatrixKind getMatrixKind() const {
689 assert(Kind == k_MatrixRegister && "Invalid access!");
690 return MatrixReg.Kind;
691 }
692
693 unsigned getMatrixTileListRegMask() const {
694 assert(isMatrixTileList() && "Invalid access!");
695 return MatrixTileList.RegMask;
696 }
697
698 RegConstraintEqualityTy getRegEqualityTy() const {
699 assert(Kind == k_Register && "Invalid access!");
700 return Reg.EqualityTy;
701 }
702
703 unsigned getVectorListStart() const {
704 assert(Kind == k_VectorList && "Invalid access!");
705 return VectorList.RegNum;
706 }
707
708 unsigned getVectorListCount() const {
709 assert(Kind == k_VectorList && "Invalid access!");
710 return VectorList.Count;
711 }
712
713 unsigned getVectorListStride() const {
714 assert(Kind == k_VectorList && "Invalid access!");
715 return VectorList.Stride;
716 }
717
718 int getVectorIndex() const {
719 assert(Kind == k_VectorIndex && "Invalid access!");
720 return VectorIndex.Val;
721 }
722
723 StringRef getSysReg() const {
724 assert(Kind == k_SysReg && "Invalid access!");
725 return StringRef(SysReg.Data, SysReg.Length);
726 }
727
728 unsigned getSysCR() const {
729 assert(Kind == k_SysCR && "Invalid access!");
730 return SysCRImm.Val;
731 }
732
733 unsigned getPrefetch() const {
734 assert(Kind == k_Prefetch && "Invalid access!");
735 return Prefetch.Val;
736 }
737
738 unsigned getPSBHint() const {
739 assert(Kind == k_PSBHint && "Invalid access!");
740 return PSBHint.Val;
741 }
742
743 unsigned getPHint() const {
744 assert(Kind == k_PHint && "Invalid access!");
745 return PHint.Val;
746 }
747
748 StringRef getPSBHintName() const {
749 assert(Kind == k_PSBHint && "Invalid access!");
750 return StringRef(PSBHint.Data, PSBHint.Length);
751 }
752
753 StringRef getPHintName() const {
754 assert(Kind == k_PHint && "Invalid access!");
755 return StringRef(PHint.Data, PHint.Length);
756 }
757
758 unsigned getBTIHint() const {
759 assert(Kind == k_BTIHint && "Invalid access!");
760 return BTIHint.Val;
761 }
762
763 StringRef getBTIHintName() const {
764 assert(Kind == k_BTIHint && "Invalid access!");
765 return StringRef(BTIHint.Data, BTIHint.Length);
766 }
767
768 StringRef getSVCR() const {
769 assert(Kind == k_SVCR && "Invalid access!");
770 return StringRef(SVCR.Data, SVCR.Length);
771 }
772
773 StringRef getPrefetchName() const {
774 assert(Kind == k_Prefetch && "Invalid access!");
775 return StringRef(Prefetch.Data, Prefetch.Length);
776 }
777
778 AArch64_AM::ShiftExtendType getShiftExtendType() const {
779 if (Kind == k_ShiftExtend)
780 return ShiftExtend.Type;
781 if (Kind == k_Register)
782 return Reg.ShiftExtend.Type;
783 llvm_unreachable("Invalid access!");
784 }
785
786 unsigned getShiftExtendAmount() const {
787 if (Kind == k_ShiftExtend)
788 return ShiftExtend.Amount;
789 if (Kind == k_Register)
790 return Reg.ShiftExtend.Amount;
791 llvm_unreachable("Invalid access!");
792 }
793
794 bool hasShiftExtendAmount() const {
795 if (Kind == k_ShiftExtend)
796 return ShiftExtend.HasExplicitAmount;
797 if (Kind == k_Register)
798 return Reg.ShiftExtend.HasExplicitAmount;
799 llvm_unreachable("Invalid access!");
800 }
801
802 bool isImm() const override { return Kind == k_Immediate; }
803 bool isMem() const override { return false; }
804
805 bool isUImm6() const {
806 if (!isImm())
807 return false;
808 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
809 if (!MCE)
810 return false;
811 int64_t Val = MCE->getValue();
812 return (Val >= 0 && Val < 64);
813 }
814
815 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
816
817 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
818 return isImmScaled<Bits, Scale>(true);
819 }
820
821 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
822 DiagnosticPredicate isUImmScaled() const {
823 if (IsRange && isImmRange() &&
824 (getLastImmVal() != getFirstImmVal() + Offset))
825 return DiagnosticPredicateTy::NoMatch;
826
827 return isImmScaled<Bits, Scale, IsRange>(false);
828 }
829
830 template <int Bits, int Scale, bool IsRange = false>
831 DiagnosticPredicate isImmScaled(bool Signed) const {
832 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
833 (isImmRange() && !IsRange))
834 return DiagnosticPredicateTy::NoMatch;
835
836 int64_t Val;
837 if (isImmRange())
838 Val = getFirstImmVal();
839 else {
840 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
841 if (!MCE)
842 return DiagnosticPredicateTy::NoMatch;
843 Val = MCE->getValue();
844 }
845
846 int64_t MinVal, MaxVal;
847 if (Signed) {
848 int64_t Shift = Bits - 1;
849 MinVal = (int64_t(1) << Shift) * -Scale;
850 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
851 } else {
852 MinVal = 0;
853 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
854 }
855
856 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
857 return DiagnosticPredicateTy::Match;
858
859 return DiagnosticPredicateTy::NearMatch;
860 }
861
862 DiagnosticPredicate isSVEPattern() const {
863 if (!isImm())
864 return DiagnosticPredicateTy::NoMatch;
865 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
866 if (!MCE)
867 return DiagnosticPredicateTy::NoMatch;
868 int64_t Val = MCE->getValue();
869 if (Val >= 0 && Val < 32)
870 return DiagnosticPredicateTy::Match;
871 return DiagnosticPredicateTy::NearMatch;
872 }
873
874 DiagnosticPredicate isSVEVecLenSpecifier() const {
875 if (!isImm())
876 return DiagnosticPredicateTy::NoMatch;
877 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
878 if (!MCE)
879 return DiagnosticPredicateTy::NoMatch;
880 int64_t Val = MCE->getValue();
881 if (Val >= 0 && Val <= 1)
882 return DiagnosticPredicateTy::Match;
883 return DiagnosticPredicateTy::NearMatch;
884 }
885
886 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
887 AArch64MCExpr::VariantKind ELFRefKind;
888 MCSymbolRefExpr::VariantKind DarwinRefKind;
889 int64_t Addend;
890 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
891 Addend)) {
892 // If we don't understand the expression, assume the best and
893 // let the fixup and relocation code deal with it.
894 return true;
895 }
896
897 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
898 ELFRefKind == AArch64MCExpr::VK_LO12 ||
899 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
900 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
901 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
902 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
903 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
904 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
905 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
906 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
907 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
908 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
909 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
910 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
911 // Note that we don't range-check the addend. It's adjusted modulo page
912 // size when converted, so there is no "out of range" condition when using
913 // @pageoff.
914 return true;
915 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
916 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
917 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
918 return Addend == 0;
919 }
920
921 return false;
922 }
923
924 template <int Scale> bool isUImm12Offset() const {
925 if (!isImm())
926 return false;
927
928 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
929 if (!MCE)
930 return isSymbolicUImm12Offset(getImm());
931
932 int64_t Val = MCE->getValue();
933 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
934 }
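// For example, with Scale == 8 (64-bit LDR/STR), "ldr x0, [x1, #32760]" is
// accepted (32760 == 4095 * 8), while "#4" and "#32768" are not.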
935
936 template <int N, int M>
937 bool isImmInRange() const {
938 if (!isImm())
939 return false;
940 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
941 if (!MCE)
942 return false;
943 int64_t Val = MCE->getValue();
944 return (Val >= N && Val <= M);
945 }
946
947 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
948 // a logical immediate can always be represented when inverted.
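// For example, for T = uint32_t the value 0x00ff00ff is a valid logical
// immediate ("and w0, w1, #0x00ff00ff"), whereas 0x12345678 is not encodable.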
949 template <typename T>
950 bool isLogicalImm() const {
951 if (!isImm())
952 return false;
953 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
954 if (!MCE)
955 return false;
956
957 int64_t Val = MCE->getValue();
958 // Avoid left shift by 64 directly.
959 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
960 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
961 if ((Val & Upper) && (Val & Upper) != Upper)
962 return false;
963
964 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
965 }
966
967 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
968
969 bool isImmRange() const { return Kind == k_ImmRange; }
970
971 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
972 /// a shifted immediate by value 'Width' or '0', or if it is an unshifted
973 /// immediate that can be shifted by 'Width'.
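/// For example, with Width == 12 the constant 0x1000 yields (1, 12) and the
/// constant 0x123 yields (0x123, 0).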
974 template <unsigned Width>
975 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
976 if (isShiftedImm() && Width == getShiftedImmShift())
977 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
978 return std::make_pair(CE->getValue(), Width);
979
980 if (isImm())
981 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
982 int64_t Val = CE->getValue();
983 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
984 return std::make_pair(Val >> Width, Width);
985 else
986 return std::make_pair(Val, 0u);
987 }
988
989 return {};
990 }
991
992 bool isAddSubImm() const {
993 if (!isShiftedImm() && !isImm())
994 return false;
995
996 const MCExpr *Expr;
997
998 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
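// For example, "add x0, x1, #1, lsl #12" is accepted here, but
// "add x0, x1, #1, lsl #8" is not.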
999 if (isShiftedImm()) {
1000 unsigned Shift = ShiftedImm.ShiftAmount;
1001 Expr = ShiftedImm.Val;
1002 if (Shift != 0 && Shift != 12)
1003 return false;
1004 } else {
1005 Expr = getImm();
1006 }
1007
1008 AArch64MCExpr::VariantKind ELFRefKind;
1009 MCSymbolRefExpr::VariantKind DarwinRefKind;
1010 int64_t Addend;
1011 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
1012 DarwinRefKind, Addend)) {
1013 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
1014 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
1015 (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
1016 ELFRefKind == AArch64MCExpr::VK_LO12 ||
1017 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
1018 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
1019 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
1020 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
1021 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
1022 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
1023 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
1024 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
1025 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
1026 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
1027 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1028 }
1029
1030 // If it's a constant, it should be a real immediate in range.
1031 if (auto ShiftedVal = getShiftedVal<12>())
1032 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1033
1034 // If it's an expression, we hope for the best and let the fixup/relocation
1035 // code deal with it.
1036 return true;
1037 }
1038
1039 bool isAddSubImmNeg() const {
1040 if (!isShiftedImm() && !isImm())
1041 return false;
1042
1043 // Otherwise it should be a real negative immediate in range.
1044 if (auto ShiftedVal = getShiftedVal<12>())
1045 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1046
1047 return false;
1048 }
1049
1050 // Signed value in the range -128 to +127. For element widths of
1051 // 16 bits or higher it may also be a signed multiple of 256 in the
1052 // range -32768 to +32512.
1053 // For an element width of 8 bits, a range of -128 to 255 is accepted,
1054 // since a copy of a byte can be either signed or unsigned.
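// For example, for .h elements "mov z0.h, #512" is accepted (encoded as
// #2, lsl #8), while "mov z0.h, #129" is not representable.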
1055 template <typename T>
1056 DiagnosticPredicate isSVECpyImm() const {
1057 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1058 return DiagnosticPredicateTy::NoMatch;
1059
1060 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1061 std::is_same<int8_t, T>::value;
1062 if (auto ShiftedImm = getShiftedVal<8>())
1063 if (!(IsByte && ShiftedImm->second) &&
1064 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1065 << ShiftedImm->second))
1066 return DiagnosticPredicateTy::Match;
1067
1068 return DiagnosticPredicateTy::NearMatch;
1069 }
1070
1071 // Unsigned value in the range 0 to 255. For element widths of
1072 // 16 bits or higher it may also be a signed multiple of 256 in the
1073 // range 0 to 65280.
1074 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1075 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1076 return DiagnosticPredicateTy::NoMatch;
1077
1078 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1079 std::is_same<int8_t, T>::value;
1080 if (auto ShiftedImm = getShiftedVal<8>())
1081 if (!(IsByte && ShiftedImm->second) &&
1082 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1083 << ShiftedImm->second))
1084 return DiagnosticPredicateTy::Match;
1085
1086 return DiagnosticPredicateTy::NearMatch;
1087 }
1088
1089 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1090 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1091 return DiagnosticPredicateTy::Match;
1092 return DiagnosticPredicateTy::NoMatch;
1093 }
1094
1095 bool isCondCode() const { return Kind == k_CondCode; }
1096
1097 bool isSIMDImmType10() const {
1098 if (!isImm())
1099 return false;
1100 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1101 if (!MCE)
1102 return false;
1103 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1104 }
1105
1106 template<int N>
1107 bool isBranchTarget() const {
1108 if (!isImm())
1109 return false;
1110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1111 if (!MCE)
1112 return true;
1113 int64_t Val = MCE->getValue();
1114 if (Val & 0x3)
1115 return false;
1116 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1117 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1118 }
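// For example, with N == 26 (B/BL) the byte offset must be a multiple of 4 in
// the range [-(1 << 27), (1 << 27) - 4], i.e. roughly +/-128 MiB.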
1119
1120 bool
1121 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1122 if (!isImm())
1123 return false;
1124
1125 AArch64MCExpr::VariantKind ELFRefKind;
1126 MCSymbolRefExpr::VariantKind DarwinRefKind;
1127 int64_t Addend;
1128 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1129 DarwinRefKind, Addend)) {
1130 return false;
1131 }
1132 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1133 return false;
1134
1135 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1136 }
1137
1138 bool isMovWSymbolG3() const {
1139 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1140 }
1141
1142 bool isMovWSymbolG2() const {
1143 return isMovWSymbol(
1144 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1145 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1146 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1147 AArch64MCExpr::VK_DTPREL_G2});
1148 }
1149
1150 bool isMovWSymbolG1() const {
1151 return isMovWSymbol(
1152 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1153 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1154 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1155 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1156 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1157 }
1158
1159 bool isMovWSymbolG0() const {
1160 return isMovWSymbol(
1161 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1162 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1163 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1164 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1165 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1166 }
1167
1168 template<int RegWidth, int Shift>
1169 bool isMOVZMovAlias() const {
1170 if (!isImm()) return false;
1171
1172 const MCExpr *E = getImm();
1173 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1174 uint64_t Value = CE->getValue();
1175
1176 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1177 }
1178 // Only supports the case of Shift being 0 if an expression is used as an
1179 // operand
1180 return !Shift && E;
1181 }
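// For example, "mov x0, #0x12340000" matches with RegWidth == 64 and
// Shift == 16 and is emitted as "movz x0, #0x1234, lsl #16".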
1182
1183 template<int RegWidth, int Shift>
1184 bool isMOVNMovAlias() const {
1185 if (!isImm()) return false;
1186
1187 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1188 if (!CE) return false;
1189 uint64_t Value = CE->getValue();
1190
1191 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1192 }
1193
1194 bool isFPImm() const {
1195 return Kind == k_FPImm &&
1196 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1197 }
1198
1199 bool isBarrier() const {
1200 return Kind == k_Barrier && !getBarriernXSModifier();
1201 }
1202 bool isBarriernXS() const {
1203 return Kind == k_Barrier && getBarriernXSModifier();
1204 }
1205 bool isSysReg() const { return Kind == k_SysReg; }
1206
1207 bool isMRSSystemRegister() const {
1208 if (!isSysReg()) return false;
1209
1210 return SysReg.MRSReg != -1U;
1211 }
1212
1213 bool isMSRSystemRegister() const {
1214 if (!isSysReg()) return false;
1215 return SysReg.MSRReg != -1U;
1216 }
1217
1218 bool isSystemPStateFieldWithImm0_1() const {
1219 if (!isSysReg()) return false;
1220 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1221 }
1222
1223 bool isSystemPStateFieldWithImm0_15() const {
1224 if (!isSysReg())
1225 return false;
1226 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1227 }
1228
1229 bool isSVCR() const {
1230 if (Kind != k_SVCR)
1231 return false;
1232 return SVCR.PStateField != -1U;
1233 }
1234
1235 bool isReg() const override {
1236 return Kind == k_Register;
1237 }
1238
1239 bool isVectorList() const { return Kind == k_VectorList; }
1240
1241 bool isScalarReg() const {
1242 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1243 }
1244
1245 bool isNeonVectorReg() const {
1246 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1247 }
1248
1249 bool isNeonVectorRegLo() const {
1250 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1251 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1252 Reg.RegNum) ||
1253 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1254 Reg.RegNum));
1255 }
1256
1257 bool isNeonVectorReg0to7() const {
1258 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1259 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1260 Reg.RegNum));
1261 }
1262
1263 bool isMatrix() const { return Kind == k_MatrixRegister; }
1264 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1265
1266 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1267 RegKind RK;
1268 switch (Class) {
1269 case AArch64::PPRRegClassID:
1270 case AArch64::PPR_3bRegClassID:
1271 case AArch64::PPR_p8to15RegClassID:
1272 case AArch64::PNRRegClassID:
1273 case AArch64::PNR_p8to15RegClassID:
1274 case AArch64::PPRorPNRRegClassID:
1275 RK = RegKind::SVEPredicateAsCounter;
1276 break;
1277 default:
1278 llvm_unreachable("Unsupported register class");
1279 }
1280
1281 return (Kind == k_Register && Reg.Kind == RK) &&
1282 AArch64MCRegisterClasses[Class].contains(getReg());
1283 }
1284
1285 template <unsigned Class> bool isSVEVectorReg() const {
1286 RegKind RK;
1287 switch (Class) {
1288 case AArch64::ZPRRegClassID:
1289 case AArch64::ZPR_3bRegClassID:
1290 case AArch64::ZPR_4bRegClassID:
1291 case AArch64::ZPRMul2_LoRegClassID:
1292 case AArch64::ZPRMul2_HiRegClassID:
1293 case AArch64::ZPR_KRegClassID:
1294 RK = RegKind::SVEDataVector;
1295 break;
1296 case AArch64::PPRRegClassID:
1297 case AArch64::PPR_3bRegClassID:
1298 case AArch64::PPR_p8to15RegClassID:
1299 case AArch64::PNRRegClassID:
1300 case AArch64::PNR_p8to15RegClassID:
1301 case AArch64::PPRorPNRRegClassID:
1302 RK = RegKind::SVEPredicateVector;
1303 break;
1304 default:
1305 llvm_unreachable("Unsupported register class");
1306 }
1307
1308 return (Kind == k_Register && Reg.Kind == RK) &&
1309 AArch64MCRegisterClasses[Class].contains(getReg());
1310 }
1311
1312 template <unsigned Class> bool isFPRasZPR() const {
1313 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1314 AArch64MCRegisterClasses[Class].contains(getReg());
1315 }
1316
1317 template <int ElementWidth, unsigned Class>
1318 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1319 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1320 return DiagnosticPredicateTy::NoMatch;
1321
1322 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1323 return DiagnosticPredicateTy::Match;
1324
1325 return DiagnosticPredicateTy::NearMatch;
1326 }
1327
1328 template <int ElementWidth, unsigned Class>
1329 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1330 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1331 Reg.Kind != RegKind::SVEPredicateVector))
1332 return DiagnosticPredicateTy::NoMatch;
1333
1334 if ((isSVEPredicateAsCounterReg<Class>() ||
1335 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1336 Reg.ElementWidth == ElementWidth)
1337 return DiagnosticPredicateTy::Match;
1338
1339 return DiagnosticPredicateTy::NearMatch;
1340 }
1341
1342 template <int ElementWidth, unsigned Class>
1343 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1344 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1345 return DiagnosticPredicateTy::NoMatch;
1346
1347 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1348 return DiagnosticPredicateTy::Match;
1349
1350 return DiagnosticPredicateTy::NearMatch;
1351 }
1352
1353 template <int ElementWidth, unsigned Class>
1354 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1355 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1356 return DiagnosticPredicateTy::NoMatch;
1357
1358 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1359 return DiagnosticPredicateTy::Match;
1360
1361 return DiagnosticPredicateTy::NearMatch;
1362 }
1363
1364 template <int ElementWidth, unsigned Class,
1365 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1366 bool ShiftWidthAlwaysSame>
1367 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1368 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1369 if (!VectorMatch.isMatch())
1370 return DiagnosticPredicateTy::NoMatch;
1371
1372 // Give a more specific diagnostic when the user has explicitly typed in
1373 // a shift-amount that does not match what is expected, but for which
1374 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1375 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1376 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1377 ShiftExtendTy == AArch64_AM::SXTW) &&
1378 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1379 return DiagnosticPredicateTy::NoMatch;
1380
1381 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1382 return DiagnosticPredicateTy::Match;
1383
1384 return DiagnosticPredicateTy::NearMatch;
1385 }
1386
1387 bool isGPR32as64() const {
1388 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1389 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1390 }
1391
1392 bool isGPR64as32() const {
1393 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1394 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1395 }
1396
1397 bool isGPR64x8() const {
1398 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1399 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1400 Reg.RegNum);
1401 }
1402
1403 bool isWSeqPair() const {
1404 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1405 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1406 Reg.RegNum);
1407 }
1408
1409 bool isXSeqPair() const {
1410 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1411 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1412 Reg.RegNum);
1413 }
1414
1415 bool isSyspXzrPair() const {
1416 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1417 }
1418
1419 template<int64_t Angle, int64_t Remainder>
1420 DiagnosticPredicate isComplexRotation() const {
1421 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1422
1423 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1424 if (!CE) return DiagnosticPredicateTy::NoMatch;
1425 uint64_t Value = CE->getValue();
1426
1427 if (Value % Angle == Remainder && Value <= 270)
1428 return DiagnosticPredicateTy::Match;
1429 return DiagnosticPredicateTy::NearMatch;
1430 }
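// For example, FCMLA rotations use <90, 0> (accepting 0, 90, 180 and 270),
// while FCADD rotations use <180, 90> (accepting 90 and 270).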
1431
1432 template <unsigned RegClassID> bool isGPR64() const {
1433 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1434 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1435 }
1436
1437 template <unsigned RegClassID, int ExtWidth>
1438 DiagnosticPredicate isGPR64WithShiftExtend() const {
1439 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1440 return DiagnosticPredicateTy::NoMatch;
1441
1442 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1443 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1444 return DiagnosticPredicateTy::Match;
1445 return DiagnosticPredicateTy::NearMatch;
1446 }
1447
1448 /// Is this a vector list with the type implicit (presumably attached to the
1449 /// instruction itself)?
1450 template <RegKind VectorKind, unsigned NumRegs>
1451 bool isImplicitlyTypedVectorList() const {
1452 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1453 VectorList.NumElements == 0 &&
1454 VectorList.RegisterKind == VectorKind;
1455 }
1456
1457 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1458 unsigned ElementWidth, unsigned Stride = 1>
1459 bool isTypedVectorList() const {
1460 if (Kind != k_VectorList)
1461 return false;
1462 if (VectorList.Count != NumRegs)
1463 return false;
1464 if (VectorList.RegisterKind != VectorKind)
1465 return false;
1466 if (VectorList.ElementWidth != ElementWidth)
1467 return false;
1468 if (VectorList.Stride != Stride)
1469 return false;
1470 return VectorList.NumElements == NumElements;
1471 }
1472
1473 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1474 unsigned ElementWidth, unsigned RegClass>
1475 DiagnosticPredicate isTypedVectorListMultiple() const {
1476 bool Res =
1477 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1478 if (!Res)
1479 return DiagnosticPredicateTy::NoMatch;
1480 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1481 return DiagnosticPredicateTy::NearMatch;
1482 return DiagnosticPredicateTy::Match;
1483 }
1484
1485 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1486 unsigned ElementWidth>
1487 DiagnosticPredicate isTypedVectorListStrided() const {
1488 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1489 ElementWidth, Stride>();
1490 if (!Res)
1491 return DiagnosticPredicateTy::NoMatch;
1492 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1493 ((VectorList.RegNum >= AArch64::Z16) &&
1494 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1495 return DiagnosticPredicateTy::Match;
1496 return DiagnosticPredicateTy::NoMatch;
1497 }
1498
1499 template <int Min, int Max>
1500 DiagnosticPredicate isVectorIndex() const {
1501 if (Kind != k_VectorIndex)
1502 return DiagnosticPredicateTy::NoMatch;
1503 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1504 return DiagnosticPredicateTy::Match;
1505 return DiagnosticPredicateTy::NearMatch;
1506 }
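// For example, "mov v0.s[3], w1" parses lane index 3, which must satisfy
// Min <= 3 <= Max (0..3 for .s lanes of a 128-bit vector).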
1507
1508 bool isToken() const override { return Kind == k_Token; }
1509
1510 bool isTokenEqual(StringRef Str) const {
1511 return Kind == k_Token && getToken() == Str;
1512 }
1513 bool isSysCR() const { return Kind == k_SysCR; }
1514 bool isPrefetch() const { return Kind == k_Prefetch; }
1515 bool isPSBHint() const { return Kind == k_PSBHint; }
1516 bool isPHint() const { return Kind == k_PHint; }
1517 bool isBTIHint() const { return Kind == k_BTIHint; }
1518 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1519 bool isShifter() const {
1520 if (!isShiftExtend())
1521 return false;
1522
1523 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1524 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1525 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1526 ST == AArch64_AM::MSL);
1527 }
1528
1529 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1530 if (Kind != k_FPImm)
1531 return DiagnosticPredicateTy::NoMatch;
1532
1533 if (getFPImmIsExact()) {
1534 // Lookup the immediate from table of supported immediates.
1535 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1536 assert(Desc && "Unknown enum value");
1537
1538 // Calculate its FP value.
1539 APFloat RealVal(APFloat::IEEEdouble());
1540 auto StatusOrErr =
1541 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1542 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1543 llvm_unreachable("FP immediate is not exact");
1544
1545 if (getFPImm().bitwiseIsEqual(RealVal))
1546 return DiagnosticPredicateTy::Match;
1547 }
1548
1549 return DiagnosticPredicateTy::NearMatch;
1550 }
1551
1552 template <unsigned ImmA, unsigned ImmB>
1553 DiagnosticPredicate isExactFPImm() const {
1554 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1555 if ((Res = isExactFPImm<ImmA>()))
1556 return DiagnosticPredicateTy::Match;
1557 if ((Res = isExactFPImm<ImmB>()))
1558 return DiagnosticPredicateTy::Match;
1559 return Res;
1560 }
1561
1562 bool isExtend() const {
1563 if (!isShiftExtend())
1564 return false;
1565
1566 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1567 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1568 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1569 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1570 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1571 ET == AArch64_AM::LSL) &&
1572 getShiftExtendAmount() <= 4;
1573 }
1574
1575 bool isExtend64() const {
1576 if (!isExtend())
1577 return false;
1578 // Make sure the extend expects a 32-bit source register.
1579 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1580 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1581 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1582 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1583 }
1584
1585 bool isExtendLSL64() const {
1586 if (!isExtend())
1587 return false;
1588 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1589 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1590 ET == AArch64_AM::LSL) &&
1591 getShiftExtendAmount() <= 4;
1592 }
1593
1594 bool isLSLImm3Shift() const {
1595 if (!isShiftExtend())
1596 return false;
1597 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1598 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1599 }
1600
1601 template<int Width> bool isMemXExtend() const {
1602 if (!isExtend())
1603 return false;
1604 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1605 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1606 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1607 getShiftExtendAmount() == 0);
1608 }
1609
1610 template<int Width> bool isMemWExtend() const {
1611 if (!isExtend())
1612 return false;
1613 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1614 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1615 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1616 getShiftExtendAmount() == 0);
1617 }
1618
1619 template <unsigned width>
1620 bool isArithmeticShifter() const {
1621 if (!isShifter())
1622 return false;
1623
1624 // An arithmetic shifter is LSL, LSR, or ASR.
1625 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1626 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1627 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1628 }
1629
1630 template <unsigned width>
1631 bool isLogicalShifter() const {
1632 if (!isShifter())
1633 return false;
1634
1635 // A logical shifter is LSL, LSR, ASR or ROR.
1636 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1637 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1638 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1639 getShiftExtendAmount() < width;
1640 }
1641
1642 bool isMovImm32Shifter() const {
1643 if (!isShifter())
1644 return false;
1645
1646 // A 32-bit MOVi shifter is LSL of 0 or 16.
1647 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1648 if (ST != AArch64_AM::LSL)
1649 return false;
1650 uint64_t Val = getShiftExtendAmount();
1651 return (Val == 0 || Val == 16);
1652 }
1653
1654 bool isMovImm64Shifter() const {
1655 if (!isShifter())
1656 return false;
1657
1658 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1659 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1660 if (ST != AArch64_AM::LSL)
1661 return false;
1662 uint64_t Val = getShiftExtendAmount();
1663 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1664 }
1665
1666 bool isLogicalVecShifter() const {
1667 if (!isShifter())
1668 return false;
1669
1670 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1671 unsigned Shift = getShiftExtendAmount();
1672 return getShiftExtendType() == AArch64_AM::LSL &&
1673 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1674 }
1675
1676 bool isLogicalVecHalfWordShifter() const {
1677 if (!isLogicalVecShifter())
1678 return false;
1679
1680 // A logical vector halfword shifter is a left shift by 0 or 8.
1681 unsigned Shift = getShiftExtendAmount();
1682 return getShiftExtendType() == AArch64_AM::LSL &&
1683 (Shift == 0 || Shift == 8);
1684 }
1685
1686 bool isMoveVecShifter() const {
1687 if (!isShiftExtend())
1688 return false;
1689
1690 // A move vector shifter (MSL) is a left shift by 8 or 16.
1691 unsigned Shift = getShiftExtendAmount();
1692 return getShiftExtendType() == AArch64_AM::MSL &&
1693 (Shift == 8 || Shift == 16);
1694 }
1695
1696 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1697 // to LDUR/STUR when the offset is not legal for the former but is for
1698 // the latter. As such, in addition to checking for being a legal unscaled
1699 // address, also check that it is not a legal scaled address. This avoids
1700 // ambiguity in the matcher.
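// For example, "ldr x0, [x1, #1]" is not a multiple of 8, so it only matches
// here and is emitted as LDUR; "ldr x0, [x1, #8]" matches the scaled form
// instead.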
1701 template<int Width>
1702 bool isSImm9OffsetFB() const {
1703 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1704 }
1705
1706 bool isAdrpLabel() const {
1707 // Validation was handled during parsing, so we just verify that
1708 // something didn't go haywire.
1709 if (!isImm())
1710 return false;
1711
1712 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1713 int64_t Val = CE->getValue();
1714 int64_t Min = - (4096 * (1LL << (21 - 1)));
1715 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1716 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1717 }
1718
1719 return true;
1720 }
1721
1722 bool isAdrLabel() const {
1723 // Validation was handled during parsing, so we just verify that
1724 // something didn't go haywire.
1725 if (!isImm())
1726 return false;
1727
1728 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1729 int64_t Val = CE->getValue();
1730 int64_t Min = - (1LL << (21 - 1));
1731 int64_t Max = ((1LL << (21 - 1)) - 1);
1732 return Val >= Min && Val <= Max;
1733 }
1734
1735 return true;
1736 }
1737
1738 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1739 DiagnosticPredicate isMatrixRegOperand() const {
1740 if (!isMatrix())
1741 return DiagnosticPredicateTy::NoMatch;
1742 if (getMatrixKind() != Kind ||
1743 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1744 EltSize != getMatrixElementWidth())
1745 return DiagnosticPredicateTy::NearMatch;
1746 return DiagnosticPredicateTy::Match;
1747 }
1748
1749 bool isPAuthPCRelLabel16Operand() const {
1750 // PAuth PCRel16 operands are similar to regular branch targets, but only
1751 // negative values are allowed for concrete immediates, since the signing
1752 // instruction is expected to be at a lower address.
1753 if (!isImm())
1754 return false;
1755 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1756 if (!MCE)
1757 return true;
1758 int64_t Val = MCE->getValue();
1759 if (Val & 0b11)
1760 return false;
1761 return (Val <= 0) && (Val > -(1 << 18));
1762 }
1763
1764 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1765 // Add as immediates when possible. Null MCExpr = 0.
1766 if (!Expr)
1767 Inst.addOperand(MCOperand::createImm(0));
1768 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1769 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1770 else
1771 Inst.addOperand(MCOperand::createExpr(Expr));
1772 }
1773
1774 void addRegOperands(MCInst &Inst, unsigned N) const {
1775 assert(N == 1 && "Invalid number of operands!");
1776 Inst.addOperand(MCOperand::createReg(getReg()));
1777 }
1778
1779 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1780 assert(N == 1 && "Invalid number of operands!");
1781 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1782 }
1783
1784 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1785 assert(N == 1 && "Invalid number of operands!");
1786 assert(
1787 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1788
1789 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1790 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1791 RI->getEncodingValue(getReg()));
1792
1793 Inst.addOperand(MCOperand::createReg(Reg));
1794 }
1795
1796 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1797 assert(N == 1 && "Invalid number of operands!");
1798 assert(
1799 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1800
1801 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1802 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1803 RI->getEncodingValue(getReg()));
1804
1805 Inst.addOperand(MCOperand::createReg(Reg));
1806 }
1807
1808 template <int Width>
1809 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1810 unsigned Base;
1811 switch (Width) {
1812 case 8: Base = AArch64::B0; break;
1813 case 16: Base = AArch64::H0; break;
1814 case 32: Base = AArch64::S0; break;
1815 case 64: Base = AArch64::D0; break;
1816 case 128: Base = AArch64::Q0; break;
1817 default:
1818 llvm_unreachable("Unsupported width");
1819 }
1820 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1821 }
1822
1823 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1824 assert(N == 1 && "Invalid number of operands!");
1825 unsigned Reg = getReg();
1826 // Normalise to PPR
1827 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1828 Reg = Reg - AArch64::PN0 + AArch64::P0;
1829 Inst.addOperand(MCOperand::createReg(Reg));
1830 }
1831
1832 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1833 assert(N == 1 && "Invalid number of operands!");
1834 Inst.addOperand(
1835 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1836 }
1837
1838 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1839 assert(N == 1 && "Invalid number of operands!");
1840 assert(
1841 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1842 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1843 }
1844
1845 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1846 assert(N == 1 && "Invalid number of operands!");
1847 assert(
1848 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1849 Inst.addOperand(MCOperand::createReg(getReg()));
1850 }
1851
1852 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1853 assert(N == 1 && "Invalid number of operands!");
1854 Inst.addOperand(MCOperand::createReg(getReg()));
1855 }
1856
1857 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1858 assert(N == 1 && "Invalid number of operands!");
1859 Inst.addOperand(MCOperand::createReg(getReg()));
1860 }
1861
1862 enum VecListIndexType {
1863 VecListIdx_DReg = 0,
1864 VecListIdx_QReg = 1,
1865 VecListIdx_ZReg = 2,
1866 VecListIdx_PReg = 3,
1867 };
1868
1869 template <VecListIndexType RegTy, unsigned NumRegs>
1870 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1871 assert(N == 1 && "Invalid number of operands!");
1872 static const unsigned FirstRegs[][5] = {
1873 /* DReg */ { AArch64::Q0,
1874 AArch64::D0, AArch64::D0_D1,
1875 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1876 /* QReg */ { AArch64::Q0,
1877 AArch64::Q0, AArch64::Q0_Q1,
1878 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1879 /* ZReg */ { AArch64::Z0,
1880 AArch64::Z0, AArch64::Z0_Z1,
1881 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1882 /* PReg */ { AArch64::P0,
1883 AArch64::P0, AArch64::P0_P1 }
1884 };
1885
1886 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1887 " NumRegs must be <= 4 for ZRegs");
1888
1889 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1890 " NumRegs must be <= 2 for PRegs");
1891
1892 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1893 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1894 FirstRegs[(unsigned)RegTy][0]));
1895 }
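 // Example (illustrative, not from the original source): the NEON list
 // "{ v0.8b, v1.8b }" is a two-register D-form list and is emitted as the
 // tuple register D0_D1; a Z-register list starting at z0 maps to Z0_Z1 via
 // the ZReg row of FirstRegs above.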
1896
1897 template <unsigned NumRegs>
1898 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1899 assert(N == 1 && "Invalid number of operands!");
1900 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1901
1902 switch (NumRegs) {
1903 case 2:
1904 if (getVectorListStart() < AArch64::Z16) {
1905 assert((getVectorListStart() < AArch64::Z8) &&
1906 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1907 Inst.addOperand(MCOperand::createReg(
1908 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1909 } else {
1910 assert((getVectorListStart() < AArch64::Z24) &&
1911 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1912 Inst.addOperand(MCOperand::createReg(
1913 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1914 }
1915 break;
1916 case 4:
1917 if (getVectorListStart() < AArch64::Z16) {
1918 assert((getVectorListStart() < AArch64::Z4) &&
1919 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1920 Inst.addOperand(MCOperand::createReg(
1921 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1922 } else {
1923 assert((getVectorListStart() < AArch64::Z20) &&
1924 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1925 Inst.addOperand(MCOperand::createReg(
1926 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1927 }
1928 break;
1929 default:
1930 llvm_unreachable("Unsupported number of registers for strided vec list");
1931 }
1932 }
1933
1934 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1935 assert(N == 1 && "Invalid number of operands!");
1936 unsigned RegMask = getMatrixTileListRegMask();
1937 assert(RegMask <= 0xFF && "Invalid mask!");
1938 Inst.addOperand(MCOperand::createImm(RegMask));
1939 }
1940
1941 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1942 assert(N == 1 && "Invalid number of operands!");
1943 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1944 }
1945
1946 template <unsigned ImmIs0, unsigned ImmIs1>
1947 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1948 assert(N == 1 && "Invalid number of operands!");
1949 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1950 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1951 }
1952
1953 void addImmOperands(MCInst &Inst, unsigned N) const {
1954 assert(N == 1 && "Invalid number of operands!");
1955 // If this is a pageoff symrefexpr with an addend, adjust the addend
1956 // to be only the page-offset portion. Otherwise, just add the expr
1957 // as-is.
1958 addExpr(Inst, getImm());
1959 }
1960
1961 template <int Shift>
1962 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1963 assert(N == 2 && "Invalid number of operands!");
1964 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1965 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1966 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1967 } else if (isShiftedImm()) {
1968 addExpr(Inst, getShiftedImmVal());
1969 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1970 } else {
1971 addExpr(Inst, getImm());
1972 Inst.addOperand(MCOperand::createImm(0));
1973 }
1974 }
1975
1976 template <int Shift>
1977 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1978 assert(N == 2 && "Invalid number of operands!");
1979 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1980 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1981 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1982 } else
1983 llvm_unreachable("Not a shifted negative immediate");
1984 }
1985
1986 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 Inst.addOperand(MCOperand::createImm(getCondCode()));
1989 }
1990
1991 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1992 assert(N == 1 && "Invalid number of operands!");
1993 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1994 if (!MCE)
1995 addExpr(Inst, getImm());
1996 else
1997 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1998 }
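 // Example (illustrative, not from the original source): for a constant
 // operand such as 0x12345, the stored immediate is the 4 KiB page index
 // 0x12345 >> 12 = 0x12; symbolic operands are left for the relocation.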
1999
2000 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2001 addImmOperands(Inst, N);
2002 }
2003
2004 template<int Scale>
2005 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2006 assert(N == 1 && "Invalid number of operands!");
2007 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2008
2009 if (!MCE) {
2010 Inst.addOperand(MCOperand::createExpr(getImm()));
2011 return;
2012 }
2013 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2014 }
2015
2016 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2017 assert(N == 1 && "Invalid number of operands!");
2018 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2019 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2020 }
2021
2022 template <int Scale>
2023 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2024 assert(N == 1 && "Invalid number of operands!");
2025 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2026 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2027 }
2028
2029 template <int Scale>
2030 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2031 assert(N == 1 && "Invalid number of operands!");
2032 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2033 }
2034
2035 template <typename T>
2036 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2039 std::make_unsigned_t<T> Val = MCE->getValue();
2040 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2041 Inst.addOperand(MCOperand::createImm(encoding));
2042 }
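 // Example (illustrative, not from the original source): in
 // "and x0, x1, #0xff" the bitmask immediate 0xff is re-encoded into the
 // N:immr:imms bitfield by encodeLogicalImmediate rather than stored raw.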
2043
2044 template <typename T>
2045 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2046 assert(N == 1 && "Invalid number of operands!");
2047 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2048 std::make_unsigned_t<T> Val = ~MCE->getValue();
2049 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2050 Inst.addOperand(MCOperand::createImm(encoding));
2051 }
2052
2053 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2054 assert(N == 1 && "Invalid number of operands!");
2055 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2056 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2057 Inst.addOperand(MCOperand::createImm(encoding));
2058 }
2059
2060 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2061 // Branch operands don't encode the low bits, so shift them off
2062 // here. If it's a label, however, just put it on directly as there's
2063 // not enough information now to do anything.
2064 assert(N == 1 && "Invalid number of operands!");
2065 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2066 if (!MCE) {
2067 addExpr(Inst, getImm());
2068 return;
2069 }
2070 assert(MCE && "Invalid constant immediate operand!");
2071 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2072 }
2073
2074 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2075 // PC-relative operands don't encode the low bits, so shift them off
2076 // here. If it's a label, however, just put it on directly as there's
2077 // not enough information now to do anything.
2078 assert(N == 1 && "Invalid number of operands!");
2079 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2080 if (!MCE) {
2081 addExpr(Inst, getImm());
2082 return;
2083 }
2084 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2085 }
2086
2087 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2088 // Branch operands don't encode the low bits, so shift them off
2089 // here. If it's a label, however, just put it on directly as there's
2090 // not enough information now to do anything.
2091 assert(N == 1 && "Invalid number of operands!");
2092 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2093 if (!MCE) {
2094 addExpr(Inst, getImm());
2095 return;
2096 }
2097 assert(MCE && "Invalid constant immediate operand!");
2098 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2099 }
2100
2101 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2102 // Branch operands don't encode the low bits, so shift them off
2103 // here. If it's a label, however, just put it on directly as there's
2104 // not enough information now to do anything.
2105 assert(N == 1 && "Invalid number of operands!");
2106 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2107 if (!MCE) {
2108 addExpr(Inst, getImm());
2109 return;
2110 }
2111 assert(MCE && "Invalid constant immediate operand!");
2112 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2113 }
2114
2115 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2116 // Branch operands don't encode the low bits, so shift them off
2117 // here. If it's a label, however, just put it on directly as there's
2118 // not enough information now to do anything.
2119 assert(N == 1 && "Invalid number of operands!");
2120 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2121 if (!MCE) {
2122 addExpr(Inst, getImm());
2123 return;
2124 }
2125 assert(MCE && "Invalid constant immediate operand!");
2126 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2127 }
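 // Example (illustrative, not from the original source): for
 // "tbz x0, #3, target" with a resolved byte offset of 32, the stored
 // immediate is 32 >> 2 = 8, since branch targets are 4-byte aligned;
 // unresolved labels stay as expressions for the later fixup.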
2128
2129 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2130 assert(N == 1 && "Invalid number of operands!");
2131 Inst.addOperand(MCOperand::createImm(
2132 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2133 }
2134
2135 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2136 assert(N == 1 && "Invalid number of operands!");
2137 Inst.addOperand(MCOperand::createImm(getBarrier()));
2138 }
2139
2140 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2141 assert(N == 1 && "Invalid number of operands!");
2142 Inst.addOperand(MCOperand::createImm(getBarrier()));
2143 }
2144
2145 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2146 assert(N == 1 && "Invalid number of operands!");
2147
2148 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2149 }
2150
2151 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2152 assert(N == 1 && "Invalid number of operands!");
2153
2154 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2155 }
2156
2157 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2158 assert(N == 1 && "Invalid number of operands!");
2159
2160 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2161 }
2162
2163 void addSVCROperands(MCInst &Inst, unsigned N) const {
2164 assert(N == 1 && "Invalid number of operands!");
2165
2166 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2167 }
2168
2169 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2170 assert(N == 1 && "Invalid number of operands!");
2171
2172 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2173 }
2174
2175 void addSysCROperands(MCInst &Inst, unsigned N) const {
2176 assert(N == 1 && "Invalid number of operands!");
2177 Inst.addOperand(MCOperand::createImm(getSysCR()));
2178 }
2179
2180 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2181 assert(N == 1 && "Invalid number of operands!");
2182 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2183 }
2184
2185 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2186 assert(N == 1 && "Invalid number of operands!");
2187 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2188 }
2189
2190 void addPHintOperands(MCInst &Inst, unsigned N) const {
2191 assert(N == 1 && "Invalid number of operands!");
2192 Inst.addOperand(MCOperand::createImm(getPHint()));
2193 }
2194
2195 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 1 && "Invalid number of operands!");
2197 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2198 }
2199
2200 void addShifterOperands(MCInst &Inst, unsigned N) const {
2201 assert(N == 1 && "Invalid number of operands!");
2202 unsigned Imm =
2203 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2204 Inst.addOperand(MCOperand::createImm(Imm));
2205 }
2206
2207 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2208 assert(N == 1 && "Invalid number of operands!");
2209 unsigned Imm = getShiftExtendAmount();
2210 Inst.addOperand(MCOperand::createImm(Imm));
2211 }
2212
2213 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2214 assert(N == 1 && "Invalid number of operands!");
2215
2216 if (!isScalarReg())
2217 return;
2218
2219 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2220 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2221 .getRegister(RI->getEncodingValue(getReg()));
2222 if (Reg != AArch64::XZR)
2223 llvm_unreachable("wrong register");
2224
2225 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2226 }
2227
2228 void addExtendOperands(MCInst &Inst, unsigned N) const {
2229 assert(N == 1 && "Invalid number of operands!");
2230 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2231 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2232 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2233 Inst.addOperand(MCOperand::createImm(Imm));
2234 }
2235
2236 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2237 assert(N == 1 && "Invalid number of operands!");
2238 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2239 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2240 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2241 Inst.addOperand(MCOperand::createImm(Imm));
2242 }
2243
2244 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2245 assert(N == 2 && "Invalid number of operands!");
2246 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2247 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2248 Inst.addOperand(MCOperand::createImm(IsSigned));
2249 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2250 }
2251
2252 // For 8-bit load/store instructions with a register offset, both the
2253 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2254 // they're disambiguated by whether the shift was explicit or implicit rather
2255 // than its size.
2256 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2257 assert(N == 2 && "Invalid number of operands!");
2258 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2259 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2260 Inst.addOperand(MCOperand::createImm(IsSigned));
2261 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2262 }
2263
2264 template<int Shift>
2265 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2266 assert(N == 1 && "Invalid number of operands!");
2267
2268 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2269 if (CE) {
2270 uint64_t Value = CE->getValue();
2271 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2272 } else {
2273 addExpr(Inst, getImm());
2274 }
2275 }
2276
2277 template<int Shift>
2278 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2279 assert(N == 1 && "Invalid number of operands!");
2280
2281 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2282 uint64_t Value = CE->getValue();
2283 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2284 }
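 // Example (illustrative, not from the original source): "mov w0, #-2" is
 // handled as a MOVN alias; inverting the value and masking the selected
 // halfword yields 1, i.e. the equivalent of "movn w0, #1".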
2285
2286 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2287 assert(N == 1 && "Invalid number of operands!");
2288 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2289 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2290 }
2291
2292 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2293 assert(N == 1 && "Invalid number of operands!");
2294 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2295 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2296 }
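 // Note (illustrative, not from the original source): "even" rotations
 // (0/90/180/270, FCMLA-style) are stored as value / 90, giving 0..3, while
 // "odd" rotations (90/270, FCADD-style) are stored as (value - 90) / 180.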
2297
2298 void print(raw_ostream &OS) const override;
2299
2300 static std::unique_ptr<AArch64Operand>
2301 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2302 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2303 Op->Tok.Data = Str.data();
2304 Op->Tok.Length = Str.size();
2305 Op->Tok.IsSuffix = IsSuffix;
2306 Op->StartLoc = S;
2307 Op->EndLoc = S;
2308 return Op;
2309 }
2310
2311 static std::unique_ptr<AArch64Operand>
2312 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2313 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2314 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2315 unsigned ShiftAmount = 0,
2316 unsigned HasExplicitAmount = false) {
2317 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2318 Op->Reg.RegNum = RegNum;
2319 Op->Reg.Kind = Kind;
2320 Op->Reg.ElementWidth = 0;
2321 Op->Reg.EqualityTy = EqTy;
2322 Op->Reg.ShiftExtend.Type = ExtTy;
2323 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2324 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2325 Op->StartLoc = S;
2326 Op->EndLoc = E;
2327 return Op;
2328 }
2329
2330 static std::unique_ptr<AArch64Operand>
2331 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2332 SMLoc S, SMLoc E, MCContext &Ctx,
2333 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2334 unsigned ShiftAmount = 0,
2335 unsigned HasExplicitAmount = false) {
2336 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2337 Kind == RegKind::SVEPredicateVector ||
2338 Kind == RegKind::SVEPredicateAsCounter) &&
2339 "Invalid vector kind");
2340 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2341 HasExplicitAmount);
2342 Op->Reg.ElementWidth = ElementWidth;
2343 return Op;
2344 }
2345
2346 static std::unique_ptr<AArch64Operand>
2347 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2348 unsigned NumElements, unsigned ElementWidth,
2349 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2350 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2351 Op->VectorList.RegNum = RegNum;
2352 Op->VectorList.Count = Count;
2353 Op->VectorList.Stride = Stride;
2354 Op->VectorList.NumElements = NumElements;
2355 Op->VectorList.ElementWidth = ElementWidth;
2356 Op->VectorList.RegisterKind = RegisterKind;
2357 Op->StartLoc = S;
2358 Op->EndLoc = E;
2359 return Op;
2360 }
2361
2362 static std::unique_ptr<AArch64Operand>
2363 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2364 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2365 Op->VectorIndex.Val = Idx;
2366 Op->StartLoc = S;
2367 Op->EndLoc = E;
2368 return Op;
2369 }
2370
2371 static std::unique_ptr<AArch64Operand>
2372 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2373 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2374 Op->MatrixTileList.RegMask = RegMask;
2375 Op->StartLoc = S;
2376 Op->EndLoc = E;
2377 return Op;
2378 }
2379
2380 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2381 const unsigned ElementWidth) {
2382 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2383 RegMap = {
2384 {{0, AArch64::ZAB0},
2385 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2386 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2387 {{8, AArch64::ZAB0},
2388 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2389 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2390 {{16, AArch64::ZAH0},
2391 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2392 {{16, AArch64::ZAH1},
2393 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2394 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2395 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2396 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2397 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2398 };
2399
2400 if (ElementWidth == 64)
2401 OutRegs.insert(Reg);
2402 else {
2403 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2404 assert(!Regs.empty() && "Invalid tile or element width!");
2405 for (auto OutReg : Regs)
2406 OutRegs.insert(OutReg);
2407 }
2408 }
2409
2410 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2411 SMLoc E, MCContext &Ctx) {
2412 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2413 Op->Imm.Val = Val;
2414 Op->StartLoc = S;
2415 Op->EndLoc = E;
2416 return Op;
2417 }
2418
2419 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2420 unsigned ShiftAmount,
2421 SMLoc S, SMLoc E,
2422 MCContext &Ctx) {
2423 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2424 Op->ShiftedImm.Val = Val;
2425 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2426 Op->StartLoc = S;
2427 Op->EndLoc = E;
2428 return Op;
2429 }
2430
2431 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2432 unsigned Last, SMLoc S,
2433 SMLoc E,
2434 MCContext &Ctx) {
2435 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2436 Op->ImmRange.First = First;
2437 Op->ImmRange.Last = Last;
2438 Op->EndLoc = E;
2439 return Op;
2440 }
2441
2442 static std::unique_ptr<AArch64Operand>
2443 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2444 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2445 Op->CondCode.Code = Code;
2446 Op->StartLoc = S;
2447 Op->EndLoc = E;
2448 return Op;
2449 }
2450
2451 static std::unique_ptr<AArch64Operand>
2452 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2453 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2454 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2455 Op->FPImm.IsExact = IsExact;
2456 Op->StartLoc = S;
2457 Op->EndLoc = S;
2458 return Op;
2459 }
2460
2461 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2462 StringRef Str,
2463 SMLoc S,
2464 MCContext &Ctx,
2465 bool HasnXSModifier) {
2466 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2467 Op->Barrier.Val = Val;
2468 Op->Barrier.Data = Str.data();
2469 Op->Barrier.Length = Str.size();
2470 Op->Barrier.HasnXSModifier = HasnXSModifier;
2471 Op->StartLoc = S;
2472 Op->EndLoc = S;
2473 return Op;
2474 }
2475
2476 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2477 uint32_t MRSReg,
2478 uint32_t MSRReg,
2479 uint32_t PStateField,
2480 MCContext &Ctx) {
2481 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2482 Op->SysReg.Data = Str.data();
2483 Op->SysReg.Length = Str.size();
2484 Op->SysReg.MRSReg = MRSReg;
2485 Op->SysReg.MSRReg = MSRReg;
2486 Op->SysReg.PStateField = PStateField;
2487 Op->StartLoc = S;
2488 Op->EndLoc = S;
2489 return Op;
2490 }
2491
2492 static std::unique_ptr<AArch64Operand>
2493 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2495 Op->PHint.Val = Val;
2496 Op->PHint.Data = Str.data();
2497 Op->PHint.Length = Str.size();
2498 Op->StartLoc = S;
2499 Op->EndLoc = S;
2500 return Op;
2501 }
2502
2503 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2504 SMLoc E, MCContext &Ctx) {
2505 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2506 Op->SysCRImm.Val = Val;
2507 Op->StartLoc = S;
2508 Op->EndLoc = E;
2509 return Op;
2510 }
2511
2512 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2513 StringRef Str,
2514 SMLoc S,
2515 MCContext &Ctx) {
2516 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2517 Op->Prefetch.Val = Val;
2518 Op->Barrier.Data = Str.data();
2519 Op->Barrier.Length = Str.size();
2520 Op->StartLoc = S;
2521 Op->EndLoc = S;
2522 return Op;
2523 }
2524
2525 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2526 StringRef Str,
2527 SMLoc S,
2528 MCContext &Ctx) {
2529 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2530 Op->PSBHint.Val = Val;
2531 Op->PSBHint.Data = Str.data();
2532 Op->PSBHint.Length = Str.size();
2533 Op->StartLoc = S;
2534 Op->EndLoc = S;
2535 return Op;
2536 }
2537
2538 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2539 StringRef Str,
2540 SMLoc S,
2541 MCContext &Ctx) {
2542 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2543 Op->BTIHint.Val = Val | 32;
2544 Op->BTIHint.Data = Str.data();
2545 Op->BTIHint.Length = Str.size();
2546 Op->StartLoc = S;
2547 Op->EndLoc = S;
2548 return Op;
2549 }
2550
2551 static std::unique_ptr<AArch64Operand>
2552 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2553 SMLoc S, SMLoc E, MCContext &Ctx) {
2554 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2555 Op->MatrixReg.RegNum = RegNum;
2556 Op->MatrixReg.ElementWidth = ElementWidth;
2557 Op->MatrixReg.Kind = Kind;
2558 Op->StartLoc = S;
2559 Op->EndLoc = E;
2560 return Op;
2561 }
2562
2563 static std::unique_ptr<AArch64Operand>
2564 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2565 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2566 Op->SVCR.PStateField = PStateField;
2567 Op->SVCR.Data = Str.data();
2568 Op->SVCR.Length = Str.size();
2569 Op->StartLoc = S;
2570 Op->EndLoc = S;
2571 return Op;
2572 }
2573
2574 static std::unique_ptr<AArch64Operand>
2575 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2576 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2577 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2578 Op->ShiftExtend.Type = ShOp;
2579 Op->ShiftExtend.Amount = Val;
2580 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2581 Op->StartLoc = S;
2582 Op->EndLoc = E;
2583 return Op;
2584 }
2585};
2586
2587} // end anonymous namespace.
2588
2589void AArch64Operand::print(raw_ostream &OS) const {
2590 switch (Kind) {
2591 case k_FPImm:
2592 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2593 if (!getFPImmIsExact())
2594 OS << " (inexact)";
2595 OS << ">";
2596 break;
2597 case k_Barrier: {
2598 StringRef Name = getBarrierName();
2599 if (!Name.empty())
2600 OS << "<barrier " << Name << ">";
2601 else
2602 OS << "<barrier invalid #" << getBarrier() << ">";
2603 break;
2604 }
2605 case k_Immediate:
2606 OS << *getImm();
2607 break;
2608 case k_ShiftedImm: {
2609 unsigned Shift = getShiftedImmShift();
2610 OS << "<shiftedimm ";
2611 OS << *getShiftedImmVal();
2612 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2613 break;
2614 }
2615 case k_ImmRange: {
2616 OS << "<immrange ";
2617 OS << getFirstImmVal();
2618 OS << ":" << getLastImmVal() << ">";
2619 break;
2620 }
2621 case k_CondCode:
2622 OS << "<condcode " << getCondCode() << ">";
2623 break;
2624 case k_VectorList: {
2625 OS << "<vectorlist ";
2626 unsigned Reg = getVectorListStart();
2627 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2628 OS << Reg + i * getVectorListStride() << " ";
2629 OS << ">";
2630 break;
2631 }
2632 case k_VectorIndex:
2633 OS << "<vectorindex " << getVectorIndex() << ">";
2634 break;
2635 case k_SysReg:
2636 OS << "<sysreg: " << getSysReg() << '>';
2637 break;
2638 case k_Token:
2639 OS << "'" << getToken() << "'";
2640 break;
2641 case k_SysCR:
2642 OS << "c" << getSysCR();
2643 break;
2644 case k_Prefetch: {
2645 StringRef Name = getPrefetchName();
2646 if (!Name.empty())
2647 OS << "<prfop " << Name << ">";
2648 else
2649 OS << "<prfop invalid #" << getPrefetch() << ">";
2650 break;
2651 }
2652 case k_PSBHint:
2653 OS << getPSBHintName();
2654 break;
2655 case k_PHint:
2656 OS << getPHintName();
2657 break;
2658 case k_BTIHint:
2659 OS << getBTIHintName();
2660 break;
2661 case k_MatrixRegister:
2662 OS << "<matrix " << getMatrixReg() << ">";
2663 break;
2664 case k_MatrixTileList: {
2665 OS << "<matrixlist ";
2666 unsigned RegMask = getMatrixTileListRegMask();
2667 unsigned MaxBits = 8;
2668 for (unsigned I = MaxBits; I > 0; --I)
2669 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2670 OS << '>';
2671 break;
2672 }
2673 case k_SVCR: {
2674 OS << getSVCR();
2675 break;
2676 }
2677 case k_Register:
2678 OS << "<register " << getReg() << ">";
2679 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2680 break;
2681 [[fallthrough]];
2682 case k_ShiftExtend:
2683 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2684 << getShiftExtendAmount();
2685 if (!hasShiftExtendAmount())
2686 OS << "<imp>";
2687 OS << '>';
2688 break;
2689 }
2690}
2691
2692/// @name Auto-generated Match Functions
2693/// {
2694
2695static unsigned MatchRegisterName(StringRef Name);
2696
2697/// }
2698
2699static unsigned MatchNeonVectorRegName(StringRef Name) {
2700 return StringSwitch<unsigned>(Name.lower())
2701 .Case("v0", AArch64::Q0)
2702 .Case("v1", AArch64::Q1)
2703 .Case("v2", AArch64::Q2)
2704 .Case("v3", AArch64::Q3)
2705 .Case("v4", AArch64::Q4)
2706 .Case("v5", AArch64::Q5)
2707 .Case("v6", AArch64::Q6)
2708 .Case("v7", AArch64::Q7)
2709 .Case("v8", AArch64::Q8)
2710 .Case("v9", AArch64::Q9)
2711 .Case("v10", AArch64::Q10)
2712 .Case("v11", AArch64::Q11)
2713 .Case("v12", AArch64::Q12)
2714 .Case("v13", AArch64::Q13)
2715 .Case("v14", AArch64::Q14)
2716 .Case("v15", AArch64::Q15)
2717 .Case("v16", AArch64::Q16)
2718 .Case("v17", AArch64::Q17)
2719 .Case("v18", AArch64::Q18)
2720 .Case("v19", AArch64::Q19)
2721 .Case("v20", AArch64::Q20)
2722 .Case("v21", AArch64::Q21)
2723 .Case("v22", AArch64::Q22)
2724 .Case("v23", AArch64::Q23)
2725 .Case("v24", AArch64::Q24)
2726 .Case("v25", AArch64::Q25)
2727 .Case("v26", AArch64::Q26)
2728 .Case("v27", AArch64::Q27)
2729 .Case("v28", AArch64::Q28)
2730 .Case("v29", AArch64::Q29)
2731 .Case("v30", AArch64::Q30)
2732 .Case("v31", AArch64::Q31)
2733 .Default(0);
2734}
2735
2736/// Returns an optional pair of (#elements, element-width) if Suffix
2737/// is a valid vector kind. Where the number of elements in a vector
2738/// or the vector width is implicit or explicitly unknown (but still a
2739/// valid suffix kind), 0 is used.
2740static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2741 RegKind VectorKind) {
2742 std::pair<int, int> Res = {-1, -1};
2743
2744 switch (VectorKind) {
2745 case RegKind::NeonVector:
2746 Res = StringSwitch<std::pair<int, int>>(Suffix)
2747 .Case("", {0, 0})
2748 .Case(".1d", {1, 64})
2749 .Case(".1q", {1, 128})
2750 // '.2h' needed for fp16 scalar pairwise reductions
2751 .Case(".2h", {2, 16})
2752 .Case(".2b", {2, 8})
2753 .Case(".2s", {2, 32})
2754 .Case(".2d", {2, 64})
2755 // '.4b' is another special case for the ARMv8.2a dot product
2756 // operand
2757 .Case(".4b", {4, 8})
2758 .Case(".4h", {4, 16})
2759 .Case(".4s", {4, 32})
2760 .Case(".8b", {8, 8})
2761 .Case(".8h", {8, 16})
2762 .Case(".16b", {16, 8})
2763 // Accept the width neutral ones, too, for verbose syntax. If
2764 // those aren't used in the right places, the token operand won't
2765 // match so all will work out.
2766 .Case(".b", {0, 8})
2767 .Case(".h", {0, 16})
2768 .Case(".s", {0, 32})
2769 .Case(".d", {0, 64})
2770 .Default({-1, -1});
2771 break;
2772 case RegKind::SVEPredicateAsCounter:
2773 case RegKind::SVEPredicateVector:
2774 case RegKind::SVEDataVector:
2775 case RegKind::Matrix:
2776 Res = StringSwitch<std::pair<int, int>>(Suffix)
2777 .Case("", {0, 0})
2778 .Case(".b", {0, 8})
2779 .Case(".h", {0, 16})
2780 .Case(".s", {0, 32})
2781 .Case(".d", {0, 64})
2782 .Case(".q", {0, 128})
2783 .Default({-1, -1});
2784 break;
2785 default:
2786 llvm_unreachable("Unsupported RegKind");
2787 }
2788
2789 if (Res == std::make_pair(-1, -1))
2790 return std::nullopt;
2791
2792 return std::optional<std::pair<int, int>>(Res);
2793}
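// Example (illustrative, not from the original source): ".4s" parses to
// {4, 32}; a width-only SVE suffix such as ".d" parses to {0, 64}, where 0
// marks an implicit element count.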
2794
2795static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2796 return parseVectorKind(Suffix, VectorKind).has_value();
2797}
2798
2799static unsigned matchSVEDataVectorRegName(StringRef Name) {
2800 return StringSwitch<unsigned>(Name.lower())
2801 .Case("z0", AArch64::Z0)
2802 .Case("z1", AArch64::Z1)
2803 .Case("z2", AArch64::Z2)
2804 .Case("z3", AArch64::Z3)
2805 .Case("z4", AArch64::Z4)
2806 .Case("z5", AArch64::Z5)
2807 .Case("z6", AArch64::Z6)
2808 .Case("z7", AArch64::Z7)
2809 .Case("z8", AArch64::Z8)
2810 .Case("z9", AArch64::Z9)
2811 .Case("z10", AArch64::Z10)
2812 .Case("z11", AArch64::Z11)
2813 .Case("z12", AArch64::Z12)
2814 .Case("z13", AArch64::Z13)
2815 .Case("z14", AArch64::Z14)
2816 .Case("z15", AArch64::Z15)
2817 .Case("z16", AArch64::Z16)
2818 .Case("z17", AArch64::Z17)
2819 .Case("z18", AArch64::Z18)
2820 .Case("z19", AArch64::Z19)
2821 .Case("z20", AArch64::Z20)
2822 .Case("z21", AArch64::Z21)
2823 .Case("z22", AArch64::Z22)
2824 .Case("z23", AArch64::Z23)
2825 .Case("z24", AArch64::Z24)
2826 .Case("z25", AArch64::Z25)
2827 .Case("z26", AArch64::Z26)
2828 .Case("z27", AArch64::Z27)
2829 .Case("z28", AArch64::Z28)
2830 .Case("z29", AArch64::Z29)
2831 .Case("z30", AArch64::Z30)
2832 .Case("z31", AArch64::Z31)
2833 .Default(0);
2834}
2835
2836static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2837 return StringSwitch<unsigned>(Name.lower())
2838 .Case("p0", AArch64::P0)
2839 .Case("p1", AArch64::P1)
2840 .Case("p2", AArch64::P2)
2841 .Case("p3", AArch64::P3)
2842 .Case("p4", AArch64::P4)
2843 .Case("p5", AArch64::P5)
2844 .Case("p6", AArch64::P6)
2845 .Case("p7", AArch64::P7)
2846 .Case("p8", AArch64::P8)
2847 .Case("p9", AArch64::P9)
2848 .Case("p10", AArch64::P10)
2849 .Case("p11", AArch64::P11)
2850 .Case("p12", AArch64::P12)
2851 .Case("p13", AArch64::P13)
2852 .Case("p14", AArch64::P14)
2853 .Case("p15", AArch64::P15)
2854 .Default(0);
2855}
2856
2857static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2858 return StringSwitch<unsigned>(Name.lower())
2859 .Case("pn0", AArch64::PN0)
2860 .Case("pn1", AArch64::PN1)
2861 .Case("pn2", AArch64::PN2)
2862 .Case("pn3", AArch64::PN3)
2863 .Case("pn4", AArch64::PN4)
2864 .Case("pn5", AArch64::PN5)
2865 .Case("pn6", AArch64::PN6)
2866 .Case("pn7", AArch64::PN7)
2867 .Case("pn8", AArch64::PN8)
2868 .Case("pn9", AArch64::PN9)
2869 .Case("pn10", AArch64::PN10)
2870 .Case("pn11", AArch64::PN11)
2871 .Case("pn12", AArch64::PN12)
2872 .Case("pn13", AArch64::PN13)
2873 .Case("pn14", AArch64::PN14)
2874 .Case("pn15", AArch64::PN15)
2875 .Default(0);
2876}
2877
2878static unsigned matchMatrixTileListRegName(StringRef Name) {
2879 return StringSwitch<unsigned>(Name.lower())
2880 .Case("za0.d", AArch64::ZAD0)
2881 .Case("za1.d", AArch64::ZAD1)
2882 .Case("za2.d", AArch64::ZAD2)
2883 .Case("za3.d", AArch64::ZAD3)
2884 .Case("za4.d", AArch64::ZAD4)
2885 .Case("za5.d", AArch64::ZAD5)
2886 .Case("za6.d", AArch64::ZAD6)
2887 .Case("za7.d", AArch64::ZAD7)
2888 .Case("za0.s", AArch64::ZAS0)
2889 .Case("za1.s", AArch64::ZAS1)
2890 .Case("za2.s", AArch64::ZAS2)
2891 .Case("za3.s", AArch64::ZAS3)
2892 .Case("za0.h", AArch64::ZAH0)
2893 .Case("za1.h", AArch64::ZAH1)
2894 .Case("za0.b", AArch64::ZAB0)
2895 .Default(0);
2896}
2897
2898static unsigned matchMatrixRegName(StringRef Name) {
2899 return StringSwitch<unsigned>(Name.lower())
2900 .Case("za", AArch64::ZA)
2901 .Case("za0.q", AArch64::ZAQ0)
2902 .Case("za1.q", AArch64::ZAQ1)
2903 .Case("za2.q", AArch64::ZAQ2)
2904 .Case("za3.q", AArch64::ZAQ3)
2905 .Case("za4.q", AArch64::ZAQ4)
2906 .Case("za5.q", AArch64::ZAQ5)
2907 .Case("za6.q", AArch64::ZAQ6)
2908 .Case("za7.q", AArch64::ZAQ7)
2909 .Case("za8.q", AArch64::ZAQ8)
2910 .Case("za9.q", AArch64::ZAQ9)
2911 .Case("za10.q", AArch64::ZAQ10)
2912 .Case("za11.q", AArch64::ZAQ11)
2913 .Case("za12.q", AArch64::ZAQ12)
2914 .Case("za13.q", AArch64::ZAQ13)
2915 .Case("za14.q", AArch64::ZAQ14)
2916 .Case("za15.q", AArch64::ZAQ15)
2917 .Case("za0.d", AArch64::ZAD0)
2918 .Case("za1.d", AArch64::ZAD1)
2919 .Case("za2.d", AArch64::ZAD2)
2920 .Case("za3.d", AArch64::ZAD3)
2921 .Case("za4.d", AArch64::ZAD4)
2922 .Case("za5.d", AArch64::ZAD5)
2923 .Case("za6.d", AArch64::ZAD6)
2924 .Case("za7.d", AArch64::ZAD7)
2925 .Case("za0.s", AArch64::ZAS0)
2926 .Case("za1.s", AArch64::ZAS1)
2927 .Case("za2.s", AArch64::ZAS2)
2928 .Case("za3.s", AArch64::ZAS3)
2929 .Case("za0.h", AArch64::ZAH0)
2930 .Case("za1.h", AArch64::ZAH1)
2931 .Case("za0.b", AArch64::ZAB0)
2932 .Case("za0h.q", AArch64::ZAQ0)
2933 .Case("za1h.q", AArch64::ZAQ1)
2934 .Case("za2h.q", AArch64::ZAQ2)
2935 .Case("za3h.q", AArch64::ZAQ3)
2936 .Case("za4h.q", AArch64::ZAQ4)
2937 .Case("za5h.q", AArch64::ZAQ5)
2938 .Case("za6h.q", AArch64::ZAQ6)
2939 .Case("za7h.q", AArch64::ZAQ7)
2940 .Case("za8h.q", AArch64::ZAQ8)
2941 .Case("za9h.q", AArch64::ZAQ9)
2942 .Case("za10h.q", AArch64::ZAQ10)
2943 .Case("za11h.q", AArch64::ZAQ11)
2944 .Case("za12h.q", AArch64::ZAQ12)
2945 .Case("za13h.q", AArch64::ZAQ13)
2946 .Case("za14h.q", AArch64::ZAQ14)
2947 .Case("za15h.q", AArch64::ZAQ15)
2948 .Case("za0h.d", AArch64::ZAD0)
2949 .Case("za1h.d", AArch64::ZAD1)
2950 .Case("za2h.d", AArch64::ZAD2)
2951 .Case("za3h.d", AArch64::ZAD3)
2952 .Case("za4h.d", AArch64::ZAD4)
2953 .Case("za5h.d", AArch64::ZAD5)
2954 .Case("za6h.d", AArch64::ZAD6)
2955 .Case("za7h.d", AArch64::ZAD7)
2956 .Case("za0h.s", AArch64::ZAS0)
2957 .Case("za1h.s", AArch64::ZAS1)
2958 .Case("za2h.s", AArch64::ZAS2)
2959 .Case("za3h.s", AArch64::ZAS3)
2960 .Case("za0h.h", AArch64::ZAH0)
2961 .Case("za1h.h", AArch64::ZAH1)
2962 .Case("za0h.b", AArch64::ZAB0)
2963 .Case("za0v.q", AArch64::ZAQ0)
2964 .Case("za1v.q", AArch64::ZAQ1)
2965 .Case("za2v.q", AArch64::ZAQ2)
2966 .Case("za3v.q", AArch64::ZAQ3)
2967 .Case("za4v.q", AArch64::ZAQ4)
2968 .Case("za5v.q", AArch64::ZAQ5)
2969 .Case("za6v.q", AArch64::ZAQ6)
2970 .Case("za7v.q", AArch64::ZAQ7)
2971 .Case("za8v.q", AArch64::ZAQ8)
2972 .Case("za9v.q", AArch64::ZAQ9)
2973 .Case("za10v.q", AArch64::ZAQ10)
2974 .Case("za11v.q", AArch64::ZAQ11)
2975 .Case("za12v.q", AArch64::ZAQ12)
2976 .Case("za13v.q", AArch64::ZAQ13)
2977 .Case("za14v.q", AArch64::ZAQ14)
2978 .Case("za15v.q", AArch64::ZAQ15)
2979 .Case("za0v.d", AArch64::ZAD0)
2980 .Case("za1v.d", AArch64::ZAD1)
2981 .Case("za2v.d", AArch64::ZAD2)
2982 .Case("za3v.d", AArch64::ZAD3)
2983 .Case("za4v.d", AArch64::ZAD4)
2984 .Case("za5v.d", AArch64::ZAD5)
2985 .Case("za6v.d", AArch64::ZAD6)
2986 .Case("za7v.d", AArch64::ZAD7)
2987 .Case("za0v.s", AArch64::ZAS0)
2988 .Case("za1v.s", AArch64::ZAS1)
2989 .Case("za2v.s", AArch64::ZAS2)
2990 .Case("za3v.s", AArch64::ZAS3)
2991 .Case("za0v.h", AArch64::ZAH0)
2992 .Case("za1v.h", AArch64::ZAH1)
2993 .Case("za0v.b", AArch64::ZAB0)
2994 .Default(0);
2995}
2996
2997bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2998 SMLoc &EndLoc) {
2999 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3000}
3001
3002ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3003 SMLoc &EndLoc) {
3004 StartLoc = getLoc();
3005 ParseStatus Res = tryParseScalarRegister(Reg);
3006 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3007 return Res;
3008}
3009
3010// Matches a register name or register alias previously defined by '.req'
3011unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3012 RegKind Kind) {
3013 unsigned RegNum = 0;
3014 if ((RegNum = matchSVEDataVectorRegName(Name)))
3015 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3016
3017 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3018 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3019
3020 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3021 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3022
3023 if ((RegNum = MatchNeonVectorRegName(Name)))
3024 return Kind == RegKind::NeonVector ? RegNum : 0;
3025
3026 if ((RegNum = matchMatrixRegName(Name)))
3027 return Kind == RegKind::Matrix ? RegNum : 0;
3028
3029 if (Name.equals_insensitive("zt0"))
3030 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3031
3032 // The parsed register must be of RegKind Scalar
3033 if ((RegNum = MatchRegisterName(Name)))
3034 return (Kind == RegKind::Scalar) ? RegNum : 0;
3035
3036 if (!RegNum) {
3037 // Handle a few common aliases of registers.
3038 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3039 .Case("fp", AArch64::FP)
3040 .Case("lr", AArch64::LR)
3041 .Case("x31", AArch64::XZR)
3042 .Case("w31", AArch64::WZR)
3043 .Default(0))
3044 return Kind == RegKind::Scalar ? RegNum : 0;
3045
3046 // Check for aliases registered via .req. Canonicalize to lower case.
3047 // That's more consistent since register names are case insensitive, and
3048 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3049 auto Entry = RegisterReqs.find(Name.lower());
3050 if (Entry == RegisterReqs.end())
3051 return 0;
3052
3053 // set RegNum if the match is the right kind of register
3054 if (Kind == Entry->getValue().first)
3055 RegNum = Entry->getValue().second;
3056 }
3057 return RegNum;
3058}
3059
3060unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3061 switch (K) {
3062 case RegKind::Scalar:
3063 case RegKind::NeonVector:
3064 case RegKind::SVEDataVector:
3065 return 32;
3066 case RegKind::Matrix:
3067 case RegKind::SVEPredicateVector:
3068 case RegKind::SVEPredicateAsCounter:
3069 return 16;
3070 case RegKind::LookupTable:
3071 return 1;
3072 }
3073 llvm_unreachable("Unsupported RegKind");
3074}
3075
3076/// tryParseScalarRegister - Try to parse a register name. The token must be an
3077/// Identifier when called, and if it is a register name the token is eaten and
3078/// the register is added to the operand list.
3079ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3080 const AsmToken &Tok = getTok();
3081 if (Tok.isNot(AsmToken::Identifier))
3082 return ParseStatus::NoMatch;
3083
3084 std::string lowerCase = Tok.getString().lower();
3085 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3086 if (Reg == 0)
3087 return ParseStatus::NoMatch;
3088
3089 RegNum = Reg;
3090 Lex(); // Eat identifier token.
3091 return ParseStatus::Success;
3092}
3093
3094/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3095ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3096 SMLoc S = getLoc();
3097
3098 if (getTok().isNot(AsmToken::Identifier))
3099 return Error(S, "Expected cN operand where 0 <= N <= 15");
3100
3101 StringRef Tok = getTok().getIdentifier();
3102 if (Tok[0] != 'c' && Tok[0] != 'C')
3103 return Error(S, "Expected cN operand where 0 <= N <= 15");
3104
3105 uint32_t CRNum;
3106 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3107 if (BadNum || CRNum > 15)
3108 return Error(S, "Expected cN operand where 0 <= N <= 15");
3109
3110 Lex(); // Eat identifier token.
3111 Operands.push_back(
3112 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3113 return ParseStatus::Success;
3114}
3115
3116// Either an identifier for named values or a 6-bit immediate.
3117ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3118 SMLoc S = getLoc();
3119 const AsmToken &Tok = getTok();
3120
3121 unsigned MaxVal = 63;
3122
3123 // Immediate case, with optional leading hash:
3124 if (parseOptionalToken(AsmToken::Hash) ||
3125 Tok.is(AsmToken::Integer)) {
3126 const MCExpr *ImmVal;
3127 if (getParser().parseExpression(ImmVal))
3128 return ParseStatus::Failure;
3129
3130 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3131 if (!MCE)
3132 return TokError("immediate value expected for prefetch operand");
3133 unsigned prfop = MCE->getValue();
3134 if (prfop > MaxVal)
3135 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3136 "] expected");
3137
3138 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3139 Operands.push_back(AArch64Operand::CreatePrefetch(
3140 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3141 return ParseStatus::Success;
3142 }
3143
3144 if (Tok.isNot(AsmToken::Identifier))
3145 return TokError("prefetch hint expected");
3146
3147 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3148 if (!RPRFM)
3149 return TokError("prefetch hint expected");
3150
3151 Operands.push_back(AArch64Operand::CreatePrefetch(
3152 RPRFM->Encoding, Tok.getString(), S, getContext()));
3153 Lex(); // Eat identifier token.
3154 return ParseStatus::Success;
3155}
3156
3157/// tryParsePrefetch - Try to parse a prefetch operand.
3158template <bool IsSVEPrefetch>
3159ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3160 SMLoc S = getLoc();
3161 const AsmToken &Tok = getTok();
3162
3163 auto LookupByName = [](StringRef N) {
3164 if (IsSVEPrefetch) {
3165 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3166 return std::optional<unsigned>(Res->Encoding);
3167 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3168 return std::optional<unsigned>(Res->Encoding);
3169 return std::optional<unsigned>();
3170 };
3171
3172 auto LookupByEncoding = [](unsigned E) {
3173 if (IsSVEPrefetch) {
3174 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3175 return std::optional<StringRef>(Res->Name);
3176 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3177 return std::optional<StringRef>(Res->Name);
3178 return std::optional<StringRef>();
3179 };
3180 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3181
3182 // Either an identifier for named values or a 5-bit immediate.
3183 // Eat optional hash.
3184 if (parseOptionalToken(AsmToken::Hash) ||
3185 Tok.is(AsmToken::Integer)) {
3186 const MCExpr *ImmVal;
3187 if (getParser().parseExpression(ImmVal))
3188 return ParseStatus::Failure;
3189
3190 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3191 if (!MCE)
3192 return TokError("immediate value expected for prefetch operand");
3193 unsigned prfop = MCE->getValue();
3194 if (prfop > MaxVal)
3195 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3196 "] expected");
3197
3198 auto PRFM = LookupByEncoding(MCE->getValue());
3199 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3200 S, getContext()));
3201 return ParseStatus::Success;
3202 }
3203
3204 if (Tok.isNot(AsmToken::Identifier))
3205 return TokError("prefetch hint expected");
3206
3207 auto PRFM = LookupByName(Tok.getString());
3208 if (!PRFM)
3209 return TokError("prefetch hint expected");
3210
3211 Operands.push_back(AArch64Operand::CreatePrefetch(
3212 *PRFM, Tok.getString(), S, getContext()));
3213 Lex(); // Eat identifier token.
3214 return ParseStatus::Success;
3215}
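// Example (illustrative, not from the original source): both the named form
// "prfm pldl1keep, [x0]" and the raw form "prfm #5, [x0]" are accepted; SVE
// prefetches use the smaller 0..15 encoding range.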
3216
3217/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3218ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3219 SMLoc S = getLoc();
3220 const AsmToken &Tok = getTok();
3221 if (Tok.isNot(AsmToken::Identifier))
3222 return TokError("invalid operand for instruction");
3223
3224 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3225 if (!PSB)
3226 return TokError("invalid operand for instruction");
3227
3228 Operands.push_back(AArch64Operand::CreatePSBHint(
3229 PSB->Encoding, Tok.getString(), S, getContext()));
3230 Lex(); // Eat identifier token.
3231 return ParseStatus::Success;
3232}
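// Example (illustrative, not from the original source): "psb csync" parses
// the named hint and emits its encoding as an immediate operand.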
3233
3234ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3235 SMLoc StartLoc = getLoc();
3236
3237 MCRegister RegNum;
3238
3239 // The case where xzr, xzr is not present is handled by an InstAlias.
3240
3241 auto RegTok = getTok(); // in case we need to backtrack
3242 if (!tryParseScalarRegister(RegNum).isSuccess())
3243 return ParseStatus::NoMatch;
3244
3245 if (RegNum != AArch64::XZR) {
3246 getLexer().UnLex(RegTok);
3247 return ParseStatus::NoMatch;
3248 }
3249
3250 if (parseComma())
3251 return ParseStatus::Failure;
3252
3253 if (!tryParseScalarRegister(RegNum).isSuccess())
3254 return TokError("expected register operand");
3255
3256 if (RegNum != AArch64::XZR)
3257 return TokError("xzr must be followed by xzr");
3258
3259 // We need to push something, since we claim this is an operand in .td.
3260 // See also AArch64AsmParser::parseKeywordOperand.
3261 Operands.push_back(AArch64Operand::CreateReg(
3262 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3263
3264 return ParseStatus::Success;
3265}
3266
3267/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3268ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3269 SMLoc S = getLoc();
3270 const AsmToken &Tok = getTok();
3271 if (Tok.isNot(AsmToken::Identifier))
3272 return TokError("invalid operand for instruction");
3273
3274 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3275 if (!BTI)
3276 return TokError("invalid operand for instruction");
3277
3278 Operands.push_back(AArch64Operand::CreateBTIHint(
3279 BTI->Encoding, Tok.getString(), S, getContext()));
3280 Lex(); // Eat identifier token.
3281 return ParseStatus::Success;
3282}
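// Example (illustrative, not from the original source): "bti c", "bti j" and
// "bti jc" supply one of the named targets; a raw immediate is not accepted
// here.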
3283
3284/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3285/// instruction.
3286ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3287 SMLoc S = getLoc();
3288 const MCExpr *Expr = nullptr;
3289
3290 if (getTok().is(AsmToken::Hash)) {
3291 Lex(); // Eat hash token.
3292 }
3293
3294 if (parseSymbolicImmVal(Expr))
3295 return ParseStatus::Failure;
3296
3297 AArch64MCExpr::VariantKind ELFRefKind;
3298 MCSymbolRefExpr::VariantKind DarwinRefKind;
3299 int64_t Addend;
3300 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3301 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3302 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3303 // No modifier was specified at all; this is the syntax for an ELF basic
3304 // ADRP relocation (unfortunately).
3305 Expr =
3306 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3307 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3308 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3309 Addend != 0) {
3310 return Error(S, "gotpage label reference not allowed an addend");
3311 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3312 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3313 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3314 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3315 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3316 ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
3317 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3318 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3319 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE &&
3320 ELFRefKind != AArch64MCExpr::VK_TLSDESC_AUTH_PAGE) {
3321 // The operand must be an @page or @gotpage qualified symbolref.
3322 return Error(S, "page or gotpage label reference expected");
3323 }
3324 }
3325
3326 // We have either a label reference possibly with addend or an immediate. The
3327 // addend is a raw value here. The linker will adjust it to only reference the
3328 // page.
3329 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3330 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3331
3332 return ParseStatus::Success;
3333}
3334
3335/// tryParseAdrLabel - Parse and validate a source label for the ADR
3336/// instruction.
3337ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3338 SMLoc S = getLoc();
3339 const MCExpr *Expr = nullptr;
3340
3341 // Leave anything with a bracket to the default for SVE
3342 if (getTok().is(AsmToken::LBrac))
3343 return ParseStatus::NoMatch;
3344
3345 if (getTok().is(AsmToken::Hash))
3346 Lex(); // Eat hash token.
3347
3348 if (parseSymbolicImmVal(Expr))
3349 return ParseStatus::Failure;
3350
3351 AArch64MCExpr::VariantKind ELFRefKind;
3352 MCSymbolRefExpr::VariantKind DarwinRefKind;
3353 int64_t Addend;
3354 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3355 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3356 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3357 // No modifier was specified at all; this is the syntax for an ELF basic
3358 // ADR relocation (unfortunately).
3359 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3360 } else if (ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE) {
3361 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3362 // adr. It's not actually GOT entry page address but the GOT address
3363 // itself - we just share the same variant kind with :got_auth: operator
3364 // applied for adrp.
3365 // TODO: can we somehow get current TargetMachine object to call
3366 // getCodeModel() on it to ensure we are using tiny code model?
3367 return Error(S, "unexpected adr label");
3368 }
3369 }
3370
3371 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3372 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3373 return ParseStatus::Success;
3374}
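// Example (illustrative, not from the original source): "adr x0, label"
// accepts a label or constant within ADR's +/-1 MiB range (a signed 21-bit
// byte offset), matching the isAdrLabel() check above.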
3375
3376/// tryParseFPImm - A floating point immediate expression operand.
3377template <bool AddFPZeroAsLiteral>
3378ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3379 SMLoc S = getLoc();
3380
3381 bool Hash = parseOptionalToken(AsmToken::Hash);
3382
3383 // Handle negation, as that still comes through as a separate token.
3384 bool isNegative = parseOptionalToken(AsmToken::Minus);
3385
3386 const AsmToken &Tok = getTok();
3387 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3388 if (!Hash)
3389 return ParseStatus::NoMatch;
3390 return TokError("invalid floating point immediate");
3391 }
3392
3393 // Parse hexadecimal representation.
3394 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3395 if (Tok.getIntVal() > 255 || isNegative)
3396 return TokError("encoded floating point value out of range");
3397
3398 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3399 Operands.push_back(
3400 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3401 } else {
3402 // Parse FP representation.
3403 APFloat RealVal(APFloat::IEEEdouble());
3404 auto StatusOrErr =
3405 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3406 if (errorToBool(StatusOrErr.takeError()))
3407 return TokError("invalid floating point representation");
3408
3409 if (isNegative)
3410 RealVal.changeSign();
3411
3412 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3413 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3414 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3415 } else
3416 Operands.push_back(AArch64Operand::CreateFPImm(
3417 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3418 }
3419
3420 Lex(); // Eat the token.
3421
3422 return ParseStatus::Success;
3423}
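// Example (illustrative, not from the original source): "fmov d0, #1.0" uses
// the decimal form, while "fmov d0, #0x70" supplies the 8-bit encoding
// directly (0x70 encodes 1.0).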
3424
3425/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3426/// a shift suffix, for example '#1, lsl #12'.
3427ParseStatus
3428AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3429 SMLoc S = getLoc();
3430
3431 if (getTok().is(AsmToken::Hash))
3432 Lex(); // Eat '#'
3433 else if (getTok().isNot(AsmToken::Integer))
3434 // Operand should start from # or should be integer, emit error otherwise.
3435 return ParseStatus::NoMatch;
3436
3437 if (getTok().is(AsmToken::Integer) &&
3438 getLexer().peekTok().is(AsmToken::Colon))
3439 return tryParseImmRange(Operands);
3440
3441 const MCExpr *Imm = nullptr;
3442 if (parseSymbolicImmVal(Imm))
3443 return ParseStatus::Failure;
3444 else if (getTok().isNot(AsmToken::Comma)) {
3445 Operands.push_back(
3446 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3447 return ParseStatus::Success;
3448 }
3449
3450 // Eat ','
3451 Lex();
3452 StringRef VecGroup;
3453 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3454 Operands.push_back(
3455 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3456 Operands.push_back(
3457 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3458 return ParseStatus::Success;
3459 }
3460
3461 // The optional operand must be "lsl #N" where N is non-negative.
3462 if (!getTok().is(AsmToken::Identifier) ||
3463 !getTok().getIdentifier().equals_insensitive("lsl"))
3464 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3465
3466 // Eat 'lsl'
3467 Lex();
3468
3469 parseOptionalToken(AsmToken::Hash);
3470
3471 if (getTok().isNot(AsmToken::Integer))
3472 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3473
3474 int64_t ShiftAmount = getTok().getIntVal();
3475
3476 if (ShiftAmount < 0)
3477 return Error(getLoc(), "positive shift amount required");
3478 Lex(); // Eat the number
3479
3480 // Just in case the optional lsl #0 is used for immediates other than zero.
3481 if (ShiftAmount == 0 && Imm != nullptr) {
3482 Operands.push_back(
3483 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3484 return ParseStatus::Success;
3485 }
3486
3487 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3488 getLoc(), getContext()));
3489 return ParseStatus::Success;
3490}
3491
3492/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3493/// suggestion to help common typos.
3494 AArch64CC::CondCode
3495 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3496 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3497 .Case("eq", AArch64CC::EQ)
3498 .Case("ne", AArch64CC::NE)
3499 .Case("cs", AArch64CC::HS)
3500 .Case("hs", AArch64CC::HS)
3501 .Case("cc", AArch64CC::LO)
3502 .Case("lo", AArch64CC::LO)
3503 .Case("mi", AArch64CC::MI)
3504 .Case("pl", AArch64CC::PL)
3505 .Case("vs", AArch64CC::VS)
3506 .Case("vc", AArch64CC::VC)
3507 .Case("hi", AArch64CC::HI)
3508 .Case("ls", AArch64CC::LS)
3509 .Case("ge", AArch64CC::GE)
3510 .Case("lt", AArch64CC::LT)
3511 .Case("gt", AArch64CC::GT)
3512 .Case("le", AArch64CC::LE)
3513 .Case("al", AArch64CC::AL)
3514 .Case("nv", AArch64CC::NV)
3515 .Default(AArch64CC::Invalid);
3516
3517 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3518 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3519 .Case("none", AArch64CC::EQ)
3520 .Case("any", AArch64CC::NE)
3521 .Case("nlast", AArch64CC::HS)
3522 .Case("last", AArch64CC::LO)
3523 .Case("first", AArch64CC::MI)
3524 .Case("nfrst", AArch64CC::PL)
3525 .Case("pmore", AArch64CC::HI)
3526 .Case("plast", AArch64CC::LS)
3527 .Case("tcont", AArch64CC::GE)
3528 .Case("tstop", AArch64CC::LT)
3529 .Default(AArch64CC::Invalid);
3530
3531 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3532 Suggestion = "nfrst";
3533 }
3534 return CC;
3535}
3536
3537/// parseCondCode - Parse a Condition Code operand.
3538bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3539 bool invertCondCode) {
3540 SMLoc S = getLoc();
3541 const AsmToken &Tok = getTok();
3542 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3543
3544 StringRef Cond = Tok.getString();
3545 std::string Suggestion;
3546 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3547 if (CC == AArch64CC::Invalid) {
3548 std::string Msg = "invalid condition code";
3549 if (!Suggestion.empty())
3550 Msg += ", did you mean " + Suggestion + "?";
3551 return TokError(Msg);
3552 }
3553 Lex(); // Eat identifier token.
3554
3555 if (invertCondCode) {
3556 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3557 return TokError("condition codes AL and NV are invalid for this instruction");
3558 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3559 }
3560
3561 Operands.push_back(
3562 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3563 return false;
3564}
3565
3566ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3567 const AsmToken &Tok = getTok();
3568 SMLoc S = getLoc();
3569
3570 if (Tok.isNot(AsmToken::Identifier))
3571 return TokError("invalid operand for instruction");
3572
3573 unsigned PStateImm = -1;
3574 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3575 if (!SVCR)
3576 return ParseStatus::NoMatch;
3577 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3578 PStateImm = SVCR->Encoding;
3579
3580 Operands.push_back(
3581 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3582 Lex(); // Eat identifier token.
3583 return ParseStatus::Success;
3584}
3585
3586ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3587 const AsmToken &Tok = getTok();
3588 SMLoc S = getLoc();
3589
3590 StringRef Name = Tok.getString();
3591
3592 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3593 Lex(); // eat "za[.(b|h|s|d)]"
3594 unsigned ElementWidth = 0;
3595 auto DotPosition = Name.find('.');
3596 if (DotPosition != StringRef::npos) {
3597 const auto &KindRes =
3598 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3599 if (!KindRes)
3600 return TokError(
3601 "Expected the register to be followed by element width suffix");
3602 ElementWidth = KindRes->second;
3603 }
3604 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3605 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3606 getContext()));
3607 if (getLexer().is(AsmToken::LBrac)) {
3608 // There's no comma after the matrix operand, so we can parse the next
3609 // operand immediately.
3610 if (parseOperand(Operands, false, false))
3611 return ParseStatus::NoMatch;
3612 }
3613 return ParseStatus::Success;
3614 }
3615
3616 // Try to parse matrix register.
3617 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3618 if (!Reg)
3619 return ParseStatus::NoMatch;
3620
3621 size_t DotPosition = Name.find('.');
3622 assert(DotPosition != StringRef::npos && "Unexpected register");
3623
3624 StringRef Head = Name.take_front(DotPosition);
3625 StringRef Tail = Name.drop_front(DotPosition);
3626 StringRef RowOrColumn = Head.take_back();
3627
3628 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3629 .Case("h", MatrixKind::Row)
3630 .Case("v", MatrixKind::Col)
3631 .Default(MatrixKind::Tile);
3632
3633 // Next, parse the element width suffix.
3634 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3635 if (!KindRes)
3636 return TokError(
3637 "Expected the register to be followed by element width suffix");
3638 unsigned ElementWidth = KindRes->second;
3639
3640 Lex();
3641
3642 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3643 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3644
3645 if (getLexer().is(AsmToken::LBrac)) {
3646 // There's no comma after the matrix operand, so we can parse the next
3647 // operand immediately.
3648 if (parseOperand(Operands, false, false))
3649 return ParseStatus::NoMatch;
3650 }
3651 return ParseStatus::Success;
3652}
3653
3654 /// tryParseOptionalShiftExtend - Some operands take an optional shift or
3655 /// extend argument. Parse them if present.
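/// For example 'lsl #3', 'msl #8' or an extend such as 'sxtw #2'; extend
/// operations may omit the immediate, in which case #0 is implied.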
3656 ParseStatus
3657 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3658 const AsmToken &Tok = getTok();
3659 std::string LowerID = Tok.getString().lower();
3660 AArch64_AM::ShiftExtendType ShOp =
3661 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3662 .Case("lsl", AArch64_AM::LSL)
3663 .Case("lsr", AArch64_AM::LSR)
3664 .Case("asr", AArch64_AM::ASR)
3665 .Case("ror", AArch64_AM::ROR)
3666 .Case("msl", AArch64_AM::MSL)
3667 .Case("uxtb", AArch64_AM::UXTB)
3668 .Case("uxth", AArch64_AM::UXTH)
3669 .Case("uxtw", AArch64_AM::UXTW)
3670 .Case("uxtx", AArch64_AM::UXTX)
3671 .Case("sxtb", AArch64_AM::SXTB)
3672 .Case("sxth", AArch64_AM::SXTH)
3673 .Case("sxtw", AArch64_AM::SXTW)
3674 .Case("sxtx", AArch64_AM::SXTX)
3675 .Default(AArch64_AM::InvalidShiftExtend);
3676
3677 if (ShOp == AArch64_AM::InvalidShiftExtend)
3678 return ParseStatus::NoMatch;
3679
3680 SMLoc S = Tok.getLoc();
3681 Lex();
3682
3683 bool Hash = parseOptionalToken(AsmToken::Hash);
3684
3685 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3686 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3687 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3688 ShOp == AArch64_AM::MSL) {
3689 // We expect a number here.
3690 return TokError("expected #imm after shift specifier");
3691 }
3692
3693 // "extend" type operations don't need an immediate, #0 is implicit.
3694 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3695 Operands.push_back(
3696 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3697 return ParseStatus::Success;
3698 }
3699
3700 // Make sure we do actually have a number, identifier or a parenthesized
3701 // expression.
3702 SMLoc E = getLoc();
3703 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3704 !getTok().is(AsmToken::Identifier))
3705 return Error(E, "expected integer shift amount");
3706
3707 const MCExpr *ImmVal;
3708 if (getParser().parseExpression(ImmVal))
3709 return ParseStatus::Failure;
3710
3711 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3712 if (!MCE)
3713 return Error(E, "expected constant '#imm' after shift specifier");
3714
3715 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3716 Operands.push_back(AArch64Operand::CreateShiftExtend(
3717 ShOp, MCE->getValue(), true, S, E, getContext()));
3718 return ParseStatus::Success;
3719}
3720
3721static const struct Extension {
3722 const char *Name;
3723 const FeatureBitset Features;
3724 } ExtensionMap[] = {
3725 {"crc", {AArch64::FeatureCRC}},
3726 {"sm4", {AArch64::FeatureSM4}},
3727 {"sha3", {AArch64::FeatureSHA3}},
3728 {"sha2", {AArch64::FeatureSHA2}},
3729 {"aes", {AArch64::FeatureAES}},
3730 {"crypto", {AArch64::FeatureCrypto}},
3731 {"fp", {AArch64::FeatureFPARMv8}},
3732 {"simd", {AArch64::FeatureNEON}},
3733 {"ras", {AArch64::FeatureRAS}},
3734 {"rasv2", {AArch64::FeatureRASv2}},
3735 {"lse", {AArch64::FeatureLSE}},
3736 {"predres", {AArch64::FeaturePredRes}},
3737 {"predres2", {AArch64::FeatureSPECRES2}},
3738 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3739 {"mte", {AArch64::FeatureMTE}},
3740 {"memtag", {AArch64::FeatureMTE}},
3741 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3742 {"pan", {AArch64::FeaturePAN}},
3743 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3744 {"ccpp", {AArch64::FeatureCCPP}},
3745 {"rcpc", {AArch64::FeatureRCPC}},
3746 {"rng", {AArch64::FeatureRandGen}},
3747 {"sve", {AArch64::FeatureSVE}},
3748 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3749 {"sve2", {AArch64::FeatureSVE2}},
3750 {"sve-aes", {AArch64::FeatureSVEAES}},
3751 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3752 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3753 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3754 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3755 {"sve2p1", {AArch64::FeatureSVE2p1}},
3756 {"ls64", {AArch64::FeatureLS64}},
3757 {"xs", {AArch64::FeatureXS}},
3758 {"pauth", {AArch64::FeaturePAuth}},
3759 {"flagm", {AArch64::FeatureFlagM}},
3760 {"rme", {AArch64::FeatureRME}},
3761 {"sme", {AArch64::FeatureSME}},
3762 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3763 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3764 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3765 {"sme2", {AArch64::FeatureSME2}},
3766 {"sme2p1", {AArch64::FeatureSME2p1}},
3767 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3768 {"hbc", {AArch64::FeatureHBC}},
3769 {"mops", {AArch64::FeatureMOPS}},
3770 {"mec", {AArch64::FeatureMEC}},
3771 {"the", {AArch64::FeatureTHE}},
3772 {"d128", {AArch64::FeatureD128}},
3773 {"lse128", {AArch64::FeatureLSE128}},
3774 {"ite", {AArch64::FeatureITE}},
3775 {"cssc", {AArch64::FeatureCSSC}},
3776 {"rcpc3", {AArch64::FeatureRCPC3}},
3777 {"gcs", {AArch64::FeatureGCS}},
3778 {"bf16", {AArch64::FeatureBF16}},
3779 {"compnum", {AArch64::FeatureComplxNum}},
3780 {"dotprod", {AArch64::FeatureDotProd}},
3781 {"f32mm", {AArch64::FeatureMatMulFP32}},
3782 {"f64mm", {AArch64::FeatureMatMulFP64}},
3783 {"fp16", {AArch64::FeatureFullFP16}},
3784 {"fp16fml", {AArch64::FeatureFP16FML}},
3785 {"i8mm", {AArch64::FeatureMatMulInt8}},
3786 {"lor", {AArch64::FeatureLOR}},
3787 {"profile", {AArch64::FeatureSPE}},
3788 // "rdma" is the name documented by binutils for the feature, but
3789 // binutils also accepts incomplete prefixes of features, so "rdm"
3790 // works too. Support both spellings here.
3791 {"rdm", {AArch64::FeatureRDM}},
3792 {"rdma", {AArch64::FeatureRDM}},
3793 {"sb", {AArch64::FeatureSB}},
3794 {"ssbs", {AArch64::FeatureSSBS}},
3795 {"tme", {AArch64::FeatureTME}},
3796 {"fp8", {AArch64::FeatureFP8}},
3797 {"faminmax", {AArch64::FeatureFAMINMAX}},
3798 {"fp8fma", {AArch64::FeatureFP8FMA}},
3799 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3800 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3801 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3802 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3803 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3804 {"lut", {AArch64::FeatureLUT}},
3805 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3806 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3807 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3808 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3809 {"cpa", {AArch64::FeatureCPA}},
3810 {"tlbiw", {AArch64::FeatureTLBIW}},
3811 {"pops", {AArch64::FeaturePoPS}},
3812 {"cmpbr", {AArch64::FeatureCMPBR}},
3813 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3814 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3815 {"fprcvt", {AArch64::FeatureFPRCVT}},
3816 {"lsfe", {AArch64::FeatureLSFE}},
3817 {"sme2p2", {AArch64::FeatureSME2p2}},
3818 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3819 {"sve2p2", {AArch64::FeatureSVE2p2}},
3820 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3821 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3822 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3823 {"lsui", {AArch64::FeatureLSUI}},
3824 {"occmo", {AArch64::FeatureOCCMO}},
3825 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3826 };
3827
3828static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3829 if (FBS[AArch64::HasV8_0aOps])
3830 Str += "ARMv8a";
3831 if (FBS[AArch64::HasV8_1aOps])
3832 Str += "ARMv8.1a";
3833 else if (FBS[AArch64::HasV8_2aOps])
3834 Str += "ARMv8.2a";
3835 else if (FBS[AArch64::HasV8_3aOps])
3836 Str += "ARMv8.3a";
3837 else if (FBS[AArch64::HasV8_4aOps])
3838 Str += "ARMv8.4a";
3839 else if (FBS[AArch64::HasV8_5aOps])
3840 Str += "ARMv8.5a";
3841 else if (FBS[AArch64::HasV8_6aOps])
3842 Str += "ARMv8.6a";
3843 else if (FBS[AArch64::HasV8_7aOps])
3844 Str += "ARMv8.7a";
3845 else if (FBS[AArch64::HasV8_8aOps])
3846 Str += "ARMv8.8a";
3847 else if (FBS[AArch64::HasV8_9aOps])
3848 Str += "ARMv8.9a";
3849 else if (FBS[AArch64::HasV9_0aOps])
3850 Str += "ARMv9-a";
3851 else if (FBS[AArch64::HasV9_1aOps])
3852 Str += "ARMv9.1a";
3853 else if (FBS[AArch64::HasV9_2aOps])
3854 Str += "ARMv9.2a";
3855 else if (FBS[AArch64::HasV9_3aOps])
3856 Str += "ARMv9.3a";
3857 else if (FBS[AArch64::HasV9_4aOps])
3858 Str += "ARMv9.4a";
3859 else if (FBS[AArch64::HasV9_5aOps])
3860 Str += "ARMv9.5a";
3861 else if (FBS[AArch64::HasV9_6aOps])
3862 Str += "ARMv9.6a";
3863 else if (FBS[AArch64::HasV8_0rOps])
3864 Str += "ARMv8r";
3865 else {
3866 SmallVector<std::string, 2> ExtMatches;
3867 for (const auto& Ext : ExtensionMap) {
3868 // Use & in case multiple features are enabled
3869 if ((FBS & Ext.Features) != FeatureBitset())
3870 ExtMatches.push_back(Ext.Name);
3871 }
3872 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3873 }
3874}
3875
3876void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3877 SMLoc S) {
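// The Encoding value packs the SYS operands as op1 in bits [13:11], CRn in
// bits [10:7], CRm in bits [6:3] and op2 in bits [2:0]; unpack it into the
// four explicit SYS operands below.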
3878 const uint16_t Op2 = Encoding & 7;
3879 const uint16_t Cm = (Encoding & 0x78) >> 3;
3880 const uint16_t Cn = (Encoding & 0x780) >> 7;
3881 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3882
3883 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3884
3885 Operands.push_back(
3886 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3887 Operands.push_back(
3888 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3889 Operands.push_back(
3890 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3891 Expr = MCConstantExpr::create(Op2, getContext());
3892 Operands.push_back(
3893 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3894}
3895
3896/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3897/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
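/// For example 'ic ialluis' is parsed as if it had been written
/// 'sys #0, c7, c1, #0'.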
3898 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3899 OperandVector &Operands) {
3900 if (Name.contains('.'))
3901 return TokError("invalid operand");
3902
3903 Mnemonic = Name;
3904 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3905
3906 const AsmToken &Tok = getTok();
3907 StringRef Op = Tok.getString();
3908 SMLoc S = Tok.getLoc();
3909
3910 if (Mnemonic == "ic") {
3911 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3912 if (!IC)
3913 return TokError("invalid operand for IC instruction");
3914 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3915 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3916 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3917 return TokError(Str);
3918 }
3919 createSysAlias(IC->Encoding, Operands, S);
3920 } else if (Mnemonic == "dc") {
3921 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3922 if (!DC)
3923 return TokError("invalid operand for DC instruction");
3924 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3925 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3926 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3927 return TokError(Str);
3928 }
3929 createSysAlias(DC->Encoding, Operands, S);
3930 } else if (Mnemonic == "at") {
3931 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3932 if (!AT)
3933 return TokError("invalid operand for AT instruction");
3934 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3935 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3936 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3937 return TokError(Str);
3938 }
3939 createSysAlias(AT->Encoding, Operands, S);
3940 } else if (Mnemonic == "tlbi") {
3941 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3942 if (!TLBI)
3943 return TokError("invalid operand for TLBI instruction");
3944 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3945 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3946 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3947 return TokError(Str);
3948 }
3949 createSysAlias(TLBI->Encoding, Operands, S);
3950 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3951
3952 if (Op.lower() != "rctx")
3953 return TokError("invalid operand for prediction restriction instruction");
3954
3955 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3956 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3957 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3958
3959 if (Mnemonic == "cosp" && !hasSpecres2)
3960 return TokError("COSP requires: predres2");
3961 if (!hasPredres)
3962 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3963
3964 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3965 : Mnemonic == "dvp" ? 0b101
3966 : Mnemonic == "cosp" ? 0b110
3967 : Mnemonic == "cpp" ? 0b111
3968 : 0;
3969 assert(PRCTX_Op2 &&
3970 "Invalid mnemonic for prediction restriction instruction");
3971 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3972 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
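// For example 'cfp rctx, x0' is equivalent to 'sys #3, c7, c3, #4, x0'.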
3973
3974 createSysAlias(Encoding, Operands, S);
3975 }
3976
3977 Lex(); // Eat operand.
3978
3979 bool ExpectRegister = !Op.contains_insensitive("all");
3980 bool HasRegister = false;
3981
3982 // Check for the optional register operand.
3983 if (parseOptionalToken(AsmToken::Comma)) {
3984 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3985 return TokError("expected register operand");
3986 HasRegister = true;
3987 }
3988
3989 if (ExpectRegister && !HasRegister)
3990 return TokError("specified " + Mnemonic + " op requires a register");
3991 else if (!ExpectRegister && HasRegister)
3992 return TokError("specified " + Mnemonic + " op does not use a register");
3993
3994 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3995 return true;
3996
3997 return false;
3998}
3999
4000/// parseSyspAlias - The TLBIP instructions are simple aliases for
4001/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
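/// For example 'tlbip vae1, x0, x1', where the two X registers form a
/// sequential pair.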
4002 bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4003 OperandVector &Operands) {
4004 if (Name.contains('.'))
4005 return TokError("invalid operand");
4006
4007 Mnemonic = Name;
4008 Operands.push_back(
4009 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4010
4011 const AsmToken &Tok = getTok();
4012 StringRef Op = Tok.getString();
4013 SMLoc S = Tok.getLoc();
4014
4015 if (Mnemonic == "tlbip") {
4016 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4017 if (HasnXSQualifier) {
4018 Op = Op.drop_back(3);
4019 }
4020 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4021 if (!TLBIorig)
4022 return TokError("invalid operand for TLBIP instruction");
4023 const AArch64TLBI::TLBI TLBI(
4024 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4025 TLBIorig->NeedsReg,
4026 HasnXSQualifier
4027 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4028 : TLBIorig->FeaturesRequired);
4029 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4030 std::string Name =
4031 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4032 std::string Str("TLBIP " + Name + " requires: ");
4033 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
4034 return TokError(Str);
4035 }
4036 createSysAlias(TLBI.Encoding, Operands, S);
4037 }
4038
4039 Lex(); // Eat operand.
4040
4041 if (parseComma())
4042 return true;
4043
4044 if (Tok.isNot(AsmToken::Identifier))
4045 return TokError("expected register identifier");
4046 auto Result = tryParseSyspXzrPair(Operands);
4047 if (Result.isNoMatch())
4048 Result = tryParseGPRSeqPair(Operands);
4049 if (!Result.isSuccess())
4050 return TokError("specified " + Mnemonic +
4051 " op requires a pair of registers");
4052
4053 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4054 return true;
4055
4056 return false;
4057}
4058
4059ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4060 MCAsmParser &Parser = getParser();
4061 const AsmToken &Tok = getTok();
4062
4063 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4064 return TokError("'csync' operand expected");
4065 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4066 // Immediate operand.
4067 const MCExpr *ImmVal;
4068 SMLoc ExprLoc = getLoc();
4069 AsmToken IntTok = Tok;
4070 if (getParser().parseExpression(ImmVal))
4071 return ParseStatus::Failure;
4072 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4073 if (!MCE)
4074 return Error(ExprLoc, "immediate value expected for barrier operand");
4075 int64_t Value = MCE->getValue();
4076 if (Mnemonic == "dsb" && Value > 15) {
4077 // This case is a no match here, but it might be matched by the nXS
4078 // variant. We deliberately do not unlex the optional '#', since it is not
4079 // needed to identify an integer immediate.
4080 Parser.getLexer().UnLex(IntTok);
4081 return ParseStatus::NoMatch;
4082 }
4083 if (Value < 0 || Value > 15)
4084 return Error(ExprLoc, "barrier operand out of range");
4085 auto DB = AArch64DB::lookupDBByEncoding(Value);
4086 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4087 ExprLoc, getContext(),
4088 false /*hasnXSModifier*/));
4089 return ParseStatus::Success;
4090 }
4091
4092 if (Tok.isNot(AsmToken::Identifier))
4093 return TokError("invalid operand for instruction");
4094
4095 StringRef Operand = Tok.getString();
4096 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4097 auto DB = AArch64DB::lookupDBByName(Operand);
4098 // The only valid named option for ISB is 'sy'
4099 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4100 return TokError("'sy' or #imm operand expected");
4101 // The only valid named option for TSB is 'csync'
4102 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4103 return TokError("'csync' operand expected");
4104 if (!DB && !TSB) {
4105 if (Mnemonic == "dsb") {
4106 // This case is a no match here, but it might be matched by the nXS
4107 // variant.
4108 return ParseStatus::NoMatch;
4109 }
4110 return TokError("invalid barrier option name");
4111 }
4112
4113 Operands.push_back(AArch64Operand::CreateBarrier(
4114 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4115 getContext(), false /*hasnXSModifier*/));
4116 Lex(); // Consume the option
4117
4118 return ParseStatus::Success;
4119}
4120
4121 ParseStatus
4122 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4123 const AsmToken &Tok = getTok();
4124
4125 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4126 if (Mnemonic != "dsb")
4127 return ParseStatus::Failure;
4128
4129 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4130 // Immediate operand.
4131 const MCExpr *ImmVal;
4132 SMLoc ExprLoc = getLoc();
4133 if (getParser().parseExpression(ImmVal))
4134 return ParseStatus::Failure;
4135 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4136 if (!MCE)
4137 return Error(ExprLoc, "immediate value expected for barrier operand");
4138 int64_t Value = MCE->getValue();
4139 // The Armv8.7-A DSB nXS variant accepts only the following immediate
4140 // values: 16, 20, 24 and 28.
4141 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4142 return Error(ExprLoc, "barrier operand out of range");
4143 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4144 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4145 ExprLoc, getContext(),
4146 true /*hasnXSModifier*/));
4147 return ParseStatus::Success;
4148 }
4149
4150 if (Tok.isNot(AsmToken::Identifier))
4151 return TokError("invalid operand for instruction");
4152
4153 StringRef Operand = Tok.getString();
4154 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4155
4156 if (!DB)
4157 return TokError("invalid barrier option name");
4158
4159 Operands.push_back(
4160 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4161 getContext(), true /*hasnXSModifier*/));
4162 Lex(); // Consume the option
4163
4164 return ParseStatus::Success;
4165}
4166
4167ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4168 const AsmToken &Tok = getTok();
4169
4170 if (Tok.isNot(AsmToken::Identifier))
4171 return ParseStatus::NoMatch;
4172
4173 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4174 return ParseStatus::NoMatch;
4175
4176 int MRSReg, MSRReg;
4177 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4178 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4179 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4180 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4181 } else
4182 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4183
4184 unsigned PStateImm = -1;
4185 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4186 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4187 PStateImm = PState15->Encoding;
4188 if (!PState15) {
4189 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4190 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4191 PStateImm = PState1->Encoding;
4192 }
4193
4194 Operands.push_back(
4195 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4196 PStateImm, getContext()));
4197 Lex(); // Eat identifier
4198
4199 return ParseStatus::Success;
4200}
4201
4202 ParseStatus
4203 AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4204 SMLoc S = getLoc();
4205 const AsmToken &Tok = getTok();
4206 if (Tok.isNot(AsmToken::Identifier))
4207 return TokError("invalid operand for instruction");
4208
4209 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4210 if (!PH)
4211 return TokError("invalid operand for instruction");
4212
4213 Operands.push_back(AArch64Operand::CreatePHintInst(
4214 PH->Encoding, Tok.getString(), S, getContext()));
4215 Lex(); // Eat identifier token.
4216 return ParseStatus::Success;
4217}
4218
4219/// tryParseNeonVectorRegister - Parse a vector register operand.
4220bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4221 if (getTok().isNot(AsmToken::Identifier))
4222 return true;
4223
4224 SMLoc S = getLoc();
4225 // Check for a vector register specifier first.
4226 StringRef Kind;
4227 MCRegister Reg;
4228 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4229 if (!Res.isSuccess())
4230 return true;
4231
4232 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4233 if (!KindRes)
4234 return true;
4235
4236 unsigned ElementWidth = KindRes->second;
4237 Operands.push_back(
4238 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4239 S, getLoc(), getContext()));
4240
4241 // If there was an explicit qualifier, that goes on as a literal text
4242 // operand.
4243 if (!Kind.empty())
4244 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4245
4246 return tryParseVectorIndex(Operands).isFailure();
4247}
4248
4249ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4250 SMLoc SIdx = getLoc();
4251 if (parseOptionalToken(AsmToken::LBrac)) {
4252 const MCExpr *ImmVal;
4253 if (getParser().parseExpression(ImmVal))
4254 return ParseStatus::NoMatch;
4255 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4256 if (!MCE)
4257 return TokError("immediate value expected for vector index");
4258
4259 SMLoc E = getLoc();
4260
4261 if (parseToken(AsmToken::RBrac, "']' expected"))
4262 return ParseStatus::Failure;
4263
4264 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4265 E, getContext()));
4266 return ParseStatus::Success;
4267 }
4268
4269 return ParseStatus::NoMatch;
4270}
4271
4272// tryParseVectorRegister - Try to parse a vector register name with
4273// optional kind specifier. If it is a register specifier, eat the token
4274// and return it.
4275ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4276 StringRef &Kind,
4277 RegKind MatchKind) {
4278 const AsmToken &Tok = getTok();
4279
4280 if (Tok.isNot(AsmToken::Identifier))
4281 return ParseStatus::NoMatch;
4282
4283 StringRef Name = Tok.getString();
4284 // If there is a kind specifier, it's separated from the register name by
4285 // a '.'.
4286 size_t Start = 0, Next = Name.find('.');
4287 StringRef Head = Name.slice(Start, Next);
4288 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4289
4290 if (RegNum) {
4291 if (Next != StringRef::npos) {
4292 Kind = Name.substr(Next);
4293 if (!isValidVectorKind(Kind, MatchKind))
4294 return TokError("invalid vector kind qualifier");
4295 }
4296 Lex(); // Eat the register token.
4297
4298 Reg = RegNum;
4299 return ParseStatus::Success;
4300 }
4301
4302 return ParseStatus::NoMatch;
4303}
4304
4305 ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4306 OperandVector &Operands) {
4307 ParseStatus Status =
4308 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4309 if (!Status.isSuccess())
4310 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4311 return Status;
4312}
4313
4314/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4315template <RegKind RK>
4316 ParseStatus
4317 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4318 // Check for a SVE predicate register specifier first.
4319 const SMLoc S = getLoc();
4320 StringRef Kind;
4321 MCRegister RegNum;
4322 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4323 if (!Res.isSuccess())
4324 return Res;
4325
4326 const auto &KindRes = parseVectorKind(Kind, RK);
4327 if (!KindRes)
4328 return ParseStatus::NoMatch;
4329
4330 unsigned ElementWidth = KindRes->second;
4331 Operands.push_back(AArch64Operand::CreateVectorReg(
4332 RegNum, RK, ElementWidth, S,
4333 getLoc(), getContext()));
4334
4335 if (getLexer().is(AsmToken::LBrac)) {
4336 if (RK == RegKind::SVEPredicateAsCounter) {
4337 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4338 if (ResIndex.isSuccess())
4339 return ParseStatus::Success;
4340 } else {
4341 // Indexed predicate: there's no comma, so try to parse the next operand
4342 // immediately.
4343 if (parseOperand(Operands, false, false))
4344 return ParseStatus::NoMatch;
4345 }
4346 }
4347
4348 // Not all predicates are followed by a '/m' or '/z'.
4349 if (getTok().isNot(AsmToken::Slash))
4350 return ParseStatus::Success;
4351
4352 // But when they do they shouldn't have an element type suffix.
4353 if (!Kind.empty())
4354 return Error(S, "not expecting size suffix");
4355
4356 // Add a literal slash as operand
4357 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4358
4359 Lex(); // Eat the slash.
4360
4361 // Zeroing or merging?
4362 auto Pred = getTok().getString().lower();
4363 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4364 return Error(getLoc(), "expecting 'z' predication");
4365
4366 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4367 return Error(getLoc(), "expecting 'm' or 'z' predication");
4368
4369 // Add zero/merge token.
4370 const char *ZM = Pred == "z" ? "z" : "m";
4371 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4372
4373 Lex(); // Eat zero/merge token.
4374 return ParseStatus::Success;
4375}
4376
4377/// parseRegister - Parse a register operand.
4378bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4379 // Try for a Neon vector register.
4380 if (!tryParseNeonVectorRegister(Operands))
4381 return false;
4382
4383 if (tryParseZTOperand(Operands).isSuccess())
4384 return false;
4385
4386 // Otherwise try for a scalar register.
4387 if (tryParseGPROperand<false>(Operands).isSuccess())
4388 return false;
4389
4390 return true;
4391}
4392
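/// parseSymbolicImmVal - Parse an immediate expression, optionally prefixed
/// by a relocation specifier such as ':lo12:' or ':abs_g0_nc:'.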
4393bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4394 bool HasELFModifier = false;
4395 AArch64MCExpr::VariantKind RefKind;
4396
4397 if (parseOptionalToken(AsmToken::Colon)) {
4398 HasELFModifier = true;
4399
4400 if (getTok().isNot(AsmToken::Identifier))
4401 return TokError("expect relocation specifier in operand after ':'");
4402
4403 std::string LowerCase = getTok().getIdentifier().lower();
4404 RefKind =
4405 StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4406 .Case("lo12", AArch64MCExpr::VK_LO12)
4407 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4408 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4409 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4410 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4411 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4412 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4413 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4414 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4415 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4416 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4417 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4418 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4419 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4420 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4421 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4422 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4423 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4424 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4425 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4426 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4427 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4428 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4429 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4430 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4431 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4432 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4433 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4434 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4435 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4436 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4437 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4438 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4439 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4440 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4441 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4442 .Case("tlsdesc_auth_lo12", AArch64MCExpr::VK_TLSDESC_AUTH_LO12)
4443 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4444 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4445 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4446 .Case("got_auth", AArch64MCExpr::VK_GOT_AUTH_PAGE)
4447 .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
4448 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4449 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4450 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4451 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4452 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4453 .Case("tlsdesc_auth", AArch64MCExpr::VK_TLSDESC_AUTH_PAGE)
4454 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4455 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4456 .Default(AArch64MCExpr::VK_INVALID);
4457
4458 if (RefKind == AArch64MCExpr::VK_INVALID)
4459 return TokError("expect relocation specifier in operand after ':'");
4460
4461 Lex(); // Eat identifier
4462
4463 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4464 return true;
4465 }
4466
4467 if (getParser().parseExpression(ImmVal))
4468 return true;
4469
4470 if (HasELFModifier)
4471 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4472
4473 return false;
4474}
4475
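/// Parse an SME matrix tile list such as '{ za0.d, za1.d }', or the '{ za }'
/// shorthand that expands to all tiles.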
4476ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4477 if (getTok().isNot(AsmToken::LCurly))
4478 return ParseStatus::NoMatch;
4479
4480 auto ParseMatrixTile = [this](unsigned &Reg,
4481 unsigned &ElementWidth) -> ParseStatus {
4482 StringRef Name = getTok().getString();
4483 size_t DotPosition = Name.find('.');
4484 if (DotPosition == StringRef::npos)
4485 return ParseStatus::NoMatch;
4486
4487 unsigned RegNum = matchMatrixTileListRegName(Name);
4488 if (!RegNum)
4489 return ParseStatus::NoMatch;
4490
4491 StringRef Tail = Name.drop_front(DotPosition);
4492 const std::optional<std::pair<int, int>> &KindRes =
4493 parseVectorKind(Tail, RegKind::Matrix);
4494 if (!KindRes)
4495 return TokError(
4496 "Expected the register to be followed by element width suffix");
4497 ElementWidth = KindRes->second;
4498 Reg = RegNum;
4499 Lex(); // Eat the register.
4500 return ParseStatus::Success;
4501 };
4502
4503 SMLoc S = getLoc();
4504 auto LCurly = getTok();
4505 Lex(); // Eat left bracket token.
4506
4507 // Empty matrix list
4508 if (parseOptionalToken(AsmToken::RCurly)) {
4509 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4510 /*RegMask=*/0, S, getLoc(), getContext()));
4511 return ParseStatus::Success;
4512 }
4513
4514 // Try parse {za} alias early
4515 if (getTok().getString().equals_insensitive("za")) {
4516 Lex(); // Eat 'za'
4517
4518 if (parseToken(AsmToken::RCurly, "'}' expected"))
4519 return ParseStatus::Failure;
4520
4521 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4522 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4523 return ParseStatus::Success;
4524 }
4525
4526 SMLoc TileLoc = getLoc();
4527
4528 unsigned FirstReg, ElementWidth;
4529 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4530 if (!ParseRes.isSuccess()) {
4531 getLexer().UnLex(LCurly);
4532 return ParseRes;
4533 }
4534
4535 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4536
4537 unsigned PrevReg = FirstReg;
4538
4539 SmallSet<unsigned, 8> DRegs;
4540 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4541
4542 SmallSet<unsigned, 8> SeenRegs;
4543 SeenRegs.insert(FirstReg);
4544
4545 while (parseOptionalToken(AsmToken::Comma)) {
4546 TileLoc = getLoc();
4547 unsigned Reg, NextElementWidth;
4548 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4549 if (!ParseRes.isSuccess())
4550 return ParseRes;
4551
4552 // Element size must match on all regs in the list.
4553 if (ElementWidth != NextElementWidth)
4554 return Error(TileLoc, "mismatched register size suffix");
4555
4556 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4557 Warning(TileLoc, "tile list not in ascending order");
4558
4559 if (SeenRegs.contains(Reg))
4560 Warning(TileLoc, "duplicate tile in list");
4561 else {
4562 SeenRegs.insert(Reg);
4563 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4564 }
4565
4566 PrevReg = Reg;
4567 }
4568
4569 if (parseToken(AsmToken::RCurly, "'}' expected"))
4570 return ParseStatus::Failure;
4571
4572 unsigned RegMask = 0;
4573 for (auto Reg : DRegs)
4574 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4575 RI->getEncodingValue(AArch64::ZAD0));
4576 Operands.push_back(
4577 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4578
4579 return ParseStatus::Success;
4580}
4581
4582template <RegKind VectorKind>
4583ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4584 bool ExpectMatch) {
4585 MCAsmParser &Parser = getParser();
4586 if (!getTok().is(AsmToken::LCurly))
4587 return ParseStatus::NoMatch;
4588
4589 // Wrapper around parse function
4590 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4591 bool NoMatchIsError) -> ParseStatus {
4592 auto RegTok = getTok();
4593 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4594 if (ParseRes.isSuccess()) {
4595 if (parseVectorKind(Kind, VectorKind))
4596 return ParseRes;
4597 llvm_unreachable("Expected a valid vector kind");
4598 }
4599
4600 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4601 RegTok.getString().equals_insensitive("zt0"))
4602 return ParseStatus::NoMatch;
4603
4604 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4605 (ParseRes.isNoMatch() && NoMatchIsError &&
4606 !RegTok.getString().starts_with_insensitive("za")))
4607 return Error(Loc, "vector register expected");
4608
4609 return ParseStatus::NoMatch;
4610 };
4611
4612 int NumRegs = getNumRegsForRegKind(VectorKind);
4613 SMLoc S = getLoc();
4614 auto LCurly = getTok();
4615 Lex(); // Eat left bracket token.
4616
4617 StringRef Kind;
4618 MCRegister FirstReg;
4619 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4620
4621 // Put back the original left bracket if there was no match, so that
4622 // different types of list-operands can be matched (e.g. SVE, Neon).
4623 if (ParseRes.isNoMatch())
4624 Parser.getLexer().UnLex(LCurly);
4625
4626 if (!ParseRes.isSuccess())
4627 return ParseRes;
4628
4629 int64_t PrevReg = FirstReg;
4630 unsigned Count = 1;
4631
4632 int Stride = 1;
4633 if (parseOptionalToken(AsmToken::Minus)) {
4634 SMLoc Loc = getLoc();
4635 StringRef NextKind;
4636
4637 MCRegister Reg;
4638 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4639 if (!ParseRes.isSuccess())
4640 return ParseRes;
4641
4642 // Any kind suffixes must match on all regs in the list.
4643 if (Kind != NextKind)
4644 return Error(Loc, "mismatched register size suffix");
4645
4646 unsigned Space =
4647 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4648
4649 if (Space == 0 || Space > 3)
4650 return Error(Loc, "invalid number of vectors");
4651
4652 Count += Space;
4653 }
4654 else {
4655 bool HasCalculatedStride = false;
4656 while (parseOptionalToken(AsmToken::Comma)) {
4657 SMLoc Loc = getLoc();
4658 StringRef NextKind;
4659 MCRegister Reg;
4660 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4661 if (!ParseRes.isSuccess())
4662 return ParseRes;
4663
4664 // Any kind suffixes must match on all regs in the list.
4665 if (Kind != NextKind)
4666 return Error(Loc, "mismatched register size suffix");
4667
4668 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4669 unsigned PrevRegVal =
4670 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4671 if (!HasCalculatedStride) {
4672 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4673 : (RegVal + NumRegs - PrevRegVal);
4674 HasCalculatedStride = true;
4675 }
4676
4677 // Registers must follow the stride (with a wraparound at the last register).
4678 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4679 return Error(Loc, "registers must have the same sequential stride");
4680
4681 PrevReg = Reg;
4682 ++Count;
4683 }
4684 }
4685
4686 if (parseToken(AsmToken::RCurly, "'}' expected"))
4687 return ParseStatus::Failure;
4688
4689 if (Count > 4)
4690 return Error(S, "invalid number of vectors");
4691
4692 unsigned NumElements = 0;
4693 unsigned ElementWidth = 0;
4694 if (!Kind.empty()) {
4695 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4696 std::tie(NumElements, ElementWidth) = *VK;
4697 }
4698
4699 Operands.push_back(AArch64Operand::CreateVectorList(
4700 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4701 getLoc(), getContext()));
4702
4703 return ParseStatus::Success;
4704}
4705
4706/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
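/// For example '{ v0.8b, v1.8b }', or the range form '{ v0.2d - v3.2d }'.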
4707bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4708 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4709 if (!ParseRes.isSuccess())
4710 return true;
4711
4712 return tryParseVectorIndex(Operands).isFailure();
4713}
4714
4715ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4716 SMLoc StartLoc = getLoc();
4717
4718 MCRegister RegNum;
4719 ParseStatus Res = tryParseScalarRegister(RegNum);
4720 if (!Res.isSuccess())
4721 return Res;
4722
4723 if (!parseOptionalToken(AsmToken::Comma)) {
4724 Operands.push_back(AArch64Operand::CreateReg(
4725 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4726 return ParseStatus::Success;
4727 }
4728
4729 parseOptionalToken(AsmToken::Hash);
4730
4731 if (getTok().isNot(AsmToken::Integer))
4732 return Error(getLoc(), "index must be absent or #0");
4733
4734 const MCExpr *ImmVal;
4735 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4736 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4737 return Error(getLoc(), "index must be absent or #0");
4738
4739 Operands.push_back(AArch64Operand::CreateReg(
4740 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4741 return ParseStatus::Success;
4742}
4743
4744ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4745 SMLoc StartLoc = getLoc();
4746 const AsmToken &Tok = getTok();
4747 std::string Name = Tok.getString().lower();
4748
4749 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4750
4751 if (RegNum == 0)
4752 return ParseStatus::NoMatch;
4753
4754 Operands.push_back(AArch64Operand::CreateReg(
4755 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4756 Lex(); // Eat register.
4757
4758 // Check if register is followed by an index
4759 if (parseOptionalToken(AsmToken::LBrac)) {
4760 Operands.push_back(
4761 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4762 const MCExpr *ImmVal;
4763 if (getParser().parseExpression(ImmVal))
4764 return ParseStatus::NoMatch;
4765 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4766 if (!MCE)
4767 return TokError("immediate value expected for vector index");
4768 Operands.push_back(AArch64Operand::CreateImm(
4769 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4770 getLoc(), getContext()));
4771 if (parseOptionalToken(AsmToken::Comma))
4772 if (parseOptionalMulOperand(Operands))
4773 return ParseStatus::Failure;
4774 if (parseToken(AsmToken::RBrac, "']' expected"))
4775 return ParseStatus::Failure;
4776 Operands.push_back(
4777 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4778 }
4779 return ParseStatus::Success;
4780}
4781
4782template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4783ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4784 SMLoc StartLoc = getLoc();
4785
4786 MCRegister RegNum;
4787 ParseStatus Res = tryParseScalarRegister(RegNum);
4788 if (!Res.isSuccess())
4789 return Res;
4790
4791 // No shift/extend is the default.
4792 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4793 Operands.push_back(AArch64Operand::CreateReg(
4794 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4795 return ParseStatus::Success;
4796 }
4797
4798 // Eat the comma
4799 Lex();
4800
4801 // Match the shift
4802 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4803 Res = tryParseOptionalShiftExtend(ExtOpnd);
4804 if (!Res.isSuccess())
4805 return Res;
4806
4807 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4808 Operands.push_back(AArch64Operand::CreateReg(
4809 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4810 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4811 Ext->hasShiftExtendAmount()));
4812
4813 return ParseStatus::Success;
4814}
4815
4816bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4817 MCAsmParser &Parser = getParser();
4818
4819 // Some SVE instructions have a decoration after the immediate, i.e.
4820 // "mul vl". We parse them here and add tokens, which must be present in the
4821 // asm string in the tablegen instruction.
4822 bool NextIsVL =
4823 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4824 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4825 if (!getTok().getString().equals_insensitive("mul") ||
4826 !(NextIsVL || NextIsHash))
4827 return true;
4828
4829 Operands.push_back(
4830 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4831 Lex(); // Eat the "mul"
4832
4833 if (NextIsVL) {
4834 Operands.push_back(
4835 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4836 Lex(); // Eat the "vl"
4837 return false;
4838 }
4839
4840 if (NextIsHash) {
4841 Lex(); // Eat the #
4842 SMLoc S = getLoc();
4843
4844 // Parse immediate operand.
4845 const MCExpr *ImmVal;
4846 if (!Parser.parseExpression(ImmVal))
4847 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4848 Operands.push_back(AArch64Operand::CreateImm(
4849 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4850 getContext()));
4851 return false;
4852 }
4853 }
4854
4855 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4856}
4857
4858bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4859 StringRef &VecGroup) {
4860 MCAsmParser &Parser = getParser();
4861 auto Tok = Parser.getTok();
4862 if (Tok.isNot(AsmToken::Identifier))
4863 return true;
4864
4865 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4866 .Case("vgx2", "vgx2")
4867 .Case("vgx4", "vgx4")
4868 .Default("");
4869
4870 if (VG.empty())
4871 return true;
4872
4873 VecGroup = VG;
4874 Parser.Lex(); // Eat vgx[2|4]
4875 return false;
4876}
4877
4878bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4879 auto Tok = getTok();
4880 if (Tok.isNot(AsmToken::Identifier))
4881 return true;
4882
4883 auto Keyword = Tok.getString();
4884 Keyword = StringSwitch<StringRef>(Keyword.lower())
4885 .Case("sm", "sm")
4886 .Case("za", "za")
4887 .Default(Keyword);
4888 Operands.push_back(
4889 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4890
4891 Lex();
4892 return false;
4893}
4894
4895 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
4896 /// the operand regardless of the mnemonic.
4897bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4898 bool invertCondCode) {
4899 MCAsmParser &Parser = getParser();
4900
4901 ParseStatus ResTy =
4902 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4903
4904 // Check if the current operand has a custom associated parser, if so, try to
4905 // custom parse the operand, or fallback to the general approach.
4906 if (ResTy.isSuccess())
4907 return false;
4908 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4909 // there was a match, but an error occurred, in which case, just return that
4910 // the operand parsing failed.
4911 if (ResTy.isFailure())
4912 return true;
4913
4914 // Nothing custom, so do general case parsing.
4915 SMLoc S, E;
4916 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4917 if (parseOptionalToken(AsmToken::Comma)) {
4918 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4919 if (!Res.isNoMatch())
4920 return Res.isFailure();
4921 getLexer().UnLex(SavedTok);
4922 }
4923 return false;
4924 };
4925 switch (getLexer().getKind()) {
4926 default: {
4927 SMLoc S = getLoc();
4928 const MCExpr *Expr;
4929 if (parseSymbolicImmVal(Expr))
4930 return Error(S, "invalid operand");
4931
4932 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4933 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4934 return parseOptionalShiftExtend(getTok());
4935 }
4936 case AsmToken::LBrac: {
4937 Operands.push_back(
4938 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4939 Lex(); // Eat '['
4940
4941 // There's no comma after a '[', so we can parse the next operand
4942 // immediately.
4943 return parseOperand(Operands, false, false);
4944 }
4945 case AsmToken::LCurly: {
4946 if (!parseNeonVectorList(Operands))
4947 return false;
4948
4949 Operands.push_back(
4950 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4951 Lex(); // Eat '{'
4952
4953 // There's no comma after a '{', so we can parse the next operand
4954 // immediately.
4955 return parseOperand(Operands, false, false);
4956 }
4957 case AsmToken::Identifier: {
4958 // See if this is a "VG" decoration used by SME instructions.
4959 StringRef VecGroup;
4960 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4961 Operands.push_back(
4962 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4963 return false;
4964 }
4965 // If we're expecting a Condition Code operand, then just parse that.
4966 if (isCondCode)
4967 return parseCondCode(Operands, invertCondCode);
4968
4969 // If it's a register name, parse it.
4970 if (!parseRegister(Operands)) {
4971 // Parse an optional shift/extend modifier.
4972 AsmToken SavedTok = getTok();
4973 if (parseOptionalToken(AsmToken::Comma)) {
4974 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4975 // such cases and don't report an error when <label> happens to match a
4976 // shift/extend modifier.
4977 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4978 /*ParseForAllFeatures=*/true);
4979 if (!Res.isNoMatch())
4980 return Res.isFailure();
4981 Res = tryParseOptionalShiftExtend(Operands);
4982 if (!Res.isNoMatch())
4983 return Res.isFailure();
4984 getLexer().UnLex(SavedTok);
4985 }
4986 return false;
4987 }
4988
4989 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4990 // by SVE instructions.
4991 if (!parseOptionalMulOperand(Operands))
4992 return false;
4993
4994 // If this is a two-word mnemonic, parse its special keyword
4995 // operand as an identifier.
4996 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4997 Mnemonic == "gcsb")
4998 return parseKeywordOperand(Operands);
4999
5000 // This was not a register so parse other operands that start with an
5001 // identifier (like labels) as expressions and create them as immediates.
5002 const MCExpr *IdVal;
5003 S = getLoc();
5004 if (getParser().parseExpression(IdVal))
5005 return true;
5006 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5007 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5008 return false;
5009 }
5010 case AsmToken::Integer:
5011 case AsmToken::Real:
5012 case AsmToken::Hash: {
5013 // #42 -> immediate.
5014 S = getLoc();
5015
5016 parseOptionalToken(AsmToken::Hash);
5017
5018 // Parse a negative sign
5019 bool isNegative = false;
5020 if (getTok().is(AsmToken::Minus)) {
5021 isNegative = true;
5022 // We need to consume this token only when we have a Real, otherwise
5023 // we let parseSymbolicImmVal take care of it
5024 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5025 Lex();
5026 }
5027
5028 // The only Real that should come through here is a literal #0.0 for
5029 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5030 // so convert the value.
5031 const AsmToken &Tok = getTok();
5032 if (Tok.is(AsmToken::Real)) {
5033 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5034 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5035 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5036 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5037 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5038 return TokError("unexpected floating point literal");
5039 else if (IntVal != 0 || isNegative)
5040 return TokError("expected floating-point constant #0.0");
5041 Lex(); // Eat the token.
5042
5043 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5044 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5045 return false;
5046 }
5047
5048 const MCExpr *ImmVal;
5049 if (parseSymbolicImmVal(ImmVal))
5050 return true;
5051
5052 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5053 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5054
5055 // Parse an optional shift/extend modifier.
5056 return parseOptionalShiftExtend(Tok);
5057 }
5058 case AsmToken::Equal: {
5059 SMLoc Loc = getLoc();
5060 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5061 return TokError("unexpected token in operand");
5062 Lex(); // Eat '='
5063 const MCExpr *SubExprVal;
5064 if (getParser().parseExpression(SubExprVal))
5065 return true;
5066
5067 if (Operands.size() < 2 ||
5068 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5069 return Error(Loc, "Only valid when first operand is register");
5070
5071 bool IsXReg =
5072 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5073 Operands[1]->getReg());
5074
5075 MCContext& Ctx = getContext();
5076 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5077 // If the op is an imm and can fit into a mov, then replace ldr with mov.
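// For example 'ldr x0, =0x10000' is assembled as 'movz x0, #1, lsl #16'.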
5078 if (isa<MCConstantExpr>(SubExprVal)) {
5079 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5080 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5081 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5082 ShiftAmt += 16;
5083 Imm >>= 16;
5084 }
5085 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5086 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5087 Operands.push_back(AArch64Operand::CreateImm(
5088 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5089 if (ShiftAmt)
5090 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5091 ShiftAmt, true, S, E, Ctx));
5092 return false;
5093 }
5094 APInt Simm = APInt(64, Imm << ShiftAmt);
5095 // check if the immediate is an unsigned or signed 32-bit int for W regs
5096 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5097 return Error(Loc, "Immediate too large for register");
5098 }
5099 // If it is a label or an imm that cannot fit in a movz, put it into CP.
5100 const MCExpr *CPLoc =
5101 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5102 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5103 return false;
5104 }
5105 }
5106}
5107
5108bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5109 const MCExpr *Expr = nullptr;
5110 SMLoc L = getLoc();
5111 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5112 return true;
5113 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5114 if (check(!Value, L, "expected constant expression"))
5115 return true;
5116 Out = Value->getValue();
5117 return false;
5118}
5119
5120bool AArch64AsmParser::parseComma() {
5121 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5122 return true;
5123 // Eat the comma
5124 Lex();
5125 return false;
5126}
5127
5128bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5129 unsigned First, unsigned Last) {
5130 MCRegister Reg;
5131 SMLoc Start, End;
5132 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5133 return true;
5134
5135 // Special handling for FP and LR; they aren't linearly after x28 in
5136 // the registers enum.
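 // e.g. with Base == X0, "fp" maps to index 29 and "lr" to index 30, as if they directly
 // followed x28.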
5137 unsigned RangeEnd = Last;
5138 if (Base == AArch64::X0) {
5139 if (Last == AArch64::FP) {
5140 RangeEnd = AArch64::X28;
5141 if (Reg == AArch64::FP) {
5142 Out = 29;
5143 return false;
5144 }
5145 }
5146 if (Last == AArch64::LR) {
5147 RangeEnd = AArch64::X28;
5148 if (Reg == AArch64::FP) {
5149 Out = 29;
5150 return false;
5151 } else if (Reg == AArch64::LR) {
5152 Out = 30;
5153 return false;
5154 }
5155 }
5156 }
5157
5158 if (check(Reg < First || Reg > RangeEnd, Start,
5159 Twine("expected register in range ") +
5160 AArch64InstPrinter::getRegisterName(First) + " to " +
5161 AArch64InstPrinter::getRegisterName(Last)))
5162 return true;
5163 Out = Reg - Base;
5164 return false;
5165}
5166
5167bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5168 const MCParsedAsmOperand &Op2) const {
5169 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5170 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5171
5172 if (AOp1.isVectorList() && AOp2.isVectorList())
5173 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5174 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5175 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5176
5177 if (!AOp1.isReg() || !AOp2.isReg())
5178 return false;
5179
5180 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5181 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5182 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5183
5184 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5185 "Testing equality of non-scalar registers not supported");
5186
5187 // Check if the registers match their sub/super register classes.
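 // e.g. an operand constrained as EqualsSuperReg accepts w1 when the tied destination is x1,
 // and one constrained as EqualsSubReg accepts x1 when the destination is w1.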
5188 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5189 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5190 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5191 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5192 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5193 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5194 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5195 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5196
5197 return false;
5198}
5199
5200/// Parse an AArch64 instruction mnemonic followed by its operands.
5201bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5202 StringRef Name, SMLoc NameLoc,
5203 OperandVector &Operands) {
5204 Name = StringSwitch<StringRef>(Name.lower())
5205 .Case("beq", "b.eq")
5206 .Case("bne", "b.ne")
5207 .Case("bhs", "b.hs")
5208 .Case("bcs", "b.cs")
5209 .Case("blo", "b.lo")
5210 .Case("bcc", "b.cc")
5211 .Case("bmi", "b.mi")
5212 .Case("bpl", "b.pl")
5213 .Case("bvs", "b.vs")
5214 .Case("bvc", "b.vc")
5215 .Case("bhi", "b.hi")
5216 .Case("bls", "b.ls")
5217 .Case("bge", "b.ge")
5218 .Case("blt", "b.lt")
5219 .Case("bgt", "b.gt")
5220 .Case("ble", "b.le")
5221 .Case("bal", "b.al")
5222 .Case("bnv", "b.nv")
5223 .Default(Name);
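 // e.g. the legacy spelling "beq target" is rewritten to "b.eq target" before the mnemonic
 // is split into tokens below.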
5224
5225 // First check for the AArch64-specific .req directive.
5226 if (getTok().is(AsmToken::Identifier) &&
5227 getTok().getIdentifier().lower() == ".req") {
5228 parseDirectiveReq(Name, NameLoc);
5229 // We always return 'error' for this, as we're done with this
5230 // statement and don't need to match the 'instruction'.
5231 return true;
5232 }
5233
5234 // Create the leading tokens for the mnemonic, split by '.' characters.
5235 size_t Start = 0, Next = Name.find('.');
5236 StringRef Head = Name.slice(Start, Next);
5237
5238 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5239 // the SYS instruction.
5240 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5241 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5242 return parseSysAlias(Head, NameLoc, Operands);
5243
5244 // TLBIP instructions are aliases for the SYSP instruction.
5245 if (Head == "tlbip")
5246 return parseSyspAlias(Head, NameLoc, Operands);
5247
5248 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5249 Mnemonic = Head;
5250
5251 // Handle condition codes for a branch mnemonic
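 // e.g. "b.ne" is split into the head token "b", a "." suffix token and an NE
 // condition-code operand.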
5252 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5253 Start = Next;
5254 Next = Name.find('.', Start + 1);
5255 Head = Name.slice(Start + 1, Next);
5256
5257 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5258 (Head.data() - Name.data()));
5259 std::string Suggestion;
5260 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5261 if (CC == AArch64CC::Invalid) {
5262 std::string Msg = "invalid condition code";
5263 if (!Suggestion.empty())
5264 Msg += ", did you mean " + Suggestion + "?";
5265 return Error(SuffixLoc, Msg);
5266 }
5267 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5268 /*IsSuffix=*/true));
5269 Operands.push_back(
5270 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5271 }
5272
5273 // Add the remaining tokens in the mnemonic.
5274 while (Next != StringRef::npos) {
5275 Start = Next;
5276 Next = Name.find('.', Start + 1);
5277 Head = Name.slice(Start, Next);
5278 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5279 (Head.data() - Name.data()) + 1);
5280 Operands.push_back(AArch64Operand::CreateToken(
5281 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5282 }
5283
5284 // Conditional compare instructions have a Condition Code operand, which needs
5285 // to be parsed and an immediate operand created.
5286 bool condCodeFourthOperand =
5287 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5288 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5289 Head == "csinc" || Head == "csinv" || Head == "csneg");
5290
5291 // These instructions are aliases to some of the conditional select
5292 // instructions. However, the condition code is inverted in the aliased
5293 // instruction.
5294 //
5295 // FIXME: Is this the correct way to handle these? Or should the parser
5296 // generate the aliased instructions directly?
5297 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5298 bool condCodeThirdOperand =
5299 (Head == "cinc" || Head == "cinv" || Head == "cneg");
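 // e.g. "ccmp x0, x1, #0, ne" takes the condition code as its fourth operand, while
 // "cset w0, eq" takes it as its second; the booleans above tell parseOperand where to
 // expect it and whether the alias requires it to be inverted.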
5300
5301 // Read the remaining operands.
5302 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5303
5304 unsigned N = 1;
5305 do {
5306 // Parse and remember the operand.
5307 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5308 (N == 3 && condCodeThirdOperand) ||
5309 (N == 2 && condCodeSecondOperand),
5310 condCodeSecondOperand || condCodeThirdOperand)) {
5311 return true;
5312 }
5313
5314 // After successfully parsing some operands there are three special cases
5315 // to consider (i.e. notional operands not separated by commas). Two are
5316 // due to memory specifiers:
5317 // + An RBrac will end an address for load/store/prefetch
5318 // + An '!' will indicate a pre-indexed operation.
5319 //
5320 // And a further case is '}', which ends a group of tokens specifying the
5321 // SME accumulator array 'ZA' or tile vector, i.e.
5322 //
5323 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5324 //
5325 // It's someone else's responsibility to make sure these tokens are sane
5326 // in the given context!
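 // e.g. "ldr x0, [x1, #8]!" ends with "]" and "!" tokens appended here, and "zero {za}"
 // ends with a "}" token, none of them preceded by a comma.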
5327
5328 if (parseOptionalToken(AsmToken::RBrac))
5329 Operands.push_back(
5330 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5331 if (parseOptionalToken(AsmToken::Exclaim))
5332 Operands.push_back(
5333 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5334 if (parseOptionalToken(AsmToken::RCurly))
5335 Operands.push_back(
5336 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5337
5338 ++N;
5339 } while (parseOptionalToken(AsmToken::Comma));
5340 }
5341
5342 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5343 return true;
5344
5345 return false;
5346}
5347
5348static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5349 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5350 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5351 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5352 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5353 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5354 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5355 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5356}
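// e.g. isMatchingOrAlias(Z3, D3) is true because d3 overlaps z3; validateInstruction uses
// this to catch a movprfx destination that reappears as a source under a different
// register view.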
5357
5358// FIXME: This entire function is a giant hack to provide us with decent
5359// operand range validation/diagnostics until TableGen/MC can be extended
5360// to support autogeneration of this kind of validation.
5361bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5362 SmallVectorImpl<SMLoc> &Loc) {
5363 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5364 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5365
5366 // A prefix only applies to the instruction following it. Here we extract
5367 // prefix information for the next instruction before validating the current
5368 // one so that in the case of failure we don't erroneously continue using the
5369 // current prefix.
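 // e.g. "movprfx z0, z5" followed by "add z0.s, p0/m, z0.s, z1.s" passes the checks below,
 // whereas writing the add to a different Z register or reusing z0 as another source does not.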
5370 PrefixInfo Prefix = NextPrefix;
5371 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5372
5373 // Before validating the instruction in isolation we run through the rules
5374 // applicable when it follows a prefix instruction.
5375 // NOTE: brk & hlt can be prefixed but require no additional validation.
5376 if (Prefix.isActive() &&
5377 (Inst.getOpcode() != AArch64::BRK) &&
5378 (Inst.getOpcode() != AArch64::HLT)) {
5379
5380 // Prefixed instructions must have a destructive operand.
5381 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5382 AArch64::NotDestructive)
5383 return Error(IDLoc, "instruction is unpredictable when following a"
5384 " movprfx, suggest replacing movprfx with mov");
5385
5386 // Destination operands must match.
5387 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5388 return Error(Loc[0], "instruction is unpredictable when following a"
5389 " movprfx writing to a different destination");
5390
5391 // Destination operand must not be used in any other location.
5392 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5393 if (Inst.getOperand(i).isReg() &&
5394 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5395 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5396 return Error(Loc[0], "instruction is unpredictable when following a"
5397 " movprfx and destination also used as non-destructive"
5398 " source");
5399 }
5400
5401 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5402 if (Prefix.isPredicated()) {
5403 int PgIdx = -1;
5404
5405 // Find the instruction's general predicate.
5406 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5407 if (Inst.getOperand(i).isReg() &&
5408 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5409 PgIdx = i;
5410 break;
5411 }
5412
5413 // Instruction must be predicated if the movprfx is predicated.
5414 if (PgIdx == -1 ||
5415 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5416 return Error(IDLoc, "instruction is unpredictable when following a"
5417 " predicated movprfx, suggest using unpredicated movprfx");
5418
5419 // Instruction must use same general predicate as the movprfx.
5420 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5421 return Error(IDLoc, "instruction is unpredictable when following a"
5422 " predicated movprfx using a different general predicate");
5423
5424 // Instruction element type must match the movprfx.
5425 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5426 return Error(IDLoc, "instruction is unpredictable when following a"
5427 " predicated movprfx with a different element size");
5428 }
5429 }
5430
5431 // On ARM64EC, only valid registers may be used. Warn against using
5432 // explicitly disallowed registers.
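 // e.g. "mov x13, #0" still assembles, but x13 is in the disallowed set and draws the
 // warning below.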
5433 if (IsWindowsArm64EC) {
5434 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5435 if (Inst.getOperand(i).isReg()) {
5436 MCRegister Reg = Inst.getOperand(i).getReg();
5437 // At this point, vector registers are matched to their
5438 // appropriately sized alias.
5439 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5440 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5441 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5442 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5443 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5444 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5445 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5446 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5447 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5448 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5449 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5450 " is disallowed on ARM64EC.");
5451 }
5452 }
5453 }
5454 }
5455
5456 // Check for indexed addressing modes where the base register is the
5457 // same as a destination/source register, or for a pair load where
5458 // Rt == Rt2. All of those are undefined behaviour.
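 // e.g. "ldp x0, x1, [x0], #16" (writeback base is also a destination) and
 // "ldp x2, x2, [x3]" (Rt2 == Rt) are both diagnosed below.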
5459 switch (Inst.getOpcode()) {
5460 case AArch64::LDPSWpre:
5461 case AArch64::LDPWpost:
5462 case AArch64::LDPWpre:
5463 case AArch64::LDPXpost:
5464 case AArch64::LDPXpre: {
5465 MCRegister Rt = Inst.getOperand(1).getReg();
5466 MCRegister Rt2 = Inst.getOperand(2).getReg();
5467 MCRegister Rn = Inst.getOperand(3).getReg();
5468 if (RI->isSubRegisterEq(Rn, Rt))
5469 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5470 "is also a destination");
5471 if (RI->isSubRegisterEq(Rn, Rt2))
5472 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5473 "is also a destination");
5474 [[fallthrough]];
5475 }
5476 case AArch64::LDR_ZA:
5477 case AArch64::STR_ZA: {
5478 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5479 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5480 return Error(Loc[1],
5481 "unpredictable instruction, immediate and offset mismatch.");
5482 break;
5483 }
5484 case AArch64::LDPDi:
5485 case AArch64::LDPQi:
5486 case AArch64::LDPSi:
5487 case AArch64::LDPSWi:
5488 case AArch64::LDPWi:
5489 case AArch64::LDPXi: {
5490 MCRegister Rt = Inst.getOperand(0).getReg();
5491 MCRegister Rt2 = Inst.getOperand(1).getReg();
5492 if (Rt == Rt2)
5493 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5494 break;
5495 }
5496 case AArch64::LDPDpost:
5497 case AArch64::LDPDpre:
5498 case AArch64::LDPQpost:
5499 case AArch64::LDPQpre:
5500 case AArch64::LDPSpost:
5501 case AArch64::LDPSpre:
5502 case AArch64::LDPSWpost: {
5503 MCRegister Rt = Inst.getOperand(1).getReg();
5504 MCRegister Rt2 = Inst.getOperand(2).getReg();
5505 if (Rt == Rt2)
5506 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5507 break;
5508 }
5509 case AArch64::STPDpost:
5510 case AArch64::STPDpre:
5511 case AArch64::STPQpost:
5512 case AArch64::STPQpre:
5513 case AArch64::STPSpost:
5514 case AArch64::STPSpre:
5515 case AArch64::STPWpost:
5516 case AArch64::STPWpre:
5517 case AArch64::STPXpost:
5518 case AArch64::STPXpre: {
5519 MCRegister Rt = Inst.getOperand(1).getReg();
5520 MCRegister Rt2 = Inst.getOperand(2).getReg();
5521 MCRegister Rn = Inst.getOperand(3).getReg();
5522 if (RI->isSubRegisterEq(Rn, Rt))
5523 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5524 "is also a source");
5525 if (RI->isSubRegisterEq(Rn, Rt2))
5526 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5527 "is also a source");
5528 break;
5529 }
5530 case AArch64::LDRBBpre:
5531 case AArch64::LDRBpre:
5532 case AArch64::LDRHHpre:
5533 case AArch64::LDRHpre:
5534 case AArch64::LDRSBWpre:
5535 case AArch64::LDRSBXpre:
5536 case AArch64::LDRSHWpre:
5537 case AArch64::LDRSHXpre:
5538 case AArch64::LDRSWpre:
5539 case AArch64::LDRWpre:
5540 case AArch64::LDRXpre:
5541 case AArch64::LDRBBpost:
5542 case AArch64::LDRBpost:
5543 case AArch64::LDRHHpost:
5544 case AArch64::LDRHpost:
5545 case AArch64::LDRSBWpost:
5546 case AArch64::LDRSBXpost:
5547 case AArch64::LDRSHWpost:
5548 case AArch64::LDRSHXpost:
5549 case AArch64::LDRSWpost:
5550 case AArch64::LDRWpost:
5551 case AArch64::LDRXpost: {
5552 MCRegister Rt = Inst.getOperand(1).getReg();
5553 MCRegister Rn = Inst.getOperand(2).getReg();
5554 if (RI->isSubRegisterEq(Rn, Rt))
5555 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5556 "is also a source");
5557 break;
5558 }
5559 case AArch64::STRBBpost:
5560 case AArch64::STRBpost:
5561 case AArch64::STRHHpost:
5562 case AArch64::STRHpost:
5563 case AArch64::STRWpost:
5564 case AArch64::STRXpost:
5565 case AArch64::STRBBpre:
5566 case AArch64::STRBpre:
5567 case AArch64::STRHHpre:
5568 case AArch64::STRHpre:
5569 case AArch64::STRWpre:
5570 case AArch64::STRXpre: {
5571 MCRegister Rt = Inst.getOperand(1).getReg();
5572 MCRegister Rn = Inst.getOperand(2).getReg();
5573 if (RI->isSubRegisterEq(Rn, Rt))
5574 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5575 "is also a source");
5576 break;
5577 }
5578 case AArch64::STXRB:
5579 case AArch64::STXRH:
5580 case AArch64::STXRW:
5581 case AArch64::STXRX:
5582 case AArch64::STLXRB:
5583 case AArch64::STLXRH:
5584 case AArch64::STLXRW:
5585 case AArch64::STLXRX: {
5586 MCRegister Rs = Inst.getOperand(0).getReg();
5587 MCRegister Rt = Inst.getOperand(1).getReg();
5588 MCRegister Rn = Inst.getOperand(2).getReg();
5589 if (RI->isSubRegisterEq(Rt, Rs) ||
5590 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5591 return Error(Loc[0],
5592 "unpredictable STXR instruction, status is also a source");
5593 break;
5594 }
5595 case AArch64::STXPW:
5596 case AArch64::STXPX:
5597 case AArch64::STLXPW:
5598 case AArch64::STLXPX: {
5599 MCRegister Rs = Inst.getOperand(0).getReg();
5600 MCRegister Rt1 = Inst.getOperand(1).getReg();
5601 MCRegister Rt2 = Inst.getOperand(2).getReg();
5602 MCRegister Rn = Inst.getOperand(3).getReg();
5603 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5604 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5605 return Error(Loc[0],
5606 "unpredictable STXP instruction, status is also a source");
5607 break;
5608 }
5609 case AArch64::LDRABwriteback:
5610 case AArch64::LDRAAwriteback: {
5611 MCRegister Xt = Inst.getOperand(0).getReg();
5612 MCRegister Xn = Inst.getOperand(1).getReg();
5613 if (Xt == Xn)
5614 return Error(Loc[0],
5615 "unpredictable LDRA instruction, writeback base"
5616 " is also a destination");
5617 break;
5618 }
5619 }
5620
5621 // Check v8.8-A memops instructions.
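 // e.g. "cpyfp [x0]!, [x1]!, x2!" requires x0, x1 and x2 to be pairwise distinct and each
 // writeback operand to match the corresponding address/size operand; the SET forms such
 // as "setp [x0]!, x1!, x2" are checked the same way.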
5622 switch (Inst.getOpcode()) {
5623 case AArch64::CPYFP:
5624 case AArch64::CPYFPWN:
5625 case AArch64::CPYFPRN:
5626 case AArch64::CPYFPN:
5627 case AArch64::CPYFPWT:
5628 case AArch64::CPYFPWTWN:
5629 case AArch64::CPYFPWTRN:
5630 case AArch64::CPYFPWTN:
5631 case AArch64::CPYFPRT:
5632 case AArch64::CPYFPRTWN:
5633 case AArch64::CPYFPRTRN:
5634 case AArch64::CPYFPRTN:
5635 case AArch64::CPYFPT:
5636 case AArch64::CPYFPTWN:
5637 case AArch64::CPYFPTRN:
5638 case AArch64::CPYFPTN:
5639 case AArch64::CPYFM:
5640 case AArch64::CPYFMWN:
5641 case AArch64::CPYFMRN:
5642 case AArch64::CPYFMN:
5643 case AArch64::CPYFMWT:
5644 case AArch64::CPYFMWTWN:
5645 case AArch64::CPYFMWTRN:
5646 case AArch64::CPYFMWTN:
5647 case AArch64::CPYFMRT:
5648 case AArch64::CPYFMRTWN:
5649 case AArch64::CPYFMRTRN:
5650 case AArch64::CPYFMRTN:
5651 case AArch64::CPYFMT:
5652 case AArch64::CPYFMTWN:
5653 case AArch64::CPYFMTRN:
5654 case AArch64::CPYFMTN:
5655 case AArch64::CPYFE:
5656 case AArch64::CPYFEWN:
5657 case AArch64::CPYFERN:
5658 case AArch64::CPYFEN:
5659 case AArch64::CPYFEWT:
5660 case AArch64::CPYFEWTWN:
5661 case AArch64::CPYFEWTRN:
5662 case AArch64::CPYFEWTN:
5663 case AArch64::CPYFERT:
5664 case AArch64::CPYFERTWN:
5665 case AArch64::CPYFERTRN:
5666 case AArch64::CPYFERTN:
5667 case AArch64::CPYFET:
5668 case AArch64::CPYFETWN:
5669 case AArch64::CPYFETRN:
5670 case AArch64::CPYFETN:
5671 case AArch64::CPYP:
5672 case AArch64::CPYPWN:
5673 case AArch64::CPYPRN:
5674 case AArch64::CPYPN:
5675 case AArch64::CPYPWT:
5676 case AArch64::CPYPWTWN:
5677 case AArch64::CPYPWTRN:
5678 case AArch64::CPYPWTN:
5679 case AArch64::CPYPRT:
5680 case AArch64::CPYPRTWN:
5681 case AArch64::CPYPRTRN:
5682 case AArch64::CPYPRTN:
5683 case AArch64::CPYPT:
5684 case AArch64::CPYPTWN:
5685 case AArch64::CPYPTRN:
5686 case AArch64::CPYPTN:
5687 case AArch64::CPYM:
5688 case AArch64::CPYMWN:
5689 case AArch64::CPYMRN:
5690 case AArch64::CPYMN:
5691 case AArch64::CPYMWT:
5692 case AArch64::CPYMWTWN:
5693 case AArch64::CPYMWTRN:
5694 case AArch64::CPYMWTN:
5695 case AArch64::CPYMRT:
5696 case AArch64::CPYMRTWN:
5697 case AArch64::CPYMRTRN:
5698 case AArch64::CPYMRTN:
5699 case AArch64::CPYMT:
5700 case AArch64::CPYMTWN:
5701 case AArch64::CPYMTRN:
5702 case AArch64::CPYMTN:
5703 case AArch64::CPYE:
5704 case AArch64::CPYEWN:
5705 case AArch64::CPYERN:
5706 case AArch64::CPYEN:
5707 case AArch64::CPYEWT:
5708 case AArch64::CPYEWTWN:
5709 case AArch64::CPYEWTRN:
5710 case AArch64::CPYEWTN:
5711 case AArch64::CPYERT:
5712 case AArch64::CPYERTWN:
5713 case AArch64::CPYERTRN:
5714 case AArch64::CPYERTN:
5715 case AArch64::CPYET:
5716 case AArch64::CPYETWN:
5717 case AArch64::CPYETRN:
5718 case AArch64::CPYETN: {
5719 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5720 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5721 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5722 MCRegister Xd = Inst.getOperand(3).getReg();
5723 MCRegister Xs = Inst.getOperand(4).getReg();
5724 MCRegister Xn = Inst.getOperand(5).getReg();
5725 if (Xd_wb != Xd)
5726 return Error(Loc[0],
5727 "invalid CPY instruction, Xd_wb and Xd do not match");
5728 if (Xs_wb != Xs)
5729 return Error(Loc[0],
5730 "invalid CPY instruction, Xs_wb and Xs do not match");
5731 if (Xn_wb != Xn)
5732 return Error(Loc[0],
5733 "invalid CPY instruction, Xn_wb and Xn do not match");
5734 if (Xd == Xs)
5735 return Error(Loc[0], "invalid CPY instruction, destination and source"
5736 " registers are the same");
5737 if (Xd == Xn)
5738 return Error(Loc[0], "invalid CPY instruction, destination and size"
5739 " registers are the same");
5740 if (Xs == Xn)
5741 return Error(Loc[0], "invalid CPY instruction, source and size"
5742 " registers are the same");
5743 break;
5744 }
5745 case AArch64::SETP:
5746 case AArch64::SETPT:
5747 case AArch64::SETPN:
5748 case AArch64::SETPTN:
5749 case AArch64::SETM:
5750 case AArch64::SETMT:
5751 case AArch64::SETMN:
5752 case AArch64::SETMTN:
5753 case AArch64::SETE:
5754 case AArch64::SETET:
5755 case AArch64::SETEN:
5756 case AArch64::SETETN:
5757 case AArch64::SETGP:
5758 case AArch64::SETGPT:
5759 case AArch64::SETGPN:
5760 case AArch64::SETGPTN:
5761 case AArch64::SETGM:
5762 case AArch64::SETGMT:
5763 case AArch64::SETGMN:
5764 case AArch64::SETGMTN:
5765 case AArch64::MOPSSETGE:
5766 case AArch64::MOPSSETGET:
5767 case AArch64::MOPSSETGEN:
5768 case AArch64::MOPSSETGETN: {
5769 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5770 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5771 MCRegister Xd = Inst.getOperand(2).getReg();
5772 MCRegister Xn = Inst.getOperand(3).getReg();
5773 MCRegister Xm = Inst.getOperand(4).getReg();
5774 if (Xd_wb != Xd)
5775 return Error(Loc[0],
5776 "invalid SET instruction, Xd_wb and Xd do not match");
5777 if (Xn_wb != Xn)
5778 return Error(Loc[0],
5779 "invalid SET instruction, Xn_wb and Xn do not match");
5780 if (Xd == Xn)
5781 return Error(Loc[0], "invalid SET instruction, destination and size"
5782 " registers are the same");
5783 if (Xd == Xm)
5784 return Error(Loc[0], "invalid SET instruction, destination and source"
5785 " registers are the same");
5786 if (Xn == Xm)
5787 return Error(Loc[0], "invalid SET instruction, source and size"
5788 " registers are the same");
5789 break;
5790 }
5791 }
5792
5793 // Now check immediate ranges. Separate from the above as there is overlap
5794 // in the instructions being checked and this keeps the nested conditionals
5795 // to a minimum.
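 // e.g. "add x0, x1, :lo12:sym" is accepted for ADDXri/ADDWri below, while a stray symbol
 // such as "cmp w0, sym" is rejected with "invalid immediate expression".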
5796 switch (Inst.getOpcode()) {
5797 case AArch64::ADDSWri:
5798 case AArch64::ADDSXri:
5799 case AArch64::ADDWri:
5800 case AArch64::ADDXri:
5801 case AArch64::SUBSWri:
5802 case AArch64::SUBSXri:
5803 case AArch64::SUBWri:
5804 case AArch64::SUBXri: {
5805 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5806 // some slight duplication here.
5807 if (Inst.getOperand(2).isExpr()) {
5808 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5809 AArch64MCExpr::VariantKind ELFRefKind;
5810 MCSymbolRefExpr::VariantKind DarwinRefKind;
5811 int64_t Addend;
5812 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5813
5814 // Only allow these with ADDXri.
5815 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5816 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5817 Inst.getOpcode() == AArch64::ADDXri)
5818 return false;
5819
5820 // Only allow these with ADDXri/ADDWri
5821 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5822 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
5823 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5824 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5825 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5826 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5827 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5828 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5829 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5831 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5832 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5833 (Inst.getOpcode() == AArch64::ADDXri ||
5834 Inst.getOpcode() == AArch64::ADDWri))
5835 return false;
5836
5837 // Otherwise, don't allow symbol refs in the immediate field.
5838 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5839 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5840 // 'cmp w0, 'borked')
5841 return Error(Loc.back(), "invalid immediate expression");
5842 }
5843 // We don't validate more complex expressions here
5844 }
5845 return false;
5846 }
5847 default:
5848 return false;
5849 }
5850}
5851
5852static std::string AArch64MnemonicSpellCheck(StringRef S,
5853 const FeatureBitset &FBS,
5854 unsigned VariantID = 0);
5855
5856bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5857 uint64_t ErrorInfo,
5858 OperandVector &Operands) {
5859 switch (ErrCode) {
5860 case Match_InvalidTiedOperand: {
5861 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5862 if (Op.isVectorList())
5863 return Error(Loc, "operand must match destination register list");
5864
5865 assert(Op.isReg() && "Unexpected operand type");
5866 switch (Op.getRegEqualityTy()) {
5867 case RegConstraintEqualityTy::EqualsSubReg:
5868 return Error(Loc, "operand must be 64-bit form of destination register");
5869 case RegConstraintEqualityTy::EqualsSuperReg:
5870 return Error(Loc, "operand must be 32-bit form of destination register");
5871 case RegConstraintEqualityTy::EqualsReg:
5872 return Error(Loc, "operand must match destination register");
5873 }
5874 llvm_unreachable("Unknown RegConstraintEqualityTy");
5875 }
5876 case Match_MissingFeature:
5877 return Error(Loc,
5878 "instruction requires a CPU feature not currently enabled");
5879 case Match_InvalidOperand:
5880 return Error(Loc, "invalid operand for instruction");
5881 case Match_InvalidSuffix:
5882 return Error(Loc, "invalid type suffix for instruction");
5883 case Match_InvalidCondCode:
5884 return Error(Loc, "expected AArch64 condition code");
5885 case Match_AddSubRegExtendSmall:
5886 return Error(Loc,
5887 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5888 case Match_AddSubRegExtendLarge:
5889 return Error(Loc,
5890 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5891 case Match_AddSubSecondSource:
5892 return Error(Loc,
5893 "expected compatible register, symbol or integer in range [0, 4095]");
5894 case Match_LogicalSecondSource:
5895 return Error(Loc, "expected compatible register or logical immediate");
5896 case Match_InvalidMovImm32Shift:
5897 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5898 case Match_InvalidMovImm64Shift:
5899 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5900 case Match_AddSubRegShift32:
5901 return Error(Loc,
5902 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5903 case Match_AddSubRegShift64:
5904 return Error(Loc,
5905 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5906 case Match_InvalidFPImm:
5907 return Error(Loc,
5908 "expected compatible register or floating-point constant");
5909 case Match_InvalidMemoryIndexedSImm6:
5910 return Error(Loc, "index must be an integer in range [-32, 31].");
5911 case Match_InvalidMemoryIndexedSImm5:
5912 return Error(Loc, "index must be an integer in range [-16, 15].");
5913 case Match_InvalidMemoryIndexed1SImm4:
5914 return Error(Loc, "index must be an integer in range [-8, 7].");
5915 case Match_InvalidMemoryIndexed2SImm4:
5916 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5917 case Match_InvalidMemoryIndexed3SImm4:
5918 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5919 case Match_InvalidMemoryIndexed4SImm4:
5920 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5921 case Match_InvalidMemoryIndexed16SImm4:
5922 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5923 case Match_InvalidMemoryIndexed32SImm4:
5924 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5925 case Match_InvalidMemoryIndexed1SImm6:
5926 return Error(Loc, "index must be an integer in range [-32, 31].");
5927 case Match_InvalidMemoryIndexedSImm8:
5928 return Error(Loc, "index must be an integer in range [-128, 127].");
5929 case Match_InvalidMemoryIndexedSImm9:
5930 return Error(Loc, "index must be an integer in range [-256, 255].");
5931 case Match_InvalidMemoryIndexed16SImm9:
5932 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5933 case Match_InvalidMemoryIndexed8SImm10:
5934 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5935 case Match_InvalidMemoryIndexed4SImm7:
5936 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5937 case Match_InvalidMemoryIndexed8SImm7:
5938 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5939 case Match_InvalidMemoryIndexed16SImm7:
5940 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5941 case Match_InvalidMemoryIndexed8UImm5:
5942 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5943 case Match_InvalidMemoryIndexed8UImm3:
5944 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5945 case Match_InvalidMemoryIndexed4UImm5:
5946 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5947 case Match_InvalidMemoryIndexed2UImm5:
5948 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5949 case Match_InvalidMemoryIndexed8UImm6:
5950 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5951 case Match_InvalidMemoryIndexed16UImm6:
5952 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5953 case Match_InvalidMemoryIndexed4UImm6:
5954 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5955 case Match_InvalidMemoryIndexed2UImm6:
5956 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5957 case Match_InvalidMemoryIndexed1UImm6:
5958 return Error(Loc, "index must be in range [0, 63].");
5959 case Match_InvalidMemoryWExtend8:
5960 return Error(Loc,
5961 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5962 case Match_InvalidMemoryWExtend16:
5963 return Error(Loc,
5964 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5965 case Match_InvalidMemoryWExtend32:
5966 return Error(Loc,
5967 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5968 case Match_InvalidMemoryWExtend64:
5969 return Error(Loc,
5970 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5971 case Match_InvalidMemoryWExtend128:
5972 return Error(Loc,
5973 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5974 case Match_InvalidMemoryXExtend8:
5975 return Error(Loc,
5976 "expected 'lsl' or 'sxtx' with optional shift of #0");
5977 case Match_InvalidMemoryXExtend16:
5978 return Error(Loc,
5979 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5980 case Match_InvalidMemoryXExtend32:
5981 return Error(Loc,
5982 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5983 case Match_InvalidMemoryXExtend64:
5984 return Error(Loc,
5985 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5986 case Match_InvalidMemoryXExtend128:
5987 return Error(Loc,
5988 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5989 case Match_InvalidMemoryIndexed1:
5990 return Error(Loc, "index must be an integer in range [0, 4095].");
5991 case Match_InvalidMemoryIndexed2:
5992 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5993 case Match_InvalidMemoryIndexed4:
5994 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5995 case Match_InvalidMemoryIndexed8:
5996 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5997 case Match_InvalidMemoryIndexed16:
5998 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5999 case Match_InvalidImm0_0:
6000 return Error(Loc, "immediate must be 0.");
6001 case Match_InvalidImm0_1:
6002 return Error(Loc, "immediate must be an integer in range [0, 1].");
6003 case Match_InvalidImm0_3:
6004 return Error(Loc, "immediate must be an integer in range [0, 3].");
6005 case Match_InvalidImm0_7:
6006 return Error(Loc, "immediate must be an integer in range [0, 7].");
6007 case Match_InvalidImm0_15:
6008 return Error(Loc, "immediate must be an integer in range [0, 15].");
6009 case Match_InvalidImm0_31:
6010 return Error(Loc, "immediate must be an integer in range [0, 31].");
6011 case Match_InvalidImm0_63:
6012 return Error(Loc, "immediate must be an integer in range [0, 63].");
6013 case Match_InvalidImm0_127:
6014 return Error(Loc, "immediate must be an integer in range [0, 127].");
6015 case Match_InvalidImm0_255:
6016 return Error(Loc, "immediate must be an integer in range [0, 255].");
6017 case Match_InvalidImm0_65535:
6018 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6019 case Match_InvalidImm1_8:
6020 return Error(Loc, "immediate must be an integer in range [1, 8].");
6021 case Match_InvalidImm1_16:
6022 return Error(Loc, "immediate must be an integer in range [1, 16].");
6023 case Match_InvalidImm1_32:
6024 return Error(Loc, "immediate must be an integer in range [1, 32].");
6025 case Match_InvalidImm1_64:
6026 return Error(Loc, "immediate must be an integer in range [1, 64].");
6027 case Match_InvalidImmM1_62:
6028 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6029 case Match_InvalidMemoryIndexedRange2UImm0:
6030 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6031 case Match_InvalidMemoryIndexedRange2UImm1:
6032 return Error(Loc, "vector select offset must be an immediate range of the "
6033 "form <immf>:<imml>, where the first "
6034 "immediate is a multiple of 2 in the range [0, 2], and "
6035 "the second immediate is immf + 1.");
6036 case Match_InvalidMemoryIndexedRange2UImm2:
6037 case Match_InvalidMemoryIndexedRange2UImm3:
6038 return Error(
6039 Loc,
6040 "vector select offset must be an immediate range of the form "
6041 "<immf>:<imml>, "
6042 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6043 "[0, 14] "
6044 "depending on the instruction, and the second immediate is immf + 1.");
6045 case Match_InvalidMemoryIndexedRange4UImm0:
6046 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6047 case Match_InvalidMemoryIndexedRange4UImm1:
6048 case Match_InvalidMemoryIndexedRange4UImm2:
6049 return Error(
6050 Loc,
6051 "vector select offset must be an immediate range of the form "
6052 "<immf>:<imml>, "
6053 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6054 "[0, 12] "
6055 "depending on the instruction, and the second immediate is immf + 3.");
6056 case Match_InvalidSVEAddSubImm8:
6057 return Error(Loc, "immediate must be an integer in range [0, 255]"
6058 " with a shift amount of 0");
6059 case Match_InvalidSVEAddSubImm16:
6060 case Match_InvalidSVEAddSubImm32:
6061 case Match_InvalidSVEAddSubImm64:
6062 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6063 "multiple of 256 in range [256, 65280]");
6064 case Match_InvalidSVECpyImm8:
6065 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6066 " with a shift amount of 0");
6067 case Match_InvalidSVECpyImm16:
6068 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6069 "multiple of 256 in range [-32768, 65280]");
6070 case Match_InvalidSVECpyImm32:
6071 case Match_InvalidSVECpyImm64:
6072 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6073 "multiple of 256 in range [-32768, 32512]");
6074 case Match_InvalidIndexRange0_0:
6075 return Error(Loc, "expected lane specifier '[0]'");
6076 case Match_InvalidIndexRange1_1:
6077 return Error(Loc, "expected lane specifier '[1]'");
6078 case Match_InvalidIndexRange0_15:
6079 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6080 case Match_InvalidIndexRange0_7:
6081 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6082 case Match_InvalidIndexRange0_3:
6083 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6084 case Match_InvalidIndexRange0_1:
6085 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6086 case Match_InvalidSVEIndexRange0_63:
6087 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6088 case Match_InvalidSVEIndexRange0_31:
6089 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6090 case Match_InvalidSVEIndexRange0_15:
6091 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6092 case Match_InvalidSVEIndexRange0_7:
6093 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6094 case Match_InvalidSVEIndexRange0_3:
6095 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6096 case Match_InvalidLabel:
6097 return Error(Loc, "expected label or encodable integer pc offset");
6098 case Match_MRS:
6099 return Error(Loc, "expected readable system register");
6100 case Match_MSR:
6101 case Match_InvalidSVCR:
6102 return Error(Loc, "expected writable system register or pstate");
6103 case Match_InvalidComplexRotationEven:
6104 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6105 case Match_InvalidComplexRotationOdd:
6106 return Error(Loc, "complex rotation must be 90 or 270.");
6107 case Match_MnemonicFail: {
6108 std::string Suggestion = AArch64MnemonicSpellCheck(
6109 ((AArch64Operand &)*Operands[0]).getToken(),
6110 ComputeAvailableFeatures(STI->getFeatureBits()));
6111 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6112 }
6113 case Match_InvalidGPR64shifted8:
6114 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6115 case Match_InvalidGPR64shifted16:
6116 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6117 case Match_InvalidGPR64shifted32:
6118 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6119 case Match_InvalidGPR64shifted64:
6120 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6121 case Match_InvalidGPR64shifted128:
6122 return Error(
6123 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6124 case Match_InvalidGPR64NoXZRshifted8:
6125 return Error(Loc, "register must be x0..x30 without shift");
6126 case Match_InvalidGPR64NoXZRshifted16:
6127 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6128 case Match_InvalidGPR64NoXZRshifted32:
6129 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6130 case Match_InvalidGPR64NoXZRshifted64:
6131 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6132 case Match_InvalidGPR64NoXZRshifted128:
6133 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6134 case Match_InvalidZPR32UXTW8:
6135 case Match_InvalidZPR32SXTW8:
6136 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6137 case Match_InvalidZPR32UXTW16:
6138 case Match_InvalidZPR32SXTW16:
6139 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6140 case Match_InvalidZPR32UXTW32:
6141 case Match_InvalidZPR32SXTW32:
6142 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6143 case Match_InvalidZPR32UXTW64:
6144 case Match_InvalidZPR32SXTW64:
6145 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6146 case Match_InvalidZPR64UXTW8:
6147 case Match_InvalidZPR64SXTW8:
6148 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6149 case Match_InvalidZPR64UXTW16:
6150 case Match_InvalidZPR64SXTW16:
6151 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6152 case Match_InvalidZPR64UXTW32:
6153 case Match_InvalidZPR64SXTW32:
6154 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6155 case Match_InvalidZPR64UXTW64:
6156 case Match_InvalidZPR64SXTW64:
6157 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6158 case Match_InvalidZPR32LSL8:
6159 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6160 case Match_InvalidZPR32LSL16:
6161 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6162 case Match_InvalidZPR32LSL32:
6163 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6164 case Match_InvalidZPR32LSL64:
6165 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6166 case Match_InvalidZPR64LSL8:
6167 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6168 case Match_InvalidZPR64LSL16:
6169 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6170 case Match_InvalidZPR64LSL32:
6171 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6172 case Match_InvalidZPR64LSL64:
6173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6174 case Match_InvalidZPR0:
6175 return Error(Loc, "expected register without element width suffix");
6176 case Match_InvalidZPR8:
6177 case Match_InvalidZPR16:
6178 case Match_InvalidZPR32:
6179 case Match_InvalidZPR64:
6180 case Match_InvalidZPR128:
6181 return Error(Loc, "invalid element width");
6182 case Match_InvalidZPR_3b8:
6183 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6184 case Match_InvalidZPR_3b16:
6185 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6186 case Match_InvalidZPR_3b32:
6187 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6188 case Match_InvalidZPR_4b8:
6189 return Error(Loc,
6190 "Invalid restricted vector register, expected z0.b..z15.b");
6191 case Match_InvalidZPR_4b16:
6192 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6193 case Match_InvalidZPR_4b32:
6194 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6195 case Match_InvalidZPR_4b64:
6196 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6197 case Match_InvalidZPRMul2_Lo8:
6198 return Error(Loc, "Invalid restricted vector register, expected even "
6199 "register in z0.b..z14.b");
6200 case Match_InvalidZPRMul2_Hi8:
6201 return Error(Loc, "Invalid restricted vector register, expected even "
6202 "register in z16.b..z30.b");
6203 case Match_InvalidZPRMul2_Lo16:
6204 return Error(Loc, "Invalid restricted vector register, expected even "
6205 "register in z0.h..z14.h");
6206 case Match_InvalidZPRMul2_Hi16:
6207 return Error(Loc, "Invalid restricted vector register, expected even "
6208 "register in z16.h..z30.h");
6209 case Match_InvalidZPRMul2_Lo32:
6210 return Error(Loc, "Invalid restricted vector register, expected even "
6211 "register in z0.s..z14.s");
6212 case Match_InvalidZPRMul2_Hi32:
6213 return Error(Loc, "Invalid restricted vector register, expected even "
6214 "register in z16.s..z30.s");
6215 case Match_InvalidZPRMul2_Lo64:
6216 return Error(Loc, "Invalid restricted vector register, expected even "
6217 "register in z0.d..z14.d");
6218 case Match_InvalidZPRMul2_Hi64:
6219 return Error(Loc, "Invalid restricted vector register, expected even "
6220 "register in z16.d..z30.d");
6221 case Match_InvalidZPR_K0:
6222 return Error(Loc, "invalid restricted vector register, expected register "
6223 "in z20..z23 or z28..z31");
6224 case Match_InvalidSVEPattern:
6225 return Error(Loc, "invalid predicate pattern");
6226 case Match_InvalidSVEPPRorPNRAnyReg:
6227 case Match_InvalidSVEPPRorPNRBReg:
6228 case Match_InvalidSVEPredicateAnyReg:
6229 case Match_InvalidSVEPredicateBReg:
6230 case Match_InvalidSVEPredicateHReg:
6231 case Match_InvalidSVEPredicateSReg:
6232 case Match_InvalidSVEPredicateDReg:
6233 return Error(Loc, "invalid predicate register.");
6234 case Match_InvalidSVEPredicate3bAnyReg:
6235 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6236 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6237 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6238 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6239 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6240 return Error(Loc, "Invalid predicate register, expected PN in range "
6241 "pn8..pn15 with element suffix.");
6242 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6243 return Error(Loc, "invalid restricted predicate-as-counter register "
6244 "expected pn8..pn15");
6245 case Match_InvalidSVEPNPredicateBReg:
6246 case Match_InvalidSVEPNPredicateHReg:
6247 case Match_InvalidSVEPNPredicateSReg:
6248 case Match_InvalidSVEPNPredicateDReg:
6249 return Error(Loc, "Invalid predicate register, expected PN in range "
6250 "pn0..pn15 with element suffix.");
6251 case Match_InvalidSVEVecLenSpecifier:
6252 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6253 case Match_InvalidSVEPredicateListMul2x8:
6254 case Match_InvalidSVEPredicateListMul2x16:
6255 case Match_InvalidSVEPredicateListMul2x32:
6256 case Match_InvalidSVEPredicateListMul2x64:
6257 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6258 "predicate registers, where the first vector is a multiple of 2 "
6259 "and with correct element type");
6260 case Match_InvalidSVEExactFPImmOperandHalfOne:
6261 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6262 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6263 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6264 case Match_InvalidSVEExactFPImmOperandZeroOne:
6265 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6266 case Match_InvalidMatrixTileVectorH8:
6267 case Match_InvalidMatrixTileVectorV8:
6268 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6269 case Match_InvalidMatrixTileVectorH16:
6270 case Match_InvalidMatrixTileVectorV16:
6271 return Error(Loc,
6272 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6273 case Match_InvalidMatrixTileVectorH32:
6274 case Match_InvalidMatrixTileVectorV32:
6275 return Error(Loc,
6276 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6277 case Match_InvalidMatrixTileVectorH64:
6278 case Match_InvalidMatrixTileVectorV64:
6279 return Error(Loc,
6280 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6281 case Match_InvalidMatrixTileVectorH128:
6282 case Match_InvalidMatrixTileVectorV128:
6283 return Error(Loc,
6284 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6285 case Match_InvalidMatrixTile16:
6286 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6287 case Match_InvalidMatrixTile32:
6288 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6289 case Match_InvalidMatrixTile64:
6290 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6291 case Match_InvalidMatrix:
6292 return Error(Loc, "invalid matrix operand, expected za");
6293 case Match_InvalidMatrix8:
6294 return Error(Loc, "invalid matrix operand, expected suffix .b");
6295 case Match_InvalidMatrix16:
6296 return Error(Loc, "invalid matrix operand, expected suffix .h");
6297 case Match_InvalidMatrix32:
6298 return Error(Loc, "invalid matrix operand, expected suffix .s");
6299 case Match_InvalidMatrix64:
6300 return Error(Loc, "invalid matrix operand, expected suffix .d");
6301 case Match_InvalidMatrixIndexGPR32_12_15:
6302 return Error(Loc, "operand must be a register in range [w12, w15]");
6303 case Match_InvalidMatrixIndexGPR32_8_11:
6304 return Error(Loc, "operand must be a register in range [w8, w11]");
6305 case Match_InvalidSVEVectorList2x8Mul2:
6306 case Match_InvalidSVEVectorList2x16Mul2:
6307 case Match_InvalidSVEVectorList2x32Mul2:
6308 case Match_InvalidSVEVectorList2x64Mul2:
6309 case Match_InvalidSVEVectorList2x128Mul2:
6310 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6311 "SVE vectors, where the first vector is a multiple of 2 "
6312 "and with matching element types");
6313 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6314 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6315 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6316 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6317 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6318 "SVE vectors in the range z0-z14, where the first vector "
6319 "is a multiple of 2 "
6320 "and with matching element types");
6321 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6322 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6323 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6324 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6325 return Error(Loc,
6326 "Invalid vector list, expected list with 2 consecutive "
6327 "SVE vectors in the range z16-z30, where the first vector "
6328 "is a multiple of 2 "
6329 "and with matching element types");
6330 case Match_InvalidSVEVectorList4x8Mul4:
6331 case Match_InvalidSVEVectorList4x16Mul4:
6332 case Match_InvalidSVEVectorList4x32Mul4:
6333 case Match_InvalidSVEVectorList4x64Mul4:
6334 case Match_InvalidSVEVectorList4x128Mul4:
6335 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6336 "SVE vectors, where the first vector is a multiple of 4 "
6337 "and with matching element types");
6338 case Match_InvalidLookupTable:
6339 return Error(Loc, "Invalid lookup table, expected zt0");
6340 case Match_InvalidSVEVectorListStrided2x8:
6341 case Match_InvalidSVEVectorListStrided2x16:
6342 case Match_InvalidSVEVectorListStrided2x32:
6343 case Match_InvalidSVEVectorListStrided2x64:
6344 return Error(
6345 Loc,
6346 "Invalid vector list, expected list with each SVE vector in the list "
6347 "8 registers apart, and the first register in the range [z0, z7] or "
6348 "[z16, z23] and with correct element type");
6349 case Match_InvalidSVEVectorListStrided4x8:
6350 case Match_InvalidSVEVectorListStrided4x16:
6351 case Match_InvalidSVEVectorListStrided4x32:
6352 case Match_InvalidSVEVectorListStrided4x64:
6353 return Error(
6354 Loc,
6355 "Invalid vector list, expected list with each SVE vector in the list "
6356 "4 registers apart, and the first register in the range [z0, z3] or "
6357 "[z16, z19] and with correct element type");
6358 case Match_AddSubLSLImm3ShiftLarge:
6359 return Error(Loc,
6360 "expected 'lsl' with optional integer in range [0, 7]");
6361 default:
6362 llvm_unreachable("unexpected error code!");
6363 }
6364}
6365
6366static const char *getSubtargetFeatureName(uint64_t Val);
6367
6368bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6369 OperandVector &Operands,
6370 MCStreamer &Out,
6371 uint64_t &ErrorInfo,
6372 bool MatchingInlineAsm) {
6373 assert(!Operands.empty() && "Unexpected empty operand list!");
6374 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6375 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6376
6377 StringRef Tok = Op.getToken();
6378 unsigned NumOperands = Operands.size();
6379
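 // "lsl Rd, Rn, #imm" is an alias of UBFM; rewrite it here so the matcher only sees the
 // canonical form, e.g. "lsl x1, x2, #4" becomes "ubfm x1, x2, #60, #59".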
6380 if (NumOperands == 4 && Tok == "lsl") {
6381 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6382 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6383 if (Op2.isScalarReg() && Op3.isImm()) {
6384 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6385 if (Op3CE) {
6386 uint64_t Op3Val = Op3CE->getValue();
6387 uint64_t NewOp3Val = 0;
6388 uint64_t NewOp4Val = 0;
6389 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6390 Op2.getReg())) {
6391 NewOp3Val = (32 - Op3Val) & 0x1f;
6392 NewOp4Val = 31 - Op3Val;
6393 } else {
6394 NewOp3Val = (64 - Op3Val) & 0x3f;
6395 NewOp4Val = 63 - Op3Val;
6396 }
6397
6398 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6399 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6400
6401 Operands[0] =
6402 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6403 Operands.push_back(AArch64Operand::CreateImm(
6404 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6405 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6406 Op3.getEndLoc(), getContext());
6407 }
6408 }
6409 } else if (NumOperands == 4 && Tok == "bfc") {
6410 // FIXME: Horrible hack to handle BFC->BFM alias.
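 // e.g. "bfc w0, #3, #4" becomes "bfm w0, wzr, #29, #3".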
6411 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6412 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6413 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6414
6415 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6416 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6417 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6418
6419 if (LSBCE && WidthCE) {
6420 uint64_t LSB = LSBCE->getValue();
6421 uint64_t Width = WidthCE->getValue();
6422
6423 uint64_t RegWidth = 0;
6424 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6425 Op1.getReg()))
6426 RegWidth = 64;
6427 else
6428 RegWidth = 32;
6429
6430 if (LSB >= RegWidth)
6431 return Error(LSBOp.getStartLoc(),
6432 "expected integer in range [0, 31]");
6433 if (Width < 1 || Width > RegWidth)
6434 return Error(WidthOp.getStartLoc(),
6435 "expected integer in range [1, 32]");
6436
6437 uint64_t ImmR = 0;
6438 if (RegWidth == 32)
6439 ImmR = (32 - LSB) & 0x1f;
6440 else
6441 ImmR = (64 - LSB) & 0x3f;
6442
6443 uint64_t ImmS = Width - 1;
6444
6445 if (ImmR != 0 && ImmS >= ImmR)
6446 return Error(WidthOp.getStartLoc(),
6447 "requested insert overflows register");
6448
6449 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6450 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6451 Operands[0] =
6452 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6453 Operands[2] = AArch64Operand::CreateReg(
6454 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6455 SMLoc(), SMLoc(), getContext());
6456 Operands[3] = AArch64Operand::CreateImm(
6457 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6458 Operands.emplace_back(
6459 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6460 WidthOp.getEndLoc(), getContext()));
6461 }
6462 }
6463 } else if (NumOperands == 5) {
6464 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6465 // UBFIZ -> UBFM aliases.
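  // Each maps "<mn> <Rd>, <Rn>, #<lsb>, #<width>" onto the corresponding *BFM
  // with immr = (regwidth - lsb) % regwidth and imms = width - 1.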
6466 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6467 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6468 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6469 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6470
6471 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6472 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6473 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6474
6475 if (Op3CE && Op4CE) {
6476 uint64_t Op3Val = Op3CE->getValue();
6477 uint64_t Op4Val = Op4CE->getValue();
6478
6479 uint64_t RegWidth = 0;
6480 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6481 Op1.getReg()))
6482 RegWidth = 64;
6483 else
6484 RegWidth = 32;
6485
6486 if (Op3Val >= RegWidth)
6487 return Error(Op3.getStartLoc(),
6488 "expected integer in range [0, 31]");
6489 if (Op4Val < 1 || Op4Val > RegWidth)
6490 return Error(Op4.getStartLoc(),
6491 "expected integer in range [1, 32]");
6492
6493 uint64_t NewOp3Val = 0;
6494 if (RegWidth == 32)
6495 NewOp3Val = (32 - Op3Val) & 0x1f;
6496 else
6497 NewOp3Val = (64 - Op3Val) & 0x3f;
6498
6499 uint64_t NewOp4Val = Op4Val - 1;
6500
6501 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6502 return Error(Op4.getStartLoc(),
6503 "requested insert overflows register");
6504
6505 const MCExpr *NewOp3 =
6506 MCConstantExpr::create(NewOp3Val, getContext());
6507 const MCExpr *NewOp4 =
6508 MCConstantExpr::create(NewOp4Val, getContext());
6509 Operands[3] = AArch64Operand::CreateImm(
6510 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6511 Operands[4] = AArch64Operand::CreateImm(
6512 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6513 if (Tok == "bfi")
6514 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6515 getContext());
6516 else if (Tok == "sbfiz")
6517 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6518 getContext());
6519 else if (Tok == "ubfiz")
6520 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6521 getContext());
6522 else
6523 llvm_unreachable("No valid mnemonic for alias?");
6524 }
6525 }
6526
6527 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6528 // UBFX -> UBFM aliases.
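  // Each maps "<mn> <Rd>, <Rn>, #<lsb>, #<width>" onto the corresponding *BFM,
  // keeping immr = lsb and setting imms = lsb + width - 1.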
6529 } else if (NumOperands == 5 &&
6530 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6531 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6532 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6533 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6534
6535 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6536 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6537 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6538
6539 if (Op3CE && Op4CE) {
6540 uint64_t Op3Val = Op3CE->getValue();
6541 uint64_t Op4Val = Op4CE->getValue();
6542
6543 uint64_t RegWidth = 0;
6544 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6545 Op1.getReg()))
6546 RegWidth = 64;
6547 else
6548 RegWidth = 32;
6549
6550 if (Op3Val >= RegWidth)
6551 return Error(Op3.getStartLoc(),
6552 "expected integer in range [0, 31]");
6553 if (Op4Val < 1 || Op4Val > RegWidth)
6554 return Error(Op4.getStartLoc(),
6555 "expected integer in range [1, 32]");
6556
6557 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6558
6559 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6560 return Error(Op4.getStartLoc(),
6561 "requested extract overflows register");
6562
6563 const MCExpr *NewOp4 =
6564 MCConstantExpr::create(NewOp4Val, getContext());
6565 Operands[4] = AArch64Operand::CreateImm(
6566 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6567 if (Tok == "bfxil")
6568 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6569 getContext());
6570 else if (Tok == "sbfx")
6571 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6572 getContext());
6573 else if (Tok == "ubfx")
6574 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6575 getContext());
6576 else
6577 llvm_unreachable("No valid mnemonic for alias?");
6578 }
6579 }
6580 }
6581 }
6582
6583 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6584 // instruction for FP registers correctly in some rare circumstances. Convert
6585 // it to a safe instruction and warn (because silently changing someone's
6586 // assembly is rude).
6587 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6588 NumOperands == 4 && Tok == "movi") {
6589 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6590 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6591 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6592 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6593 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6594 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6595 if (Suffix.lower() == ".2d" &&
6596 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6597 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6598 " correctly on this CPU, converting to equivalent movi.16b");
6599 // Switch the suffix to .16b.
6600 unsigned Idx = Op1.isToken() ? 1 : 2;
6601 Operands[Idx] =
6602 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6603 }
6604 }
6605 }
6606
6607 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6608 // InstAlias can't quite handle this since the reg classes aren't
6609 // subclasses.
6610 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6611 // The source register can be Wn here, but the matcher expects a
6612 // GPR64. Twiddle it here if necessary.
6613 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6614 if (Op.isScalarReg()) {
6615 MCRegister Reg = getXRegFromWReg(Op.getReg());
6616 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6617 Op.getStartLoc(), Op.getEndLoc(),
6618 getContext());
6619 }
6620 }
6621 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6622 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6623 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6624 if (Op.isScalarReg() &&
6625 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6626 Op.getReg())) {
6627 // The source register can be Wn here, but the matcher expects a
6628 // GPR64. Twiddle it here if necessary.
6629 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6630 if (Op.isScalarReg()) {
6631 MCRegister Reg = getXRegFromWReg(Op.getReg());
6632 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6633 Op.getStartLoc(),
6634 Op.getEndLoc(), getContext());
6635 }
6636 }
6637 }
6638 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6639 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6640 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6641 if (Op.isScalarReg() &&
6642 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6643 Op.getReg())) {
6644 // The destination register can be Xd here, but the matcher expects a
6645 // GPR32. Twiddle it here if necessary.
6646 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6647 if (Op.isScalarReg()) {
6648 MCRegister Reg = getWRegFromXReg(Op.getReg());
6649 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6650 Op.getStartLoc(),
6651 Op.getEndLoc(), getContext());
6652 }
6653 }
6654 }
6655
6656 MCInst Inst;
6657 FeatureBitset MissingFeatures;
6658 // First try to match against the secondary set of tables containing the
6659 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6660 unsigned MatchResult =
6661 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6662 MatchingInlineAsm, 1);
6663
6664 // If that fails, try against the alternate table containing long-form NEON:
6665 // "fadd v0.2s, v1.2s, v2.2s"
6666 if (MatchResult != Match_Success) {
6667 // But first, save the short-form match result: we can use it in case the
6668 // long-form match also fails.
6669 auto ShortFormNEONErrorInfo = ErrorInfo;
6670 auto ShortFormNEONMatchResult = MatchResult;
6671 auto ShortFormNEONMissingFeatures = MissingFeatures;
6672
6673 MatchResult =
6674 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6675 MatchingInlineAsm, 0);
6676
6677 // If both matches failed, and the long-form match failed on the mnemonic
6678 // suffix token operand, the short-form match failure is probably more
6679 // relevant: use it instead.
6680 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6681 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6682 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6683 MatchResult = ShortFormNEONMatchResult;
6684 ErrorInfo = ShortFormNEONErrorInfo;
6685 MissingFeatures = ShortFormNEONMissingFeatures;
6686 }
6687 }
6688
6689 switch (MatchResult) {
6690 case Match_Success: {
6691 // Perform range checking and other semantic validations
6692 SmallVector<SMLoc, 8> OperandLocs;
6693 NumOperands = Operands.size();
6694 for (unsigned i = 1; i < NumOperands; ++i)
6695 OperandLocs.push_back(Operands[i]->getStartLoc());
6696 if (validateInstruction(Inst, IDLoc, OperandLocs))
6697 return true;
6698
6699 Inst.setLoc(IDLoc);
6700 Out.emitInstruction(Inst, getSTI());
6701 return false;
6702 }
6703 case Match_MissingFeature: {
6704 assert(MissingFeatures.any() && "Unknown missing feature!");
6705 // Special case the error message for the very common case where only
6706 // a single subtarget feature is missing (e.g. NEON).
6707 std::string Msg = "instruction requires:";
6708 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6709 if (MissingFeatures[i]) {
6710 Msg += " ";
6711 Msg += getSubtargetFeatureName(i);
6712 }
6713 }
6714 return Error(IDLoc, Msg);
6715 }
6716 case Match_MnemonicFail:
6717 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6718 case Match_InvalidOperand: {
6719 SMLoc ErrorLoc = IDLoc;
6720
6721 if (ErrorInfo != ~0ULL) {
6722 if (ErrorInfo >= Operands.size())
6723 return Error(IDLoc, "too few operands for instruction",
6724 SMRange(IDLoc, getTok().getLoc()));
6725
6726 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6727 if (ErrorLoc == SMLoc())
6728 ErrorLoc = IDLoc;
6729 }
6730 // If the match failed on a suffix token operand, tweak the diagnostic
6731 // accordingly.
6732 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6733 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6734 MatchResult = Match_InvalidSuffix;
6735
6736 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6737 }
6738 case Match_InvalidTiedOperand:
6739 case Match_InvalidMemoryIndexed1:
6740 case Match_InvalidMemoryIndexed2:
6741 case Match_InvalidMemoryIndexed4:
6742 case Match_InvalidMemoryIndexed8:
6743 case Match_InvalidMemoryIndexed16:
6744 case Match_InvalidCondCode:
6745 case Match_AddSubLSLImm3ShiftLarge:
6746 case Match_AddSubRegExtendSmall:
6747 case Match_AddSubRegExtendLarge:
6748 case Match_AddSubSecondSource:
6749 case Match_LogicalSecondSource:
6750 case Match_AddSubRegShift32:
6751 case Match_AddSubRegShift64:
6752 case Match_InvalidMovImm32Shift:
6753 case Match_InvalidMovImm64Shift:
6754 case Match_InvalidFPImm:
6755 case Match_InvalidMemoryWExtend8:
6756 case Match_InvalidMemoryWExtend16:
6757 case Match_InvalidMemoryWExtend32:
6758 case Match_InvalidMemoryWExtend64:
6759 case Match_InvalidMemoryWExtend128:
6760 case Match_InvalidMemoryXExtend8:
6761 case Match_InvalidMemoryXExtend16:
6762 case Match_InvalidMemoryXExtend32:
6763 case Match_InvalidMemoryXExtend64:
6764 case Match_InvalidMemoryXExtend128:
6765 case Match_InvalidMemoryIndexed1SImm4:
6766 case Match_InvalidMemoryIndexed2SImm4:
6767 case Match_InvalidMemoryIndexed3SImm4:
6768 case Match_InvalidMemoryIndexed4SImm4:
6769 case Match_InvalidMemoryIndexed1SImm6:
6770 case Match_InvalidMemoryIndexed16SImm4:
6771 case Match_InvalidMemoryIndexed32SImm4:
6772 case Match_InvalidMemoryIndexed4SImm7:
6773 case Match_InvalidMemoryIndexed8SImm7:
6774 case Match_InvalidMemoryIndexed16SImm7:
6775 case Match_InvalidMemoryIndexed8UImm5:
6776 case Match_InvalidMemoryIndexed8UImm3:
6777 case Match_InvalidMemoryIndexed4UImm5:
6778 case Match_InvalidMemoryIndexed2UImm5:
6779 case Match_InvalidMemoryIndexed1UImm6:
6780 case Match_InvalidMemoryIndexed2UImm6:
6781 case Match_InvalidMemoryIndexed4UImm6:
6782 case Match_InvalidMemoryIndexed8UImm6:
6783 case Match_InvalidMemoryIndexed16UImm6:
6784 case Match_InvalidMemoryIndexedSImm6:
6785 case Match_InvalidMemoryIndexedSImm5:
6786 case Match_InvalidMemoryIndexedSImm8:
6787 case Match_InvalidMemoryIndexedSImm9:
6788 case Match_InvalidMemoryIndexed16SImm9:
6789 case Match_InvalidMemoryIndexed8SImm10:
6790 case Match_InvalidImm0_0:
6791 case Match_InvalidImm0_1:
6792 case Match_InvalidImm0_3:
6793 case Match_InvalidImm0_7:
6794 case Match_InvalidImm0_15:
6795 case Match_InvalidImm0_31:
6796 case Match_InvalidImm0_63:
6797 case Match_InvalidImm0_127:
6798 case Match_InvalidImm0_255:
6799 case Match_InvalidImm0_65535:
6800 case Match_InvalidImm1_8:
6801 case Match_InvalidImm1_16:
6802 case Match_InvalidImm1_32:
6803 case Match_InvalidImm1_64:
6804 case Match_InvalidImmM1_62:
6805 case Match_InvalidMemoryIndexedRange2UImm0:
6806 case Match_InvalidMemoryIndexedRange2UImm1:
6807 case Match_InvalidMemoryIndexedRange2UImm2:
6808 case Match_InvalidMemoryIndexedRange2UImm3:
6809 case Match_InvalidMemoryIndexedRange4UImm0:
6810 case Match_InvalidMemoryIndexedRange4UImm1:
6811 case Match_InvalidMemoryIndexedRange4UImm2:
6812 case Match_InvalidSVEAddSubImm8:
6813 case Match_InvalidSVEAddSubImm16:
6814 case Match_InvalidSVEAddSubImm32:
6815 case Match_InvalidSVEAddSubImm64:
6816 case Match_InvalidSVECpyImm8:
6817 case Match_InvalidSVECpyImm16:
6818 case Match_InvalidSVECpyImm32:
6819 case Match_InvalidSVECpyImm64:
6820 case Match_InvalidIndexRange0_0:
6821 case Match_InvalidIndexRange1_1:
6822 case Match_InvalidIndexRange0_15:
6823 case Match_InvalidIndexRange0_7:
6824 case Match_InvalidIndexRange0_3:
6825 case Match_InvalidIndexRange0_1:
6826 case Match_InvalidSVEIndexRange0_63:
6827 case Match_InvalidSVEIndexRange0_31:
6828 case Match_InvalidSVEIndexRange0_15:
6829 case Match_InvalidSVEIndexRange0_7:
6830 case Match_InvalidSVEIndexRange0_3:
6831 case Match_InvalidLabel:
6832 case Match_InvalidComplexRotationEven:
6833 case Match_InvalidComplexRotationOdd:
6834 case Match_InvalidGPR64shifted8:
6835 case Match_InvalidGPR64shifted16:
6836 case Match_InvalidGPR64shifted32:
6837 case Match_InvalidGPR64shifted64:
6838 case Match_InvalidGPR64shifted128:
6839 case Match_InvalidGPR64NoXZRshifted8:
6840 case Match_InvalidGPR64NoXZRshifted16:
6841 case Match_InvalidGPR64NoXZRshifted32:
6842 case Match_InvalidGPR64NoXZRshifted64:
6843 case Match_InvalidGPR64NoXZRshifted128:
6844 case Match_InvalidZPR32UXTW8:
6845 case Match_InvalidZPR32UXTW16:
6846 case Match_InvalidZPR32UXTW32:
6847 case Match_InvalidZPR32UXTW64:
6848 case Match_InvalidZPR32SXTW8:
6849 case Match_InvalidZPR32SXTW16:
6850 case Match_InvalidZPR32SXTW32:
6851 case Match_InvalidZPR32SXTW64:
6852 case Match_InvalidZPR64UXTW8:
6853 case Match_InvalidZPR64SXTW8:
6854 case Match_InvalidZPR64UXTW16:
6855 case Match_InvalidZPR64SXTW16:
6856 case Match_InvalidZPR64UXTW32:
6857 case Match_InvalidZPR64SXTW32:
6858 case Match_InvalidZPR64UXTW64:
6859 case Match_InvalidZPR64SXTW64:
6860 case Match_InvalidZPR32LSL8:
6861 case Match_InvalidZPR32LSL16:
6862 case Match_InvalidZPR32LSL32:
6863 case Match_InvalidZPR32LSL64:
6864 case Match_InvalidZPR64LSL8:
6865 case Match_InvalidZPR64LSL16:
6866 case Match_InvalidZPR64LSL32:
6867 case Match_InvalidZPR64LSL64:
6868 case Match_InvalidZPR0:
6869 case Match_InvalidZPR8:
6870 case Match_InvalidZPR16:
6871 case Match_InvalidZPR32:
6872 case Match_InvalidZPR64:
6873 case Match_InvalidZPR128:
6874 case Match_InvalidZPR_3b8:
6875 case Match_InvalidZPR_3b16:
6876 case Match_InvalidZPR_3b32:
6877 case Match_InvalidZPR_4b8:
6878 case Match_InvalidZPR_4b16:
6879 case Match_InvalidZPR_4b32:
6880 case Match_InvalidZPR_4b64:
6881 case Match_InvalidSVEPPRorPNRAnyReg:
6882 case Match_InvalidSVEPPRorPNRBReg:
6883 case Match_InvalidSVEPredicateAnyReg:
6884 case Match_InvalidSVEPattern:
6885 case Match_InvalidSVEVecLenSpecifier:
6886 case Match_InvalidSVEPredicateBReg:
6887 case Match_InvalidSVEPredicateHReg:
6888 case Match_InvalidSVEPredicateSReg:
6889 case Match_InvalidSVEPredicateDReg:
6890 case Match_InvalidSVEPredicate3bAnyReg:
6891 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6892 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6893 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6894 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6895 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6896 case Match_InvalidSVEPNPredicateBReg:
6897 case Match_InvalidSVEPNPredicateHReg:
6898 case Match_InvalidSVEPNPredicateSReg:
6899 case Match_InvalidSVEPNPredicateDReg:
6900 case Match_InvalidSVEPredicateListMul2x8:
6901 case Match_InvalidSVEPredicateListMul2x16:
6902 case Match_InvalidSVEPredicateListMul2x32:
6903 case Match_InvalidSVEPredicateListMul2x64:
6904 case Match_InvalidSVEExactFPImmOperandHalfOne:
6905 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6906 case Match_InvalidSVEExactFPImmOperandZeroOne:
6907 case Match_InvalidMatrixTile16:
6908 case Match_InvalidMatrixTile32:
6909 case Match_InvalidMatrixTile64:
6910 case Match_InvalidMatrix:
6911 case Match_InvalidMatrix8:
6912 case Match_InvalidMatrix16:
6913 case Match_InvalidMatrix32:
6914 case Match_InvalidMatrix64:
6915 case Match_InvalidMatrixTileVectorH8:
6916 case Match_InvalidMatrixTileVectorH16:
6917 case Match_InvalidMatrixTileVectorH32:
6918 case Match_InvalidMatrixTileVectorH64:
6919 case Match_InvalidMatrixTileVectorH128:
6920 case Match_InvalidMatrixTileVectorV8:
6921 case Match_InvalidMatrixTileVectorV16:
6922 case Match_InvalidMatrixTileVectorV32:
6923 case Match_InvalidMatrixTileVectorV64:
6924 case Match_InvalidMatrixTileVectorV128:
6925 case Match_InvalidSVCR:
6926 case Match_InvalidMatrixIndexGPR32_12_15:
6927 case Match_InvalidMatrixIndexGPR32_8_11:
6928 case Match_InvalidLookupTable:
6929 case Match_InvalidZPRMul2_Lo8:
6930 case Match_InvalidZPRMul2_Hi8:
6931 case Match_InvalidZPRMul2_Lo16:
6932 case Match_InvalidZPRMul2_Hi16:
6933 case Match_InvalidZPRMul2_Lo32:
6934 case Match_InvalidZPRMul2_Hi32:
6935 case Match_InvalidZPRMul2_Lo64:
6936 case Match_InvalidZPRMul2_Hi64:
6937 case Match_InvalidZPR_K0:
6938 case Match_InvalidSVEVectorList2x8Mul2:
6939 case Match_InvalidSVEVectorList2x16Mul2:
6940 case Match_InvalidSVEVectorList2x32Mul2:
6941 case Match_InvalidSVEVectorList2x64Mul2:
6942 case Match_InvalidSVEVectorList2x128Mul2:
6943 case Match_InvalidSVEVectorList4x8Mul4:
6944 case Match_InvalidSVEVectorList4x16Mul4:
6945 case Match_InvalidSVEVectorList4x32Mul4:
6946 case Match_InvalidSVEVectorList4x64Mul4:
6947 case Match_InvalidSVEVectorList4x128Mul4:
6948 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6949 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6950 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6951 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6952 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6953 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6954 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6955 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6956 case Match_InvalidSVEVectorListStrided2x8:
6957 case Match_InvalidSVEVectorListStrided2x16:
6958 case Match_InvalidSVEVectorListStrided2x32:
6959 case Match_InvalidSVEVectorListStrided2x64:
6960 case Match_InvalidSVEVectorListStrided4x8:
6961 case Match_InvalidSVEVectorListStrided4x16:
6962 case Match_InvalidSVEVectorListStrided4x32:
6963 case Match_InvalidSVEVectorListStrided4x64:
6964 case Match_MSR:
6965 case Match_MRS: {
6966 if (ErrorInfo >= Operands.size())
6967 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6968 // Any time we get here, there's nothing fancy to do. Just get the
6969 // operand SMLoc and display the diagnostic.
6970 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6971 if (ErrorLoc == SMLoc())
6972 ErrorLoc = IDLoc;
6973 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6974 }
6975 }
6976
6977 llvm_unreachable("Implement any new match types added!");
6978}
6979
6980/// ParseDirective parses the AArch64-specific directives
6981bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6982 const MCContext::Environment Format = getContext().getObjectFileType();
6983 bool IsMachO = Format == MCContext::IsMachO;
6984 bool IsCOFF = Format == MCContext::IsCOFF;
6985
6986 auto IDVal = DirectiveID.getIdentifier().lower();
6987 SMLoc Loc = DirectiveID.getLoc();
6988 if (IDVal == ".arch")
6989 parseDirectiveArch(Loc);
6990 else if (IDVal == ".cpu")
6991 parseDirectiveCPU(Loc);
6992 else if (IDVal == ".tlsdesccall")
6993 parseDirectiveTLSDescCall(Loc);
6994 else if (IDVal == ".ltorg" || IDVal == ".pool")
6995 parseDirectiveLtorg(Loc);
6996 else if (IDVal == ".unreq")
6997 parseDirectiveUnreq(Loc);
6998 else if (IDVal == ".inst")
6999 parseDirectiveInst(Loc);
7000 else if (IDVal == ".cfi_negate_ra_state")
7001 parseDirectiveCFINegateRAState();
7002 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7003 parseDirectiveCFINegateRAStateWithPC();
7004 else if (IDVal == ".cfi_b_key_frame")
7005 parseDirectiveCFIBKeyFrame();
7006 else if (IDVal == ".cfi_mte_tagged_frame")
7007 parseDirectiveCFIMTETaggedFrame();
7008 else if (IDVal == ".arch_extension")
7009 parseDirectiveArchExtension(Loc);
7010 else if (IDVal == ".variant_pcs")
7011 parseDirectiveVariantPCS(Loc);
7012 else if (IsMachO) {
7013 if (IDVal == MCLOHDirectiveName())
7014 parseDirectiveLOH(IDVal, Loc);
7015 else
7016 return true;
7017 } else if (IsCOFF) {
7018 if (IDVal == ".seh_stackalloc")
7019 parseDirectiveSEHAllocStack(Loc);
7020 else if (IDVal == ".seh_endprologue")
7021 parseDirectiveSEHPrologEnd(Loc);
7022 else if (IDVal == ".seh_save_r19r20_x")
7023 parseDirectiveSEHSaveR19R20X(Loc);
7024 else if (IDVal == ".seh_save_fplr")
7025 parseDirectiveSEHSaveFPLR(Loc);
7026 else if (IDVal == ".seh_save_fplr_x")
7027 parseDirectiveSEHSaveFPLRX(Loc);
7028 else if (IDVal == ".seh_save_reg")
7029 parseDirectiveSEHSaveReg(Loc);
7030 else if (IDVal == ".seh_save_reg_x")
7031 parseDirectiveSEHSaveRegX(Loc);
7032 else if (IDVal == ".seh_save_regp")
7033 parseDirectiveSEHSaveRegP(Loc);
7034 else if (IDVal == ".seh_save_regp_x")
7035 parseDirectiveSEHSaveRegPX(Loc);
7036 else if (IDVal == ".seh_save_lrpair")
7037 parseDirectiveSEHSaveLRPair(Loc);
7038 else if (IDVal == ".seh_save_freg")
7039 parseDirectiveSEHSaveFReg(Loc);
7040 else if (IDVal == ".seh_save_freg_x")
7041 parseDirectiveSEHSaveFRegX(Loc);
7042 else if (IDVal == ".seh_save_fregp")
7043 parseDirectiveSEHSaveFRegP(Loc);
7044 else if (IDVal == ".seh_save_fregp_x")
7045 parseDirectiveSEHSaveFRegPX(Loc);
7046 else if (IDVal == ".seh_set_fp")
7047 parseDirectiveSEHSetFP(Loc);
7048 else if (IDVal == ".seh_add_fp")
7049 parseDirectiveSEHAddFP(Loc);
7050 else if (IDVal == ".seh_nop")
7051 parseDirectiveSEHNop(Loc);
7052 else if (IDVal == ".seh_save_next")
7053 parseDirectiveSEHSaveNext(Loc);
7054 else if (IDVal == ".seh_startepilogue")
7055 parseDirectiveSEHEpilogStart(Loc);
7056 else if (IDVal == ".seh_endepilogue")
7057 parseDirectiveSEHEpilogEnd(Loc);
7058 else if (IDVal == ".seh_trap_frame")
7059 parseDirectiveSEHTrapFrame(Loc);
7060 else if (IDVal == ".seh_pushframe")
7061 parseDirectiveSEHMachineFrame(Loc);
7062 else if (IDVal == ".seh_context")
7063 parseDirectiveSEHContext(Loc);
7064 else if (IDVal == ".seh_ec_context")
7065 parseDirectiveSEHECContext(Loc);
7066 else if (IDVal == ".seh_clear_unwound_to_call")
7067 parseDirectiveSEHClearUnwoundToCall(Loc);
7068 else if (IDVal == ".seh_pac_sign_lr")
7069 parseDirectiveSEHPACSignLR(Loc);
7070 else if (IDVal == ".seh_save_any_reg")
7071 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7072 else if (IDVal == ".seh_save_any_reg_p")
7073 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7074 else if (IDVal == ".seh_save_any_reg_x")
7075 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7076 else if (IDVal == ".seh_save_any_reg_px")
7077 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7078 else
7079 return true;
7080 } else
7081 return true;
7082 return false;
7083}
7084
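// Expand the "crypto"/"nocrypto" meta-extension into the concrete features it
// implies for the given architecture: sha2 and aes for Armv8.1-A through
// Armv8.3-A, and sm4, sha3, sha2 and aes for Armv8.4-A and later.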
7085static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7086 SmallVector<StringRef, 4> &RequestedExtensions) {
7087 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7088 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7089
7090 if (!NoCrypto && Crypto) {
7091 // Map 'crypto' to its constituent features: sha2 and aes (its
7092 // traditional meaning), plus sm4 and sha3 from Armv8.4-A onwards.
7093 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7094 ArchInfo == AArch64::ARMV8_3A) {
7095 RequestedExtensions.push_back("sha2");
7096 RequestedExtensions.push_back("aes");
7097 }
7098 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7099 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7100 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7101 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7102 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7103 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7104 RequestedExtensions.push_back("sm4");
7105 RequestedExtensions.push_back("sha3");
7106 RequestedExtensions.push_back("sha2");
7107 RequestedExtensions.push_back("aes");
7108 }
7109 } else if (NoCrypto) {
7110 // Likewise, map 'nocrypto' to disabling the same set of features
7111 // for the given architecture.
7112 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7113 ArchInfo == AArch64::ARMV8_3A) {
7114 RequestedExtensions.push_back("nosha2");
7115 RequestedExtensions.push_back("noaes");
7116 }
7117 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7118 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7119 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7120 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7121 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7122 ArchInfo == AArch64::ARMV9_4A) {
7123 RequestedExtensions.push_back("nosm4");
7124 RequestedExtensions.push_back("nosha3");
7125 RequestedExtensions.push_back("nosha2");
7126 RequestedExtensions.push_back("noaes");
7127 }
7128 }
7129}
7130
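// Return a location Offset characters further into the same source buffer.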
7131 static SMLoc incrementLoc(SMLoc L, int Offset) {
7132 return SMLoc::getFromPointer(L.getPointer() + Offset);
7133}
7134
7135/// parseDirectiveArch
7136/// ::= .arch token
7137bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7138 SMLoc CurLoc = getLoc();
7139
7140 StringRef Arch, ExtensionString;
7141 std::tie(Arch, ExtensionString) =
7142 getParser().parseStringToEndOfStatement().trim().split('+');
7143
7144 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7145 if (!ArchInfo)
7146 return Error(CurLoc, "unknown arch name");
7147
7148 if (parseToken(AsmToken::EndOfStatement))
7149 return true;
7150
7151 // Get the architecture and extension features.
7152 std::vector<StringRef> AArch64Features;
7153 AArch64Features.push_back(ArchInfo->ArchFeature);
7154 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7155
7156 MCSubtargetInfo &STI = copySTI();
7157 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7158 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7159 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7160
7161 SmallVector<StringRef, 4> RequestedExtensions;
7162 if (!ExtensionString.empty())
7163 ExtensionString.split(RequestedExtensions, '+');
7164
7165 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7166 CurLoc = incrementLoc(CurLoc, Arch.size());
7167
7168 for (auto Name : RequestedExtensions) {
7169 // Advance source location past '+'.
7170 CurLoc = incrementLoc(CurLoc, 1);
7171
7172 bool EnableFeature = !Name.consume_front_insensitive("no");
7173
7174 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7175 return Extension.Name == Name;
7176 });
7177
7178 if (It == std::end(ExtensionMap))
7179 return Error(CurLoc, "unsupported architectural extension: " + Name);
7180
7181 if (EnableFeature)
7182 STI.SetFeatureBitsTransitively(It->Features);
7183 else
7184 STI.ClearFeatureBitsTransitively(It->Features);
7185 CurLoc = incrementLoc(CurLoc, Name.size());
7186 }
7187 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7188 setAvailableFeatures(Features);
7189 return false;
7190}
7191
7192/// parseDirectiveArchExtension
7193/// ::= .arch_extension [no]feature
7194bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7195 SMLoc ExtLoc = getLoc();
7196
7197 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7198
7199 if (parseEOL())
7200 return true;
7201
7202 bool EnableFeature = true;
7203 if (Name.starts_with_insensitive("no")) {
7204 EnableFeature = false;
7205 Name = Name.substr(2);
7206 }
7207
7208 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7209 return Extension.Name == Name;
7210 });
7211
7212 if (It == std::end(ExtensionMap))
7213 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7214
7215 MCSubtargetInfo &STI = copySTI();
7216 if (EnableFeature)
7217 STI.SetFeatureBitsTransitively(It->Features);
7218 else
7219 STI.ClearFeatureBitsTransitively(It->Features);
7220 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7221 setAvailableFeatures(Features);
7222 return false;
7223}
7224
7225/// parseDirectiveCPU
7226/// ::= .cpu id
7227bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7228 SMLoc CurLoc = getLoc();
7229
7230 StringRef CPU, ExtensionString;
7231 std::tie(CPU, ExtensionString) =
7232 getParser().parseStringToEndOfStatement().trim().split('+');
7233
7234 if (parseToken(AsmToken::EndOfStatement))
7235 return true;
7236
7237 SmallVector<StringRef, 4> RequestedExtensions;
7238 if (!ExtensionString.empty())
7239 ExtensionString.split(RequestedExtensions, '+');
7240
7241 const AArch64::ArchInfo *CpuArch = AArch64::getArchForCpu(CPU);
7242 if (!CpuArch) {
7243 Error(CurLoc, "unknown CPU name");
7244 return false;
7245 }
7246 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7247
7248 MCSubtargetInfo &STI = copySTI();
7249 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7250 CurLoc = incrementLoc(CurLoc, CPU.size());
7251
7252 for (auto Name : RequestedExtensions) {
7253 // Advance source location past '+'.
7254 CurLoc = incrementLoc(CurLoc, 1);
7255
7256 bool EnableFeature = !Name.consume_front_insensitive("no");
7257
7258 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7259 return Extension.Name == Name;
7260 });
7261
7262 if (It == std::end(ExtensionMap))
7263 return Error(CurLoc, "unsupported architectural extension: " + Name);
7264
7265 if (EnableFeature)
7266 STI.SetFeatureBitsTransitively(It->Features);
7267 else
7268 STI.ClearFeatureBitsTransitively(It->Features);
7269 CurLoc = incrementLoc(CurLoc, Name.size());
7270 }
7271 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7272 setAvailableFeatures(Features);
7273 return false;
7274}
7275
7276/// parseDirectiveInst
7277/// ::= .inst opcode [, ...]
7278bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7279 if (getLexer().is(AsmToken::EndOfStatement))
7280 return Error(Loc, "expected expression following '.inst' directive");
7281
7282 auto parseOp = [&]() -> bool {
7283 SMLoc L = getLoc();
7284 const MCExpr *Expr = nullptr;
7285 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7286 return true;
7287 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7288 if (check(!Value, L, "expected constant expression"))
7289 return true;
7290 getTargetStreamer().emitInst(Value->getValue());
7291 return false;
7292 };
7293
7294 return parseMany(parseOp);
7295}
7296
7297// parseDirectiveTLSDescCall:
7298// ::= .tlsdesccall symbol
7299bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7300 StringRef Name;
7301 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7302 parseToken(AsmToken::EndOfStatement))
7303 return true;
7304
7305 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7306 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7307 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7308
7309 MCInst Inst;
7310 Inst.setOpcode(AArch64::TLSDESCCALL);
7311 Inst.addOperand(MCOperand::createExpr(Expr));
7312
7313 getParser().getStreamer().emitInstruction(Inst, getSTI());
7314 return false;
7315}
7316
7317/// ::= .loh <lohName | lohId> label1, ..., labelN
7318/// The number of arguments depends on the loh identifier.
7319bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7320 MCLOHType Kind;
7321 if (getTok().isNot(AsmToken::Identifier)) {
7322 if (getTok().isNot(AsmToken::Integer))
7323 return TokError("expected an identifier or a number in directive");
7324 // We successfully get a numeric value for the identifier.
7325 // Check if it is valid.
7326 int64_t Id = getTok().getIntVal();
7327 if (Id <= -1U && !isValidMCLOHType(Id))
7328 return TokError("invalid numeric identifier in directive");
7329 Kind = (MCLOHType)Id;
7330 } else {
7331 StringRef Name = getTok().getIdentifier();
7332 // We successfully parse an identifier.
7333 // Check if it is a recognized one.
7334 int Id = MCLOHNameToId(Name);
7335
7336 if (Id == -1)
7337 return TokError("invalid identifier in directive");
7338 Kind = (MCLOHType)Id;
7339 }
7340 // Consume the identifier.
7341 Lex();
7342 // Get the number of arguments of this LOH.
7343 int NbArgs = MCLOHIdToNbArgs(Kind);
7344
7345 assert(NbArgs != -1 && "Invalid number of arguments");
7346
7347 SmallVector<MCSymbol *, 3> Args;
7348 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7349 StringRef Name;
7350 if (getParser().parseIdentifier(Name))
7351 return TokError("expected identifier in directive");
7352 Args.push_back(getContext().getOrCreateSymbol(Name));
7353
7354 if (Idx + 1 == NbArgs)
7355 break;
7356 if (parseComma())
7357 return true;
7358 }
7359 if (parseEOL())
7360 return true;
7361
7362 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7363 return false;
7364}
7365
7366/// parseDirectiveLtorg
7367/// ::= .ltorg | .pool
7368bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7369 if (parseEOL())
7370 return true;
7371 getTargetStreamer().emitCurrentConstantPool();
7372 return false;
7373}
7374
7375/// parseDirectiveReq
7376/// ::= name .req registername
7377bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7378 Lex(); // Eat the '.req' token.
7379 SMLoc SRegLoc = getLoc();
7380 RegKind RegisterKind = RegKind::Scalar;
7381 MCRegister RegNum;
7382 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7383
7384 if (!ParseRes.isSuccess()) {
7385 StringRef Kind;
7386 RegisterKind = RegKind::NeonVector;
7387 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7388
7389 if (ParseRes.isFailure())
7390 return true;
7391
7392 if (ParseRes.isSuccess() && !Kind.empty())
7393 return Error(SRegLoc, "vector register without type specifier expected");
7394 }
7395
7396 if (!ParseRes.isSuccess()) {
7397 StringRef Kind;
7398 RegisterKind = RegKind::SVEDataVector;
7399 ParseRes =
7400 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7401
7402 if (ParseRes.isFailure())
7403 return true;
7404
7405 if (ParseRes.isSuccess() && !Kind.empty())
7406 return Error(SRegLoc,
7407 "sve vector register without type specifier expected");
7408 }
7409
7410 if (!ParseRes.isSuccess()) {
7411 StringRef Kind;
7412 RegisterKind = RegKind::SVEPredicateVector;
7413 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7414
7415 if (ParseRes.isFailure())
7416 return true;
7417
7418 if (ParseRes.isSuccess() && !Kind.empty())
7419 return Error(SRegLoc,
7420 "sve predicate register without type specifier expected");
7421 }
7422
7423 if (!ParseRes.isSuccess())
7424 return Error(SRegLoc, "register name or alias expected");
7425
7426 // Shouldn't be anything else.
7427 if (parseEOL())
7428 return true;
7429
7430 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7431 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7432 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7433
7434 return false;
7435}
7436
7437/// parseDirectiveUnreq
7438/// ::= .unreq registername
7439bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7440 if (getTok().isNot(AsmToken::Identifier))
7441 return TokError("unexpected input in .unreq directive.");
7442 RegisterReqs.erase(getTok().getIdentifier().lower());
7443 Lex(); // Eat the identifier.
7444 return parseToken(AsmToken::EndOfStatement);
7445}
7446
7447bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7448 if (parseEOL())
7449 return true;
7450 getStreamer().emitCFINegateRAState();
7451 return false;
7452}
7453
7454bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7455 if (parseEOL())
7456 return true;
7457 getStreamer().emitCFINegateRAStateWithPC();
7458 return false;
7459}
7460
7461/// parseDirectiveCFIBKeyFrame
7462/// ::= .cfi_b_key_frame
7463bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7464 if (parseEOL())
7465 return true;
7466 getStreamer().emitCFIBKeyFrame();
7467 return false;
7468}
7469
7470/// parseDirectiveCFIMTETaggedFrame
7471/// ::= .cfi_mte_tagged_frame
7472bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7473 if (parseEOL())
7474 return true;
7475 getStreamer().emitCFIMTETaggedFrame();
7476 return false;
7477}
7478
7479/// parseDirectiveVariantPCS
7480/// ::= .variant_pcs symbolname
7481bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7482 StringRef Name;
7483 if (getParser().parseIdentifier(Name))
7484 return TokError("expected symbol name");
7485 if (parseEOL())
7486 return true;
7487 getTargetStreamer().emitDirectiveVariantPCS(
7488 getContext().getOrCreateSymbol(Name));
7489 return false;
7490}
7491
7492/// parseDirectiveSEHAllocStack
7493/// ::= .seh_stackalloc
7494bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7495 int64_t Size;
7496 if (parseImmExpr(Size))
7497 return true;
7498 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7499 return false;
7500}
7501
7502/// parseDirectiveSEHPrologEnd
7503/// ::= .seh_endprologue
7504bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7505 getTargetStreamer().emitARM64WinCFIPrologEnd();
7506 return false;
7507}
7508
7509/// parseDirectiveSEHSaveR19R20X
7510/// ::= .seh_save_r19r20_x
7511bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7512 int64_t Offset;
7513 if (parseImmExpr(Offset))
7514 return true;
7515 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7516 return false;
7517}
7518
7519/// parseDirectiveSEHSaveFPLR
7520/// ::= .seh_save_fplr
7521bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7522 int64_t Offset;
7523 if (parseImmExpr(Offset))
7524 return true;
7525 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7526 return false;
7527}
7528
7529/// parseDirectiveSEHSaveFPLRX
7530/// ::= .seh_save_fplr_x
7531bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7532 int64_t Offset;
7533 if (parseImmExpr(Offset))
7534 return true;
7535 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7536 return false;
7537}
7538
7539/// parseDirectiveSEHSaveReg
7540/// ::= .seh_save_reg
7541bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7542 unsigned Reg;
7543 int64_t Offset;
7544 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7545 parseComma() || parseImmExpr(Offset))
7546 return true;
7547 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7548 return false;
7549}
7550
7551/// parseDirectiveSEHSaveRegX
7552/// ::= .seh_save_reg_x
7553bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7554 unsigned Reg;
7555 int64_t Offset;
7556 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7557 parseComma() || parseImmExpr(Offset))
7558 return true;
7559 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7560 return false;
7561}
7562
7563/// parseDirectiveSEHSaveRegP
7564/// ::= .seh_save_regp
7565bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7566 unsigned Reg;
7567 int64_t Offset;
7568 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7569 parseComma() || parseImmExpr(Offset))
7570 return true;
7571 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7572 return false;
7573}
7574
7575/// parseDirectiveSEHSaveRegPX
7576/// ::= .seh_save_regp_x
7577bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7578 unsigned Reg;
7579 int64_t Offset;
7580 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7581 parseComma() || parseImmExpr(Offset))
7582 return true;
7583 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7584 return false;
7585}
7586
7587/// parseDirectiveSEHSaveLRPair
7588/// ::= .seh_save_lrpair
7589bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7590 unsigned Reg;
7591 int64_t Offset;
7592 L = getLoc();
7593 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7594 parseComma() || parseImmExpr(Offset))
7595 return true;
7596 if (check(((Reg - 19) % 2 != 0), L,
7597 "expected register with even offset from x19"))
7598 return true;
7599 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7600 return false;
7601}
7602
7603/// parseDirectiveSEHSaveFReg
7604/// ::= .seh_save_freg
7605bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7606 unsigned Reg;
7607 int64_t Offset;
7608 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7609 parseComma() || parseImmExpr(Offset))
7610 return true;
7611 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7612 return false;
7613}
7614
7615/// parseDirectiveSEHSaveFRegX
7616/// ::= .seh_save_freg_x
7617bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7618 unsigned Reg;
7619 int64_t Offset;
7620 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7621 parseComma() || parseImmExpr(Offset))
7622 return true;
7623 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7624 return false;
7625}
7626
7627/// parseDirectiveSEHSaveFRegP
7628/// ::= .seh_save_fregp
7629bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7630 unsigned Reg;
7631 int64_t Offset;
7632 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7633 parseComma() || parseImmExpr(Offset))
7634 return true;
7635 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7636 return false;
7637}
7638
7639/// parseDirectiveSEHSaveFRegPX
7640/// ::= .seh_save_fregp_x
7641bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7642 unsigned Reg;
7643 int64_t Offset;
7644 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7645 parseComma() || parseImmExpr(Offset))
7646 return true;
7647 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7648 return false;
7649}
7650
7651/// parseDirectiveSEHSetFP
7652/// ::= .seh_set_fp
7653bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7654 getTargetStreamer().emitARM64WinCFISetFP();
7655 return false;
7656}
7657
7658/// parseDirectiveSEHAddFP
7659/// ::= .seh_add_fp
7660bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7661 int64_t Size;
7662 if (parseImmExpr(Size))
7663 return true;
7664 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7665 return false;
7666}
7667
7668/// parseDirectiveSEHNop
7669/// ::= .seh_nop
7670bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7671 getTargetStreamer().emitARM64WinCFINop();
7672 return false;
7673}
7674
7675/// parseDirectiveSEHSaveNext
7676/// ::= .seh_save_next
7677bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7678 getTargetStreamer().emitARM64WinCFISaveNext();
7679 return false;
7680}
7681
7682/// parseDirectiveSEHEpilogStart
7683/// ::= .seh_startepilogue
7684bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7685 getTargetStreamer().emitARM64WinCFIEpilogStart();
7686 return false;
7687}
7688
7689/// parseDirectiveSEHEpilogEnd
7690/// ::= .seh_endepilogue
7691bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7692 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7693 return false;
7694}
7695
7696/// parseDirectiveSEHTrapFrame
7697/// ::= .seh_trap_frame
7698bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7699 getTargetStreamer().emitARM64WinCFITrapFrame();
7700 return false;
7701}
7702
7703/// parseDirectiveSEHMachineFrame
7704/// ::= .seh_pushframe
7705bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7706 getTargetStreamer().emitARM64WinCFIMachineFrame();
7707 return false;
7708}
7709
7710/// parseDirectiveSEHContext
7711/// ::= .seh_context
7712bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7713 getTargetStreamer().emitARM64WinCFIContext();
7714 return false;
7715}
7716
7717/// parseDirectiveSEHECContext
7718/// ::= .seh_ec_context
7719bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7720 getTargetStreamer().emitARM64WinCFIECContext();
7721 return false;
7722}
7723
7724/// parseDirectiveSEHClearUnwoundToCall
7725/// ::= .seh_clear_unwound_to_call
7726bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7727 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7728 return false;
7729}
7730
7731/// parseDirectiveSEHPACSignLR
7732/// ::= .seh_pac_sign_lr
7733bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7734 getTargetStreamer().emitARM64WinCFIPACSignLR();
7735 return false;
7736}
7737
7738/// parseDirectiveSEHSaveAnyReg
7739/// ::= .seh_save_any_reg
7740/// ::= .seh_save_any_reg_p
7741/// ::= .seh_save_any_reg_x
7742/// ::= .seh_save_any_reg_px
7743bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7744 bool Writeback) {
7745 MCRegister Reg;
7746 SMLoc Start, End;
7747 int64_t Offset;
7748 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7749 parseComma() || parseImmExpr(Offset))
7750 return true;
7751
7752 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7753 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7754 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7755 return Error(L, "invalid save_any_reg offset");
7756 unsigned EncodedReg;
7757 if (Reg == AArch64::FP)
7758 EncodedReg = 29;
7759 else if (Reg == AArch64::LR)
7760 EncodedReg = 30;
7761 else
7762 EncodedReg = Reg - AArch64::X0;
7763 if (Paired) {
7764 if (Reg == AArch64::LR)
7765 return Error(Start, "lr cannot be paired with another register");
7766 if (Writeback)
7767 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7768 else
7769 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7770 } else {
7771 if (Writeback)
7772 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7773 else
7774 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7775 }
7776 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7777 unsigned EncodedReg = Reg - AArch64::D0;
7778 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7779 return Error(L, "invalid save_any_reg offset");
7780 if (Paired) {
7781 if (Reg == AArch64::D31)
7782 return Error(Start, "d31 cannot be paired with another register");
7783 if (Writeback)
7784 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7785 else
7786 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7787 } else {
7788 if (Writeback)
7789 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7790 else
7791 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7792 }
7793 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7794 unsigned EncodedReg = Reg - AArch64::Q0;
7795 if (Offset < 0 || Offset % 16)
7796 return Error(L, "invalid save_any_reg offset");
7797 if (Paired) {
7798 if (Reg == AArch64::Q31)
7799 return Error(Start, "q31 cannot be paired with another register");
7800 if (Writeback)
7801 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7802 else
7803 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7804 } else {
7805 if (Writeback)
7806 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7807 else
7808 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7809 }
7810 } else {
7811 return Error(Start, "save_any_reg register must be x, q or d register");
7812 }
7813 return false;
7814}
7815
7816bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7817 // Try @AUTH expressions: they're more complex than the usual symbol variants.
7818 if (!parseAuthExpr(Res, EndLoc))
7819 return false;
7820 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
7821}
7822
7823/// parseAuthExpr
7824/// ::= _sym@AUTH(ib,123[,addr])
7825/// ::= (_sym + 5)@AUTH(ib,123[,addr])
7826/// ::= (_sym - 5)@AUTH(ib,123[,addr])
7827bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7828 MCAsmParser &Parser = getParser();
7829 MCContext &Ctx = getContext();
7830
7831 AsmToken Tok = Parser.getTok();
7832
7833 // Look for '_sym@AUTH' ...
7834 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
7835 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
7836 if (SymName.contains('@'))
7837 return TokError(
7838 "combination of @AUTH with other modifiers not supported");
7839 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7840
7841 Parser.Lex(); // Eat the identifier.
7842 } else {
7843 // ... or look for a more complex symbol reference, such as ...
7844 SmallVector<AsmToken, 6> Tokens;
7845
7846 // ... '"_long sym"@AUTH' ...
7847 if (Tok.is(AsmToken::String))
7848 Tokens.resize(2);
7849 // ... or '(_sym + 5)@AUTH'.
7850 else if (Tok.is(AsmToken::LParen))
7851 Tokens.resize(6);
7852 else
7853 return true;
7854
7855 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
7856 return true;
7857
7858 // In either case, the expression ends with '@' 'AUTH'.
7859 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
7860 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
7861 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
7862 return true;
7863
7864 if (Tok.is(AsmToken::String)) {
7865 StringRef SymName;
7866 if (Parser.parseIdentifier(SymName))
7867 return true;
7868 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7869 } else {
7870 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
7871 return true;
7872 }
7873
7874 Parser.Lex(); // '@'
7875 Parser.Lex(); // 'AUTH'
7876 }
7877
7878 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
7879 if (parseToken(AsmToken::LParen, "expected '('"))
7880 return true;
7881
7882 if (Parser.getTok().isNot(AsmToken::Identifier))
7883 return TokError("expected key name");
7884
7885 StringRef KeyStr = Parser.getTok().getIdentifier();
7886 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
7887 if (!KeyIDOrNone)
7888 return TokError("invalid key '" + KeyStr + "'");
7889 Parser.Lex();
7890
7891 if (parseToken(AsmToken::Comma, "expected ','"))
7892 return true;
7893
7894 if (Parser.getTok().isNot(AsmToken::Integer))
7895 return TokError("expected integer discriminator");
7896 int64_t Discriminator = Parser.getTok().getIntVal();
7897
7898 if (!isUInt<16>(Discriminator))
7899 return TokError("integer discriminator " + Twine(Discriminator) +
7900 " out of range [0, 0xFFFF]");
7901 Parser.Lex();
7902
7903 bool UseAddressDiversity = false;
7904 if (Parser.getTok().is(AsmToken::Comma)) {
7905 Parser.Lex();
7906 if (Parser.getTok().isNot(AsmToken::Identifier) ||
7907 Parser.getTok().getIdentifier() != "addr")
7908 return TokError("expected 'addr'");
7909 UseAddressDiversity = true;
7910 Parser.Lex();
7911 }
7912
7913 EndLoc = Parser.getTok().getEndLoc();
7914 if (parseToken(AsmToken::RParen, "expected ')'"))
7915 return true;
7916
7917 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
7918 UseAddressDiversity, Ctx);
7919 return false;
7920}
7921
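/// Check whether Expr is a (possibly modified) symbol reference, optionally
/// with a constant addend, extracting the AArch64 (ELF) or Darwin variant
/// kind and the addend.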
7922bool
7923AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
7924 AArch64MCExpr::VariantKind &ELFRefKind,
7925 MCSymbolRefExpr::VariantKind &DarwinRefKind,
7926 int64_t &Addend) {
7927 ELFRefKind = AArch64MCExpr::VK_INVALID;
7928 DarwinRefKind = MCSymbolRefExpr::VK_None;
7929 Addend = 0;
7930
7931 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
7932 ELFRefKind = AE->getKind();
7933 Expr = AE->getSubExpr();
7934 }
7935
7936 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
7937 if (SE) {
7938 // It's a simple symbol reference with no addend.
7939 DarwinRefKind = SE->getKind();
7940 return true;
7941 }
7942
7943 // Check that it looks like a symbol + an addend
7944 MCValue Res;
7945 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
7946 if (!Relocatable || Res.getSymB())
7947 return false;
7948
7949 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
7950 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
7951 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
7952 return false;
7953
7954 if (Res.getSymA())
7955 DarwinRefKind = Res.getSymA()->getKind();
7956 Addend = Res.getConstant();
7957
7958 // It's some symbol reference + a constant addend, but really
7959 // shouldn't use both Darwin and ELF syntax.
7960 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
7961 DarwinRefKind == MCSymbolRefExpr::VK_None;
7962}
7963
7964/// Force static initialization.
7965 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
7966 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
7967 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
7968 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
7969 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
7970 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
7971}
7972
7973#define GET_REGISTER_MATCHER
7974#define GET_SUBTARGET_FEATURE_NAME
7975#define GET_MATCHER_IMPLEMENTATION
7976#define GET_MNEMONIC_SPELL_CHECKER
7977#include "AArch64GenAsmMatcher.inc"
7978
7979// Define this matcher function after the auto-generated include so we
7980// have the match class enum definitions.
7981unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
7982 unsigned Kind) {
7983 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
7984
7985 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
7986 if (!Op.isImm())
7987 return Match_InvalidOperand;
7988 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7989 if (!CE)
7990 return Match_InvalidOperand;
7991 if (CE->getValue() == ExpectedVal)
7992 return Match_Success;
7993 return Match_InvalidOperand;
7994 };
7995
7996 switch (Kind) {
7997 default:
7998 return Match_InvalidOperand;
7999 case MCK_MPR:
8000 // If the Kind is a token for the MPR register class which has the "za"
8001 // register (SME accumulator array), check if the asm is a literal "za"
8002 // token. This is for the "smstart za" alias that defines the register
8003 // as a literal token.
8004 if (Op.isTokenEqual("za"))
8005 return Match_Success;
8006 return Match_InvalidOperand;
8007
8008 // If the kind is a token for a literal immediate, check if our asm operand
8009 // matches. This is for InstAliases which have a fixed-value immediate in
8010 // the asm string, such as hints which are parsed into a specific
8011 // instruction definition.
8012#define MATCH_HASH(N) \
8013 case MCK__HASH_##N: \
8014 return MatchesOpImmediate(N);
8015 MATCH_HASH(0)
8016 MATCH_HASH(1)
8017 MATCH_HASH(2)
8018 MATCH_HASH(3)
8019 MATCH_HASH(4)
8020 MATCH_HASH(6)
8021 MATCH_HASH(7)
8022 MATCH_HASH(8)
8023 MATCH_HASH(10)
8024 MATCH_HASH(12)
8025 MATCH_HASH(14)
8026 MATCH_HASH(16)
8027 MATCH_HASH(24)
8028 MATCH_HASH(25)
8029 MATCH_HASH(26)
8030 MATCH_HASH(27)
8031 MATCH_HASH(28)
8032 MATCH_HASH(29)
8033 MATCH_HASH(30)
8034 MATCH_HASH(31)
8035 MATCH_HASH(32)
8036 MATCH_HASH(40)
8037 MATCH_HASH(48)
8038 MATCH_HASH(64)
8039#undef MATCH_HASH
8040#define MATCH_HASH_MINUS(N) \
8041 case MCK__HASH__MINUS_##N: \
8042 return MatchesOpImmediate(-N);
8043 MATCH_HASH_MINUS(4)
8044 MATCH_HASH_MINUS(8)
8045 MATCH_HASH_MINUS(12)
8046#undef MATCH_HASH_MINUS
8047 }
8048}
8049
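// Parse a consecutive even/odd pair of W or X registers (e.g. "x0, x1"), as
// used, for example, by the CASP instructions, and build the corresponding
// sequential super-register operand.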
8050ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8051
8052 SMLoc S = getLoc();
8053
8054 if (getTok().isNot(AsmToken::Identifier))
8055 return Error(S, "expected register");
8056
8057 MCRegister FirstReg;
8058 ParseStatus Res = tryParseScalarRegister(FirstReg);
8059 if (!Res.isSuccess())
8060 return Error(S, "expected first even register of a consecutive same-size "
8061 "even/odd register pair");
8062
8063 const MCRegisterClass &WRegClass =
8064 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8065 const MCRegisterClass &XRegClass =
8066 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8067
8068 bool isXReg = XRegClass.contains(FirstReg),
8069 isWReg = WRegClass.contains(FirstReg);
8070 if (!isXReg && !isWReg)
8071 return Error(S, "expected first even register of a consecutive same-size "
8072 "even/odd register pair");
8073
8074 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8075 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8076
8077 if (FirstEncoding & 0x1)
8078 return Error(S, "expected first even register of a consecutive same-size "
8079 "even/odd register pair");
8080
8081 if (getTok().isNot(AsmToken::Comma))
8082 return Error(getLoc(), "expected comma");
8083 // Eat the comma
8084 Lex();
8085
8086 SMLoc E = getLoc();
8087 MCRegister SecondReg;
8088 Res = tryParseScalarRegister(SecondReg);
8089 if (!Res.isSuccess())
8090 return Error(E, "expected second odd register of a consecutive same-size "
8091 "even/odd register pair");
8092
8093 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8094 (isXReg && !XRegClass.contains(SecondReg)) ||
8095 (isWReg && !WRegClass.contains(SecondReg)))
8096 return Error(E, "expected second odd register of a consecutive same-size "
8097 "even/odd register pair");
8098
8099 MCRegister Pair;
8100 if (isXReg) {
8101 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8102 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8103 } else {
8104 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8105 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8106 }
8107
8108 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8109 getLoc(), getContext()));
8110
8111 return ParseStatus::Success;
8112}
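A sequential GPR pair (used by, for example, the CASP compare-and-swap-pair instructions) must start at an even-numbered register and continue with the next consecutive register of the same width, which is exactly what the encoding checks above enforce. A standalone sketch of that validity test, for illustration only (not the LLVM API):

#include <cstdio>

static bool isValidSeqPair(unsigned FirstEnc, unsigned SecondEnc,
                           bool FirstIsX, bool SecondIsX) {
  if (FirstEnc & 0x1)            // the first register must be even-numbered
    return false;
  if (SecondEnc != FirstEnc + 1) // the second must be the next register
    return false;
  return FirstIsX == SecondIsX;  // both W registers or both X registers
}

int main() {
  std::printf("%d\n", isValidSeqPair(0, 1, true, true));    // x0, x1 -> 1
  std::printf("%d\n", isValidSeqPair(1, 2, true, true));    // x1, x2 -> 0 (odd first)
  std::printf("%d\n", isValidSeqPair(4, 6, false, false));  // w4, w6 -> 0 (not consecutive)
}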
8113
8114template <bool ParseShiftExtend, bool ParseSuffix>
8115ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8116 const SMLoc S = getLoc();
8117 // Check for an SVE vector register specifier first.
8118 MCRegister RegNum;
8119 StringRef Kind;
8120
8121 ParseStatus Res =
8122 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8123
8124 if (!Res.isSuccess())
8125 return Res;
8126
8127 if (ParseSuffix && Kind.empty())
8128 return ParseStatus::NoMatch;
8129
8130 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8131 if (!KindRes)
8132 return ParseStatus::NoMatch;
8133
8134 unsigned ElementWidth = KindRes->second;
8135
8136 // No shift/extend is the default.
8137 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8138 Operands.push_back(AArch64Operand::CreateVectorReg(
8139 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8140
8141 ParseStatus Res = tryParseVectorIndex(Operands);
8142 if (Res.isFailure())
8143 return ParseStatus::Failure;
8144 return ParseStatus::Success;
8145 }
8146
8147 // Eat the comma
8148 Lex();
8149
8150 // Match the shift
8151 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8152 Res = tryParseOptionalShiftExtend(ExtOpnd);
8153 if (!Res.isSuccess())
8154 return Res;
8155
8156 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8157 Operands.push_back(AArch64Operand::CreateVectorReg(
8158 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8159 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8160 Ext->hasShiftExtendAmount()));
8161
8162 return ParseStatus::Success;
8163}
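The suffix on an SVE data-vector register such as "z0.d" fixes the element width recorded on the operand, and when ParseShiftExtend is enabled a trailing ", <extend> #imm" is folded into the same operand. A standalone sketch of the suffix-to-width mapping, for illustration only (the helper name is made up, and the b/h/s/d/q suffix set is an assumption based on the usual SVE element sizes):

#include <cstdio>
#include <string>

// Illustrative only: element width in bits implied by an SVE data-vector
// suffix; returns -1 for anything else.
static int suffixElementWidth(const std::string &Suffix) {
  if (Suffix == ".b") return 8;
  if (Suffix == ".h") return 16;
  if (Suffix == ".s") return 32;
  if (Suffix == ".d") return 64;
  if (Suffix == ".q") return 128;
  return -1;
}

int main() {
  std::printf("%d\n", suffixElementWidth(".d"));  // 64, as for "z0.d"
  std::printf("%d\n", suffixElementWidth(".x"));  // -1, not a valid suffix
}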
8164
8165ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8166 MCAsmParser &Parser = getParser();
8167
8168 SMLoc SS = getLoc();
8169 const AsmToken &TokE = getTok();
8170 bool IsHash = TokE.is(AsmToken::Hash);
8171
8172 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8173 return ParseStatus::NoMatch;
8174
8175 int64_t Pattern;
8176 if (IsHash) {
8177 Lex(); // Eat hash
8178
8179 // Parse the immediate operand.
8180 const MCExpr *ImmVal;
8181 SS = getLoc();
8182 if (Parser.parseExpression(ImmVal))
8183 return ParseStatus::Failure;
8184
8185 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8186 if (!MCE)
8187 return TokError("invalid operand for instruction");
8188
8189 Pattern = MCE->getValue();
8190 } else {
8191 // Parse the pattern
8192 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8193 if (!Pat)
8194 return ParseStatus::NoMatch;
8195
8196 Lex();
8197 Pattern = Pat->Encoding;
8198 assert(Pattern >= 0 && Pattern < 32);
8199 }
8200
8201 Operands.push_back(
8202 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8203 SS, getLoc(), getContext()));
8204
8205 return ParseStatus::Success;
8206}
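An SVE predicate pattern may be written by name ("all", "vl1", "mul3", ...) or as a raw "#imm"; either way it ends up as a small integer encoding. A standalone sketch with a partial, assumed name-to-encoding table, for illustration only (the real parser uses the generated lookupSVEPREDPATByName table):

#include <cstdio>
#include <cstring>

// Illustrative only: a partial table of named SVE predicate patterns and
// their encodings; unlisted names return -1 in this sketch.
static int svePatternEncoding(const char *Name) {
  struct Entry { const char *Name; int Enc; };
  static const Entry Table[] = {
      {"pow2", 0}, {"vl1", 1}, {"vl2", 2}, {"vl3", 3},
      {"mul4", 29}, {"mul3", 30}, {"all", 31},
  };
  for (const Entry &E : Table)
    if (std::strcmp(Name, E.Name) == 0)
      return E.Enc;
  return -1;
}

int main() {
  std::printf("%d\n", svePatternEncoding("all"));   // 31
  std::printf("%d\n", svePatternEncoding("vl99"));  // -1 (not a named pattern)
}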
8207
8208ParseStatus
8209AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8210 int64_t Pattern;
8211 SMLoc SS = getLoc();
8212 const AsmToken &TokE = getTok();
8213 // Parse the pattern
8214 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8215 TokE.getString());
8216 if (!Pat)
8217 return ParseStatus::NoMatch;
8218
8219 Lex();
8220 Pattern = Pat->Encoding;
8221 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8222
8223 Operands.push_back(
8224 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8225 SS, getLoc(), getContext()));
8226
8227 return ParseStatus::Success;
8228}
8229
8230ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8231 SMLoc SS = getLoc();
8232
8233 MCRegister XReg;
8234 if (!tryParseScalarRegister(XReg).isSuccess())
8235 return ParseStatus::NoMatch;
8236
8237 MCContext &ctx = getContext();
8238 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8239 MCRegister X8Reg = RI->getMatchingSuperReg(
8240 XReg, AArch64::x8sub_0,
8241 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8242 if (!X8Reg)
8243 return Error(SS,
8244 "expected an even-numbered x-register in the range [x0,x22]");
8245
8246 Operands.push_back(
8247 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8248 return ParseStatus::Success;
8249}
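A GPR64x8 operand names a block of eight consecutive x-registers by its base register, so the base must be even-numbered and no higher than x22; this operand form appears in, for example, the LD64B/ST64B 64-byte load/store instructions. A standalone sketch of that base-register constraint, for illustration only:

#include <cstdio>

// Illustrative only: the base register of a GPR64x8 group must be an
// even-numbered x-register no higher than x22.
static bool isValidX8Base(unsigned XRegNum) {
  return XRegNum % 2 == 0 && XRegNum <= 22;
}

int main() {
  std::printf("%d\n", isValidX8Base(0));   // x0  -> 1
  std::printf("%d\n", isValidX8Base(22));  // x22 -> 1
  std::printf("%d\n", isValidX8Base(7));   // x7  -> 0 (odd)
  std::printf("%d\n", isValidX8Base(24));  // x24 -> 0 (above x22)
}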
8250
8251ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8252 SMLoc S = getLoc();
8253
8254 if (getTok().isNot(AsmToken::Integer))
8255 return ParseStatus::NoMatch;
8256
8257 if (getLexer().peekTok().isNot(AsmToken::Colon))
8258 return ParseStatus::NoMatch;
8259
8260 const MCExpr *ImmF;
8261 if (getParser().parseExpression(ImmF))
8262 return ParseStatus::NoMatch;
8263
8264 if (getTok().isNot(AsmToken::Colon))
8265 return ParseStatus::NoMatch;
8266
8267 Lex(); // Eat ':'
8268 if (getTok().isNot(AsmToken::Integer))
8269 return ParseStatus::NoMatch;
8270
8271 SMLoc E = getTok().getLoc();
8272 const MCExpr *ImmL;
8273 if (getParser().parseExpression(ImmL))
8274 return ParseStatus::NoMatch;
8275
8276 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8277 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8278
8279 Operands.push_back(
8280 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8281 return ParseStatus::Success;
8282}
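An immediate range is written as two integers separated by a colon (for example "0:1" in SME multi-slice operands) and both bounds are recorded on a single operand. A standalone sketch of splitting such a spelling, for illustration only (the helper name is made up):

#include <cstdio>
#include <optional>
#include <string>
#include <utility>

// Illustrative only: split a "first:last" spelling into its two bounds.
static std::optional<std::pair<unsigned, unsigned>>
parseImmRange(const std::string &Text) {
  std::size_t Colon = Text.find(':');
  if (Colon == std::string::npos || Colon == 0 || Colon + 1 == Text.size())
    return std::nullopt;
  try {
    unsigned First = static_cast<unsigned>(std::stoul(Text.substr(0, Colon)));
    unsigned Last = static_cast<unsigned>(std::stoul(Text.substr(Colon + 1)));
    return std::make_pair(First, Last);
  } catch (...) {
    return std::nullopt; // one of the bounds was not an integer
  }
}

int main() {
  if (auto R = parseImmRange("0:1"))
    std::printf("%u..%u\n", R->first, R->second);  // prints 0..1
  if (!parseImmRange("0:"))
    std::printf("no match\n");                     // missing upper bound
}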
8283
8284template <int Adj>
8285ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8286 SMLoc S = getLoc();
8287
8288 parseOptionalToken(AsmToken::Hash);
8289 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8290
8291 if (getTok().isNot(AsmToken::Integer))
8292 return ParseStatus::NoMatch;
8293
8294 const MCExpr *Ex;
8295 if (getParser().parseExpression(Ex))
8296 return ParseStatus::NoMatch;
8297
8298 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8299 if (IsNegative)
8300 Imm = -Imm;
8301
8302 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8303 // return a value that is certain to trigger an error message about the
8304 // invalid immediate range instead of a non-descriptive invalid operand error.
8305 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8306 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8307 Imm = -2;
8308 else
8309 Imm += Adj;
8310
8311 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8312 Operands.push_back(AArch64Operand::CreateImm(
8313 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8314
8315 return ParseStatus::Success;
8316}
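tryParseAdjImm0_63 shifts the written immediate by the template adjustment before range checking and deliberately substitutes -2 for anything out of range, so the matcher reports a specific immediate-range diagnostic rather than a generic invalid-operand error. A standalone sketch of that adjustment, for illustration only:

#include <cstdint>
#include <cstdio>

// Illustrative only: apply the +/-1 adjustment and clamp out-of-range
// values to the sentinel -2 used by the parser above.
static int64_t adjustImm0_63(int64_t Imm, int Adj /* +1 or -1 */) {
  if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
    return -2;
  return Imm + Adj;
}

int main() {
  std::printf("%lld\n", (long long)adjustImm0_63(1, -1));   // 0
  std::printf("%lld\n", (long long)adjustImm0_63(64, -1));  // 63
  std::printf("%lld\n", (long long)adjustImm0_63(65, -1));  // -2 (out of range)
}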