1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateAsCounter,
72 SVEPredicateVector,
73 Matrix,
74 LookupTable
75};
76
77enum class MatrixKind { Array, Tile, Row, Col };
78
79enum RegConstraintEqualityTy {
80 EqualsReg,
81 EqualsSuperReg,
82 EqualsSubReg
83};
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registered via the .req directive.
90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
91
92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 MCRegister getDstReg() const { return Dst; }
140 MCRegister getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 MCRegister Dst;
150 MCRegister Pg;
151 } NextPrefix;
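 // Illustrative note (not part of the original source): NextPrefix records a
 // pending MOVPRFX, e.g. after parsing "movprfx z0.d, p0/m, z1.d" the next
 // instruction is expected to be a destructive operation writing z0; the
 // predicated form additionally constrains the governing predicate and the
 // element size.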
152
153 AArch64TargetStreamer &getTargetStreamer() {
154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
167 bool parseRegister(OperandVector &Operands);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
181 OperandVector &Operands);
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFINegateRAStateWithPC();
199 bool parseDirectiveCFIBKeyFrame();
200 bool parseDirectiveCFIMTETaggedFrame();
201
202 bool parseDirectiveVariantPCS(SMLoc L);
203
204 bool parseDirectiveSEHAllocStack(SMLoc L);
205 bool parseDirectiveSEHPrologEnd(SMLoc L);
206 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
207 bool parseDirectiveSEHSaveFPLR(SMLoc L);
208 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
209 bool parseDirectiveSEHSaveReg(SMLoc L);
210 bool parseDirectiveSEHSaveRegX(SMLoc L);
211 bool parseDirectiveSEHSaveRegP(SMLoc L);
212 bool parseDirectiveSEHSaveRegPX(SMLoc L);
213 bool parseDirectiveSEHSaveLRPair(SMLoc L);
214 bool parseDirectiveSEHSaveFReg(SMLoc L);
215 bool parseDirectiveSEHSaveFRegX(SMLoc L);
216 bool parseDirectiveSEHSaveFRegP(SMLoc L);
217 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
218 bool parseDirectiveSEHSetFP(SMLoc L);
219 bool parseDirectiveSEHAddFP(SMLoc L);
220 bool parseDirectiveSEHNop(SMLoc L);
221 bool parseDirectiveSEHSaveNext(SMLoc L);
222 bool parseDirectiveSEHEpilogStart(SMLoc L);
223 bool parseDirectiveSEHEpilogEnd(SMLoc L);
224 bool parseDirectiveSEHTrapFrame(SMLoc L);
225 bool parseDirectiveSEHMachineFrame(SMLoc L);
226 bool parseDirectiveSEHContext(SMLoc L);
227 bool parseDirectiveSEHECContext(SMLoc L);
228 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
229 bool parseDirectiveSEHPACSignLR(SMLoc L);
230 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
231
232 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
233 SmallVectorImpl<SMLoc> &Loc);
234 unsigned getNumRegsForRegKind(RegKind K);
235 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
236 OperandVector &Operands, MCStreamer &Out,
237 uint64_t &ErrorInfo,
238 bool MatchingInlineAsm) override;
239 /// @name Auto-generated Match Functions
240 /// {
241
242#define GET_ASSEMBLER_HEADER
243#include "AArch64GenAsmMatcher.inc"
244
245 /// }
246
247 ParseStatus tryParseScalarRegister(MCRegister &Reg);
248 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
249 RegKind MatchKind);
250 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
251 ParseStatus tryParseSVCR(OperandVector &Operands);
252 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
253 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
254 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
255 ParseStatus tryParseSysReg(OperandVector &Operands);
256 ParseStatus tryParseSysCROperand(OperandVector &Operands);
257 template <bool IsSVEPrefetch = false>
258 ParseStatus tryParsePrefetch(OperandVector &Operands);
259 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
260 ParseStatus tryParsePSBHint(OperandVector &Operands);
261 ParseStatus tryParseBTIHint(OperandVector &Operands);
262 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
263 ParseStatus tryParseAdrLabel(OperandVector &Operands);
264 template <bool AddFPZeroAsLiteral>
265 ParseStatus tryParseFPImm(OperandVector &Operands);
266 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
267 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
268 bool tryParseNeonVectorRegister(OperandVector &Operands);
269 ParseStatus tryParseVectorIndex(OperandVector &Operands);
270 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
271 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
272 template <bool ParseShiftExtend,
273 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
274 ParseStatus tryParseGPROperand(OperandVector &Operands);
275 ParseStatus tryParseZTOperand(OperandVector &Operands);
276 template <bool ParseShiftExtend, bool ParseSuffix>
277 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
278 template <RegKind RK>
279 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
280 ParseStatus
281 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
282 template <RegKind VectorKind>
283 ParseStatus tryParseVectorList(OperandVector &Operands,
284 bool ExpectMatch = false);
285 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
286 ParseStatus tryParseSVEPattern(OperandVector &Operands);
287 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
288 ParseStatus tryParseGPR64x8(OperandVector &Operands);
289 ParseStatus tryParseImmRange(OperandVector &Operands);
290 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
291 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
292
293public:
294 enum AArch64MatchResultTy {
295 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
296#define GET_OPERAND_DIAGNOSTIC_TYPES
297#include "AArch64GenAsmMatcher.inc"
298 };
299 bool IsILP32;
300 bool IsWindowsArm64EC;
301
302 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
303 const MCInstrInfo &MII, const MCTargetOptions &Options)
304 : MCTargetAsmParser(Options, STI, MII) {
305 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
306 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
307 MCAsmParserExtension::Initialize(Parser);
308 MCStreamer &S = getParser().getStreamer();
309 if (S.getTargetStreamer() == nullptr)
310 new AArch64TargetStreamer(S);
311
312 // Alias .hword/.word/.[dx]word to the target-independent
313 // .2byte/.4byte/.8byte directives as they have the same form and
314 // semantics:
315 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
316 Parser.addAliasForDirective(".hword", ".2byte");
317 Parser.addAliasForDirective(".word", ".4byte");
318 Parser.addAliasForDirective(".dword", ".8byte");
319 Parser.addAliasForDirective(".xword", ".8byte");
320
321 // Initialize the set of available features.
322 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
323 }
324
325 bool areEqualRegs(const MCParsedAsmOperand &Op1,
326 const MCParsedAsmOperand &Op2) const override;
327 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
328 SMLoc NameLoc, OperandVector &Operands) override;
329 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
330 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
331 SMLoc &EndLoc) override;
332 bool ParseDirective(AsmToken DirectiveID) override;
333 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
334 unsigned Kind) override;
335
336 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
337
338 static bool classifySymbolRef(const MCExpr *Expr,
339 AArch64MCExpr::VariantKind &ELFRefKind,
340 MCSymbolRefExpr::VariantKind &DarwinRefKind,
341 int64_t &Addend);
342};
343
344/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
345/// instruction.
346class AArch64Operand : public MCParsedAsmOperand {
347private:
348 enum KindTy {
349 k_Immediate,
350 k_ShiftedImm,
351 k_ImmRange,
352 k_CondCode,
353 k_Register,
354 k_MatrixRegister,
355 k_MatrixTileList,
356 k_SVCR,
357 k_VectorList,
358 k_VectorIndex,
359 k_Token,
360 k_SysReg,
361 k_SysCR,
362 k_Prefetch,
363 k_ShiftExtend,
364 k_FPImm,
365 k_Barrier,
366 k_PSBHint,
367 k_PHint,
368 k_BTIHint,
369 } Kind;
370
371 SMLoc StartLoc, EndLoc;
372
373 struct TokOp {
374 const char *Data;
375 unsigned Length;
376 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
377 };
378
379 // Separate shift/extend operand.
380 struct ShiftExtendOp {
381 AArch64_AM::ShiftExtendType Type;
382 unsigned Amount;
383 bool HasExplicitAmount;
384 };
385
386 struct RegOp {
387 unsigned RegNum;
388 RegKind Kind;
389 int ElementWidth;
390
391 // The register may be allowed as a different register class,
392 // e.g. for GPR64as32 or GPR32as64.
393 RegConstraintEqualityTy EqualityTy;
394
395 // In some cases the shift/extend needs to be explicitly parsed together
396 // with the register, rather than as a separate operand. This is needed
397 // for addressing modes where the instruction as a whole dictates the
398 // scaling/extend, rather than specific bits in the instruction.
399 // By parsing them as a single operand, we avoid the need to pass an
400 // extra operand in all CodeGen patterns (because all operands need to
401 // have an associated value), and we avoid the need to update TableGen to
402 // accept operands that have no associated bits in the instruction.
403 //
404 // An added benefit of parsing them together is that the assembler
405 // can give a sensible diagnostic if the scaling is not correct.
406 //
407 // The default is 'lsl #0' (HasExplicitAmount = false) if no
408 // ShiftExtend is specified.
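 // Illustrative example (not from the original source): in
 // "ldr x0, [x1, w2, uxtw #3]" the "w2, uxtw #3" part is parsed as one
 // register-with-extend operand, which lets the assembler diagnose a wrong
 // scaling such as "uxtw #2" for an 8-byte load.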
409 ShiftExtendOp ShiftExtend;
410 };
411
412 struct MatrixRegOp {
413 unsigned RegNum;
414 unsigned ElementWidth;
415 MatrixKind Kind;
416 };
417
418 struct MatrixTileListOp {
419 unsigned RegMask = 0;
420 };
421
422 struct VectorListOp {
423 unsigned RegNum;
424 unsigned Count;
425 unsigned Stride;
426 unsigned NumElements;
427 unsigned ElementWidth;
428 RegKind RegisterKind;
429 };
430
431 struct VectorIndexOp {
432 int Val;
433 };
434
435 struct ImmOp {
436 const MCExpr *Val;
437 };
438
439 struct ShiftedImmOp {
440 const MCExpr *Val;
441 unsigned ShiftAmount;
442 };
443
444 struct ImmRangeOp {
445 unsigned First;
446 unsigned Last;
447 };
448
449 struct CondCodeOp {
450 AArch64CC::CondCode Code;
451 };
452
453 struct FPImmOp {
454 uint64_t Val; // APFloat value bitcasted to uint64_t.
455 bool IsExact; // describes whether parsed value was exact.
456 };
457
458 struct BarrierOp {
459 const char *Data;
460 unsigned Length;
461 unsigned Val; // Not the enum since not all values have names.
462 bool HasnXSModifier;
463 };
464
465 struct SysRegOp {
466 const char *Data;
467 unsigned Length;
468 uint32_t MRSReg;
469 uint32_t MSRReg;
470 uint32_t PStateField;
471 };
472
473 struct SysCRImmOp {
474 unsigned Val;
475 };
476
477 struct PrefetchOp {
478 const char *Data;
479 unsigned Length;
480 unsigned Val;
481 };
482
483 struct PSBHintOp {
484 const char *Data;
485 unsigned Length;
486 unsigned Val;
487 };
488 struct PHintOp {
489 const char *Data;
490 unsigned Length;
491 unsigned Val;
492 };
493 struct BTIHintOp {
494 const char *Data;
495 unsigned Length;
496 unsigned Val;
497 };
498
499 struct SVCROp {
500 const char *Data;
501 unsigned Length;
502 unsigned PStateField;
503 };
504
505 union {
506 struct TokOp Tok;
507 struct RegOp Reg;
508 struct MatrixRegOp MatrixReg;
509 struct MatrixTileListOp MatrixTileList;
510 struct VectorListOp VectorList;
511 struct VectorIndexOp VectorIndex;
512 struct ImmOp Imm;
513 struct ShiftedImmOp ShiftedImm;
514 struct ImmRangeOp ImmRange;
515 struct CondCodeOp CondCode;
516 struct FPImmOp FPImm;
517 struct BarrierOp Barrier;
518 struct SysRegOp SysReg;
519 struct SysCRImmOp SysCRImm;
520 struct PrefetchOp Prefetch;
521 struct PSBHintOp PSBHint;
522 struct PHintOp PHint;
523 struct BTIHintOp BTIHint;
524 struct ShiftExtendOp ShiftExtend;
525 struct SVCROp SVCR;
526 };
527
528 // Keep the MCContext around as the MCExprs may need to be manipulated during
529 // the add<>Operands() calls.
530 MCContext &Ctx;
531
532public:
533 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
534
535 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
536 Kind = o.Kind;
537 StartLoc = o.StartLoc;
538 EndLoc = o.EndLoc;
539 switch (Kind) {
540 case k_Token:
541 Tok = o.Tok;
542 break;
543 case k_Immediate:
544 Imm = o.Imm;
545 break;
546 case k_ShiftedImm:
547 ShiftedImm = o.ShiftedImm;
548 break;
549 case k_ImmRange:
550 ImmRange = o.ImmRange;
551 break;
552 case k_CondCode:
553 CondCode = o.CondCode;
554 break;
555 case k_FPImm:
556 FPImm = o.FPImm;
557 break;
558 case k_Barrier:
559 Barrier = o.Barrier;
560 break;
561 case k_Register:
562 Reg = o.Reg;
563 break;
564 case k_MatrixRegister:
565 MatrixReg = o.MatrixReg;
566 break;
567 case k_MatrixTileList:
568 MatrixTileList = o.MatrixTileList;
569 break;
570 case k_VectorList:
571 VectorList = o.VectorList;
572 break;
573 case k_VectorIndex:
574 VectorIndex = o.VectorIndex;
575 break;
576 case k_SysReg:
577 SysReg = o.SysReg;
578 break;
579 case k_SysCR:
580 SysCRImm = o.SysCRImm;
581 break;
582 case k_Prefetch:
583 Prefetch = o.Prefetch;
584 break;
585 case k_PSBHint:
586 PSBHint = o.PSBHint;
587 break;
588 case k_PHint:
589 PHint = o.PHint;
590 break;
591 case k_BTIHint:
592 BTIHint = o.BTIHint;
593 break;
594 case k_ShiftExtend:
595 ShiftExtend = o.ShiftExtend;
596 break;
597 case k_SVCR:
598 SVCR = o.SVCR;
599 break;
600 }
601 }
602
603 /// getStartLoc - Get the location of the first token of this operand.
604 SMLoc getStartLoc() const override { return StartLoc; }
605 /// getEndLoc - Get the location of the last token of this operand.
606 SMLoc getEndLoc() const override { return EndLoc; }
607
608 StringRef getToken() const {
609 assert(Kind == k_Token && "Invalid access!");
610 return StringRef(Tok.Data, Tok.Length);
611 }
612
613 bool isTokenSuffix() const {
614 assert(Kind == k_Token && "Invalid access!");
615 return Tok.IsSuffix;
616 }
617
618 const MCExpr *getImm() const {
619 assert(Kind == k_Immediate && "Invalid access!");
620 return Imm.Val;
621 }
622
623 const MCExpr *getShiftedImmVal() const {
624 assert(Kind == k_ShiftedImm && "Invalid access!");
625 return ShiftedImm.Val;
626 }
627
628 unsigned getShiftedImmShift() const {
629 assert(Kind == k_ShiftedImm && "Invalid access!");
630 return ShiftedImm.ShiftAmount;
631 }
632
633 unsigned getFirstImmVal() const {
634 assert(Kind == k_ImmRange && "Invalid access!");
635 return ImmRange.First;
636 }
637
638 unsigned getLastImmVal() const {
639 assert(Kind == k_ImmRange && "Invalid access!");
640 return ImmRange.Last;
641 }
642
643 AArch64CC::CondCode getCondCode() const {
644 assert(Kind == k_CondCode && "Invalid access!");
645 return CondCode.Code;
646 }
647
648 APFloat getFPImm() const {
649 assert (Kind == k_FPImm && "Invalid access!");
650 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
651 }
652
653 bool getFPImmIsExact() const {
654 assert (Kind == k_FPImm && "Invalid access!");
655 return FPImm.IsExact;
656 }
657
658 unsigned getBarrier() const {
659 assert(Kind == k_Barrier && "Invalid access!");
660 return Barrier.Val;
661 }
662
663 StringRef getBarrierName() const {
664 assert(Kind == k_Barrier && "Invalid access!");
665 return StringRef(Barrier.Data, Barrier.Length);
666 }
667
668 bool getBarriernXSModifier() const {
669 assert(Kind == k_Barrier && "Invalid access!");
670 return Barrier.HasnXSModifier;
671 }
672
673 MCRegister getReg() const override {
674 assert(Kind == k_Register && "Invalid access!");
675 return Reg.RegNum;
676 }
677
678 unsigned getMatrixReg() const {
679 assert(Kind == k_MatrixRegister && "Invalid access!");
680 return MatrixReg.RegNum;
681 }
682
683 unsigned getMatrixElementWidth() const {
684 assert(Kind == k_MatrixRegister && "Invalid access!");
685 return MatrixReg.ElementWidth;
686 }
687
688 MatrixKind getMatrixKind() const {
689 assert(Kind == k_MatrixRegister && "Invalid access!");
690 return MatrixReg.Kind;
691 }
692
693 unsigned getMatrixTileListRegMask() const {
694 assert(isMatrixTileList() && "Invalid access!");
695 return MatrixTileList.RegMask;
696 }
697
698 RegConstraintEqualityTy getRegEqualityTy() const {
699 assert(Kind == k_Register && "Invalid access!");
700 return Reg.EqualityTy;
701 }
702
703 unsigned getVectorListStart() const {
704 assert(Kind == k_VectorList && "Invalid access!");
705 return VectorList.RegNum;
706 }
707
708 unsigned getVectorListCount() const {
709 assert(Kind == k_VectorList && "Invalid access!");
710 return VectorList.Count;
711 }
712
713 unsigned getVectorListStride() const {
714 assert(Kind == k_VectorList && "Invalid access!");
715 return VectorList.Stride;
716 }
717
718 int getVectorIndex() const {
719 assert(Kind == k_VectorIndex && "Invalid access!");
720 return VectorIndex.Val;
721 }
722
723 StringRef getSysReg() const {
724 assert(Kind == k_SysReg && "Invalid access!");
725 return StringRef(SysReg.Data, SysReg.Length);
726 }
727
728 unsigned getSysCR() const {
729 assert(Kind == k_SysCR && "Invalid access!");
730 return SysCRImm.Val;
731 }
732
733 unsigned getPrefetch() const {
734 assert(Kind == k_Prefetch && "Invalid access!");
735 return Prefetch.Val;
736 }
737
738 unsigned getPSBHint() const {
739 assert(Kind == k_PSBHint && "Invalid access!");
740 return PSBHint.Val;
741 }
742
743 unsigned getPHint() const {
744 assert(Kind == k_PHint && "Invalid access!");
745 return PHint.Val;
746 }
747
748 StringRef getPSBHintName() const {
749 assert(Kind == k_PSBHint && "Invalid access!");
750 return StringRef(PSBHint.Data, PSBHint.Length);
751 }
752
753 StringRef getPHintName() const {
754 assert(Kind == k_PHint && "Invalid access!");
755 return StringRef(PHint.Data, PHint.Length);
756 }
757
758 unsigned getBTIHint() const {
759 assert(Kind == k_BTIHint && "Invalid access!");
760 return BTIHint.Val;
761 }
762
763 StringRef getBTIHintName() const {
764 assert(Kind == k_BTIHint && "Invalid access!");
765 return StringRef(BTIHint.Data, BTIHint.Length);
766 }
767
768 StringRef getSVCR() const {
769 assert(Kind == k_SVCR && "Invalid access!");
770 return StringRef(SVCR.Data, SVCR.Length);
771 }
772
773 StringRef getPrefetchName() const {
774 assert(Kind == k_Prefetch && "Invalid access!");
775 return StringRef(Prefetch.Data, Prefetch.Length);
776 }
777
778 AArch64_AM::ShiftExtendType getShiftExtendType() const {
779 if (Kind == k_ShiftExtend)
780 return ShiftExtend.Type;
781 if (Kind == k_Register)
782 return Reg.ShiftExtend.Type;
783 llvm_unreachable("Invalid access!");
784 }
785
786 unsigned getShiftExtendAmount() const {
787 if (Kind == k_ShiftExtend)
788 return ShiftExtend.Amount;
789 if (Kind == k_Register)
790 return Reg.ShiftExtend.Amount;
791 llvm_unreachable("Invalid access!");
792 }
793
794 bool hasShiftExtendAmount() const {
795 if (Kind == k_ShiftExtend)
796 return ShiftExtend.HasExplicitAmount;
797 if (Kind == k_Register)
798 return Reg.ShiftExtend.HasExplicitAmount;
799 llvm_unreachable("Invalid access!");
800 }
801
802 bool isImm() const override { return Kind == k_Immediate; }
803 bool isMem() const override { return false; }
804
805 bool isUImm6() const {
806 if (!isImm())
807 return false;
808 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
809 if (!MCE)
810 return false;
811 int64_t Val = MCE->getValue();
812 return (Val >= 0 && Val < 64);
813 }
814
815 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
816
817 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
818 return isImmScaled<Bits, Scale>(true);
819 }
820
821 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
822 DiagnosticPredicate isUImmScaled() const {
823 if (IsRange && isImmRange() &&
824 (getLastImmVal() != getFirstImmVal() + Offset))
825 return DiagnosticPredicateTy::NoMatch;
826
827 return isImmScaled<Bits, Scale, IsRange>(false);
828 }
829
830 template <int Bits, int Scale, bool IsRange = false>
831 DiagnosticPredicate isImmScaled(bool Signed) const {
832 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
833 (isImmRange() && !IsRange))
834 return DiagnosticPredicateTy::NoMatch;
835
836 int64_t Val;
837 if (isImmRange())
838 Val = getFirstImmVal();
839 else {
840 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
841 if (!MCE)
842 return DiagnosticPredicateTy::NoMatch;
843 Val = MCE->getValue();
844 }
845
846 int64_t MinVal, MaxVal;
847 if (Signed) {
848 int64_t Shift = Bits - 1;
849 MinVal = (int64_t(1) << Shift) * -Scale;
850 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
851 } else {
852 MinVal = 0;
853 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
854 }
855
856 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
857 return DiagnosticPredicateTy::Match;
858
859 return DiagnosticPredicateTy::NearMatch;
860 }
861
862 DiagnosticPredicate isSVEPattern() const {
863 if (!isImm())
864 return DiagnosticPredicateTy::NoMatch;
865 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
866 if (!MCE)
867 return DiagnosticPredicateTy::NoMatch;
868 int64_t Val = MCE->getValue();
869 if (Val >= 0 && Val < 32)
870 return DiagnosticPredicateTy::Match;
871 return DiagnosticPredicateTy::NearMatch;
872 }
873
874 DiagnosticPredicate isSVEVecLenSpecifier() const {
875 if (!isImm())
876 return DiagnosticPredicateTy::NoMatch;
877 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
878 if (!MCE)
879 return DiagnosticPredicateTy::NoMatch;
880 int64_t Val = MCE->getValue();
881 if (Val >= 0 && Val <= 1)
882 return DiagnosticPredicateTy::Match;
883 return DiagnosticPredicateTy::NearMatch;
884 }
885
886 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
887 AArch64MCExpr::VariantKind ELFRefKind;
888 MCSymbolRefExpr::VariantKind DarwinRefKind;
889 int64_t Addend;
890 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
891 Addend)) {
892 // If we don't understand the expression, assume the best and
893 // let the fixup and relocation code deal with it.
894 return true;
895 }
896
897 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
898 ELFRefKind == AArch64MCExpr::VK_LO12 ||
899 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
900 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
901 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
902 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
903 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
904 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
906 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
908 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
909 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
910 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
911 // Note that we don't range-check the addend. It's adjusted modulo page
912 // size when converted, so there is no "out of range" condition when using
913 // @pageoff.
914 return true;
915 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
916 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
917 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
918 return Addend == 0;
919 }
920
921 return false;
922 }
923
924 template <int Scale> bool isUImm12Offset() const {
925 if (!isImm())
926 return false;
927
928 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
929 if (!MCE)
930 return isSymbolicUImm12Offset(getImm());
931
932 int64_t Val = MCE->getValue();
933 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
934 }
935
936 template <int N, int M>
937 bool isImmInRange() const {
938 if (!isImm())
939 return false;
940 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
941 if (!MCE)
942 return false;
943 int64_t Val = MCE->getValue();
944 return (Val >= N && Val <= M);
945 }
946
947 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
948 // a logical immediate can always be represented when inverted.
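 // Illustrative example (not from the original source): 0x0000ffff0000ffff
 // is a valid logical immediate, and so is its bitwise NOT
 // 0xffff0000ffff0000, so the same predicate also serves the inverted
 // (LogicalImmNot) operand forms.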
949 template <typename T>
950 bool isLogicalImm() const {
951 if (!isImm())
952 return false;
953 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
954 if (!MCE)
955 return false;
956
957 int64_t Val = MCE->getValue();
958 // Avoid left shift by 64 directly.
959 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
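 // For instance (illustrative, not from the original source): with
 // T = int32_t the two 16-bit shifts build Upper == 0xffffffff00000000
 // without ever shifting by 64.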
960 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
961 if ((Val & Upper) && (Val & Upper) != Upper)
962 return false;
963
964 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
965 }
966
967 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
968
969 bool isImmRange() const { return Kind == k_ImmRange; }
970
971 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
972 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
973 /// immediate that can be shifted by 'Shift'.
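 // Illustrative example (not part of the original comment): with Width == 12,
 // a plain #0x3000 yields (3, 12), #0x3 yields (3, 0), and an explicit
 // "#3, lsl #12" also yields (3, 12).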
974 template <unsigned Width>
975 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
976 if (isShiftedImm() && Width == getShiftedImmShift())
977 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
978 return std::make_pair(CE->getValue(), Width);
979
980 if (isImm())
981 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
982 int64_t Val = CE->getValue();
983 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
984 return std::make_pair(Val >> Width, Width);
985 else
986 return std::make_pair(Val, 0u);
987 }
988
989 return {};
990 }
991
992 bool isAddSubImm() const {
993 if (!isShiftedImm() && !isImm())
994 return false;
995
996 const MCExpr *Expr;
997
998 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
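 // Illustrative example (not from the original source): "add x0, x1, #291"
 // and "add x0, x1, #291, lsl #12" are both acceptable here, while
 // "add x0, x1, #291, lsl #8" is rejected by the check below.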
999 if (isShiftedImm()) {
1000 unsigned Shift = ShiftedImm.ShiftAmount;
1001 Expr = ShiftedImm.Val;
1002 if (Shift != 0 && Shift != 12)
1003 return false;
1004 } else {
1005 Expr = getImm();
1006 }
1007
1008 AArch64MCExpr::VariantKind ELFRefKind;
1009 MCSymbolRefExpr::VariantKind DarwinRefKind;
1010 int64_t Addend;
1011 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
1012 DarwinRefKind, Addend)) {
1013 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
1014 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
1015 (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
1016 ELFRefKind == AArch64MCExpr::VK_LO12 ||
1017 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
1018 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
1019 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
1020 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
1021 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
1022 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
1023 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
1024 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
1026 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
1027 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1028 }
1029
1030 // If it's a constant, it should be a real immediate in range.
1031 if (auto ShiftedVal = getShiftedVal<12>())
1032 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1033
1034 // If it's an expression, we hope for the best and let the fixup/relocation
1035 // code deal with it.
1036 return true;
1037 }
1038
1039 bool isAddSubImmNeg() const {
1040 if (!isShiftedImm() && !isImm())
1041 return false;
1042
1043 // Otherwise it should be a real negative immediate in range.
1044 if (auto ShiftedVal = getShiftedVal<12>())
1045 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1046
1047 return false;
1048 }
1049
1050 // Signed value in the range -128 to +127. For element widths of
1051 // 16 bits or higher it may also be a signed multiple of 256 in the
1052 // range -32768 to +32512.
1053 // For an element width of 8 bits a range of -128 to 255 is accepted,
1054 // since a copy of a byte can be either signed or unsigned.
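 // For example (illustrative, not from the original comment): with 16-bit
 // elements, #512 is accepted (encoded as #2, lsl #8) while #513 is not,
 // and with 8-bit elements both #-1 and #255 denote the same byte pattern.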
1055 template <typename T>
1056 DiagnosticPredicate isSVECpyImm() const {
1057 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1058 return DiagnosticPredicateTy::NoMatch;
1059
1060 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1061 std::is_same<int8_t, T>::value;
1062 if (auto ShiftedImm = getShiftedVal<8>())
1063 if (!(IsByte && ShiftedImm->second) &&
1064 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1065 << ShiftedImm->second))
1066 return DiagnosticPredicateTy::Match;
1067
1068 return DiagnosticPredicateTy::NearMatch;
1069 }
1070
1071 // Unsigned value in the range 0 to 255. For element widths of
1072 // 16 bits or higher it may also be a signed multiple of 256 in the
1073 // range 0 to 65280.
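 // For example (illustrative, not from the original comment): with 16-bit
 // elements, #65280 (255 << 8) is accepted, whereas #65281 cannot be encoded
 // and is rejected.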
1074 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1075 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1076 return DiagnosticPredicateTy::NoMatch;
1077
1078 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1079 std::is_same<int8_t, T>::value;
1080 if (auto ShiftedImm = getShiftedVal<8>())
1081 if (!(IsByte && ShiftedImm->second) &&
1082 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1083 << ShiftedImm->second))
1084 return DiagnosticPredicateTy::Match;
1085
1086 return DiagnosticPredicateTy::NearMatch;
1087 }
1088
1089 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1090 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1091 return DiagnosticPredicateTy::Match;
1092 return DiagnosticPredicateTy::NoMatch;
1093 }
1094
1095 bool isCondCode() const { return Kind == k_CondCode; }
1096
1097 bool isSIMDImmType10() const {
1098 if (!isImm())
1099 return false;
1100 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1101 if (!MCE)
1102 return false;
1103 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1104 }
1105
1106 template<int N>
1107 bool isBranchTarget() const {
1108 if (!isImm())
1109 return false;
1110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1111 if (!MCE)
1112 return true;
1113 int64_t Val = MCE->getValue();
1114 if (Val & 0x3)
1115 return false;
1116 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1117 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1118 }
1119
1120 bool
1121 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1122 if (!isImm())
1123 return false;
1124
1125 AArch64MCExpr::VariantKind ELFRefKind;
1126 MCSymbolRefExpr::VariantKind DarwinRefKind;
1127 int64_t Addend;
1128 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1129 DarwinRefKind, Addend)) {
1130 return false;
1131 }
1132 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1133 return false;
1134
1135 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1136 }
1137
1138 bool isMovWSymbolG3() const {
1140 }
1141
1142 bool isMovWSymbolG2() const {
1143 return isMovWSymbol(
1148 }
1149
1150 bool isMovWSymbolG1() const {
1151 return isMovWSymbol(
1157 }
1158
1159 bool isMovWSymbolG0() const {
1160 return isMovWSymbol(
1166 }
1167
1168 template<int RegWidth, int Shift>
1169 bool isMOVZMovAlias() const {
1170 if (!isImm()) return false;
1171
1172 const MCExpr *E = getImm();
1173 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1174 uint64_t Value = CE->getValue();
1175
1176 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1177 }
1178 // Only supports the case of Shift being 0 if an expression is used as an
1179 // operand
1180 return !Shift && E;
1181 }
1182
1183 template<int RegWidth, int Shift>
1184 bool isMOVNMovAlias() const {
1185 if (!isImm()) return false;
1186
1187 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1188 if (!CE) return false;
1189 uint64_t Value = CE->getValue();
1190
1191 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1192 }
1193
1194 bool isFPImm() const {
1195 return Kind == k_FPImm &&
1196 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1197 }
1198
1199 bool isBarrier() const {
1200 return Kind == k_Barrier && !getBarriernXSModifier();
1201 }
1202 bool isBarriernXS() const {
1203 return Kind == k_Barrier && getBarriernXSModifier();
1204 }
1205 bool isSysReg() const { return Kind == k_SysReg; }
1206
1207 bool isMRSSystemRegister() const {
1208 if (!isSysReg()) return false;
1209
1210 return SysReg.MRSReg != -1U;
1211 }
1212
1213 bool isMSRSystemRegister() const {
1214 if (!isSysReg()) return false;
1215 return SysReg.MSRReg != -1U;
1216 }
1217
1218 bool isSystemPStateFieldWithImm0_1() const {
1219 if (!isSysReg()) return false;
1220 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1221 }
1222
1223 bool isSystemPStateFieldWithImm0_15() const {
1224 if (!isSysReg())
1225 return false;
1226 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1227 }
1228
1229 bool isSVCR() const {
1230 if (Kind != k_SVCR)
1231 return false;
1232 return SVCR.PStateField != -1U;
1233 }
1234
1235 bool isReg() const override {
1236 return Kind == k_Register;
1237 }
1238
1239 bool isVectorList() const { return Kind == k_VectorList; }
1240
1241 bool isScalarReg() const {
1242 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1243 }
1244
1245 bool isNeonVectorReg() const {
1246 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1247 }
1248
1249 bool isNeonVectorRegLo() const {
1250 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1251 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1252 Reg.RegNum) ||
1253 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1254 Reg.RegNum));
1255 }
1256
1257 bool isNeonVectorReg0to7() const {
1258 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1259 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1260 Reg.RegNum));
1261 }
1262
1263 bool isMatrix() const { return Kind == k_MatrixRegister; }
1264 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1265
1266 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1267 RegKind RK;
1268 switch (Class) {
1269 case AArch64::PPRRegClassID:
1270 case AArch64::PPR_3bRegClassID:
1271 case AArch64::PPR_p8to15RegClassID:
1272 case AArch64::PNRRegClassID:
1273 case AArch64::PNR_p8to15RegClassID:
1274 case AArch64::PPRorPNRRegClassID:
1275 RK = RegKind::SVEPredicateAsCounter;
1276 break;
1277 default:
1278 llvm_unreachable("Unsupported register class");
1279 }
1280
1281 return (Kind == k_Register && Reg.Kind == RK) &&
1282 AArch64MCRegisterClasses[Class].contains(getReg());
1283 }
1284
1285 template <unsigned Class> bool isSVEVectorReg() const {
1286 RegKind RK;
1287 switch (Class) {
1288 case AArch64::ZPRRegClassID:
1289 case AArch64::ZPR_3bRegClassID:
1290 case AArch64::ZPR_4bRegClassID:
1291 case AArch64::ZPRMul2_LoRegClassID:
1292 case AArch64::ZPRMul2_HiRegClassID:
1293 case AArch64::ZPR_KRegClassID:
1294 RK = RegKind::SVEDataVector;
1295 break;
1296 case AArch64::PPRRegClassID:
1297 case AArch64::PPR_3bRegClassID:
1298 case AArch64::PPR_p8to15RegClassID:
1299 case AArch64::PNRRegClassID:
1300 case AArch64::PNR_p8to15RegClassID:
1301 case AArch64::PPRorPNRRegClassID:
1302 RK = RegKind::SVEPredicateVector;
1303 break;
1304 default:
1305 llvm_unreachable("Unsupported register class");
1306 }
1307
1308 return (Kind == k_Register && Reg.Kind == RK) &&
1309 AArch64MCRegisterClasses[Class].contains(getReg());
1310 }
1311
1312 template <unsigned Class> bool isFPRasZPR() const {
1313 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1314 AArch64MCRegisterClasses[Class].contains(getReg());
1315 }
1316
1317 template <int ElementWidth, unsigned Class>
1318 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1319 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1320 return DiagnosticPredicateTy::NoMatch;
1321
1322 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1323 return DiagnosticPredicateTy::Match;
1324
1325 return DiagnosticPredicateTy::NearMatch;
1326 }
1327
1328 template <int ElementWidth, unsigned Class>
1329 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1330 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1331 Reg.Kind != RegKind::SVEPredicateVector))
1332 return DiagnosticPredicateTy::NoMatch;
1333
1334 if ((isSVEPredicateAsCounterReg<Class>() ||
1335 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1336 Reg.ElementWidth == ElementWidth)
1337 return DiagnosticPredicateTy::Match;
1338
1339 return DiagnosticPredicateTy::NearMatch;
1340 }
1341
1342 template <int ElementWidth, unsigned Class>
1343 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1344 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1345 return DiagnosticPredicateTy::NoMatch;
1346
1347 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1348 return DiagnosticPredicateTy::Match;
1349
1350 return DiagnosticPredicateTy::NearMatch;
1351 }
1352
1353 template <int ElementWidth, unsigned Class>
1354 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1355 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1356 return DiagnosticPredicateTy::NoMatch;
1357
1358 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1359 return DiagnosticPredicateTy::Match;
1360
1361 return DiagnosticPredicateTy::NearMatch;
1362 }
1363
1364 template <int ElementWidth, unsigned Class,
1365 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1366 bool ShiftWidthAlwaysSame>
1367 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1368 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1369 if (!VectorMatch.isMatch())
1370 return DiagnosticPredicateTy::NoMatch;
1371
1372 // Give a more specific diagnostic when the user has explicitly typed in
1373 // a shift-amount that does not match what is expected, but for which
1374 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1375 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1376 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1377 ShiftExtendTy == AArch64_AM::SXTW) &&
1378 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1379 return DiagnosticPredicateTy::NoMatch;
1380
1381 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1382 return DiagnosticPredicateTy::Match;
1383
1384 return DiagnosticPredicateTy::NearMatch;
1385 }
1386
1387 bool isGPR32as64() const {
1388 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1389 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1390 }
1391
1392 bool isGPR64as32() const {
1393 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1394 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1395 }
1396
1397 bool isGPR64x8() const {
1398 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1399 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1400 Reg.RegNum);
1401 }
1402
1403 bool isWSeqPair() const {
1404 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1405 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1406 Reg.RegNum);
1407 }
1408
1409 bool isXSeqPair() const {
1410 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1411 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1412 Reg.RegNum);
1413 }
1414
1415 bool isSyspXzrPair() const {
1416 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1417 }
1418
1419 template<int64_t Angle, int64_t Remainder>
1420 DiagnosticPredicate isComplexRotation() const {
1421 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1422
1423 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1424 if (!CE) return DiagnosticPredicateTy::NoMatch;
1425 uint64_t Value = CE->getValue();
1426
1427 if (Value % Angle == Remainder && Value <= 270)
1428 return DiagnosticPredicateTy::Match;
1429 return DiagnosticPredicateTy::NearMatch;
1430 }
1431
1432 template <unsigned RegClassID> bool isGPR64() const {
1433 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1434 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1435 }
1436
1437 template <unsigned RegClassID, int ExtWidth>
1438 DiagnosticPredicate isGPR64WithShiftExtend() const {
1439 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1440 return DiagnosticPredicateTy::NoMatch;
1441
1442 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1443 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1444 return DiagnosticPredicateTy::Match;
1445 return DiagnosticPredicateTy::NearMatch;
1446 }
1447
1448 /// Is this a vector list with the type implicit (presumably attached to the
1449 /// instruction itself)?
1450 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1451 bool isImplicitlyTypedVectorList() const {
1452 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1453 VectorList.NumElements == 0 &&
1454 VectorList.RegisterKind == VectorKind &&
1455 (!IsConsecutive || (VectorList.Stride == 1));
1456 }
1457
1458 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1459 unsigned ElementWidth, unsigned Stride = 1>
1460 bool isTypedVectorList() const {
1461 if (Kind != k_VectorList)
1462 return false;
1463 if (VectorList.Count != NumRegs)
1464 return false;
1465 if (VectorList.RegisterKind != VectorKind)
1466 return false;
1467 if (VectorList.ElementWidth != ElementWidth)
1468 return false;
1469 if (VectorList.Stride != Stride)
1470 return false;
1471 return VectorList.NumElements == NumElements;
1472 }
1473
1474 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1475 unsigned ElementWidth, unsigned RegClass>
1476 DiagnosticPredicate isTypedVectorListMultiple() const {
1477 bool Res =
1478 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1479 if (!Res)
1480 return DiagnosticPredicateTy::NoMatch;
1481 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1482 return DiagnosticPredicateTy::NearMatch;
1483 return DiagnosticPredicateTy::Match;
1484 }
1485
1486 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1487 unsigned ElementWidth>
1488 DiagnosticPredicate isTypedVectorListStrided() const {
1489 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1490 ElementWidth, Stride>();
1491 if (!Res)
1492 return DiagnosticPredicateTy::NoMatch;
1493 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1494 ((VectorList.RegNum >= AArch64::Z16) &&
1495 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1496 return DiagnosticPredicateTy::Match;
1497 return DiagnosticPredicateTy::NoMatch;
1498 }
1499
1500 template <int Min, int Max>
1501 DiagnosticPredicate isVectorIndex() const {
1502 if (Kind != k_VectorIndex)
1503 return DiagnosticPredicateTy::NoMatch;
1504 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1505 return DiagnosticPredicateTy::Match;
1506 return DiagnosticPredicateTy::NearMatch;
1507 }
1508
1509 bool isToken() const override { return Kind == k_Token; }
1510
1511 bool isTokenEqual(StringRef Str) const {
1512 return Kind == k_Token && getToken() == Str;
1513 }
1514 bool isSysCR() const { return Kind == k_SysCR; }
1515 bool isPrefetch() const { return Kind == k_Prefetch; }
1516 bool isPSBHint() const { return Kind == k_PSBHint; }
1517 bool isPHint() const { return Kind == k_PHint; }
1518 bool isBTIHint() const { return Kind == k_BTIHint; }
1519 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1520 bool isShifter() const {
1521 if (!isShiftExtend())
1522 return false;
1523
1524 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1525 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1526 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1527 ST == AArch64_AM::MSL);
1528 }
1529
1530 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1531 if (Kind != k_FPImm)
1532 return DiagnosticPredicateTy::NoMatch;
1533
1534 if (getFPImmIsExact()) {
1535 // Lookup the immediate from table of supported immediates.
1536 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1537 assert(Desc && "Unknown enum value");
1538
1539 // Calculate its FP value.
1540 APFloat RealVal(APFloat::IEEEdouble());
1541 auto StatusOrErr =
1542 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1543 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1544 llvm_unreachable("FP immediate is not exact");
1545
1546 if (getFPImm().bitwiseIsEqual(RealVal))
1547 return DiagnosticPredicateTy::Match;
1548 }
1549
1550 return DiagnosticPredicateTy::NearMatch;
1551 }
1552
1553 template <unsigned ImmA, unsigned ImmB>
1554 DiagnosticPredicate isExactFPImm() const {
1555 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1556 if ((Res = isExactFPImm<ImmA>()))
1557 return DiagnosticPredicateTy::Match;
1558 if ((Res = isExactFPImm<ImmB>()))
1559 return DiagnosticPredicateTy::Match;
1560 return Res;
1561 }
1562
1563 bool isExtend() const {
1564 if (!isShiftExtend())
1565 return false;
1566
1567 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1568 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1569 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1570 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1571 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1572 ET == AArch64_AM::LSL) &&
1573 getShiftExtendAmount() <= 4;
1574 }
1575
1576 bool isExtend64() const {
1577 if (!isExtend())
1578 return false;
1579 // Make sure the extend expects a 32-bit source register.
1580 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1581 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1582 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1583 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1584 }
1585
1586 bool isExtendLSL64() const {
1587 if (!isExtend())
1588 return false;
1589 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1590 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1591 ET == AArch64_AM::LSL) &&
1592 getShiftExtendAmount() <= 4;
1593 }
1594
1595 bool isLSLImm3Shift() const {
1596 if (!isShiftExtend())
1597 return false;
1598 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1599 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1600 }
1601
1602 template<int Width> bool isMemXExtend() const {
1603 if (!isExtend())
1604 return false;
1605 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1606 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1607 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1608 getShiftExtendAmount() == 0);
1609 }
1610
1611 template<int Width> bool isMemWExtend() const {
1612 if (!isExtend())
1613 return false;
1614 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1615 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1616 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1617 getShiftExtendAmount() == 0);
1618 }
1619
1620 template <unsigned width>
1621 bool isArithmeticShifter() const {
1622 if (!isShifter())
1623 return false;
1624
1625 // An arithmetic shifter is LSL, LSR, or ASR.
1626 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1627 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1628 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1629 }
1630
1631 template <unsigned width>
1632 bool isLogicalShifter() const {
1633 if (!isShifter())
1634 return false;
1635
1636 // A logical shifter is LSL, LSR, ASR or ROR.
1637 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1638 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1639 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1640 getShiftExtendAmount() < width;
1641 }
1642
1643 bool isMovImm32Shifter() const {
1644 if (!isShifter())
1645 return false;
1646
1647 // A 32-bit MOVi shifter is LSL of 0 or 16.
1648 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1649 if (ST != AArch64_AM::LSL)
1650 return false;
1651 uint64_t Val = getShiftExtendAmount();
1652 return (Val == 0 || Val == 16);
1653 }
1654
1655 bool isMovImm64Shifter() const {
1656 if (!isShifter())
1657 return false;
1658
1659 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1660 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1661 if (ST != AArch64_AM::LSL)
1662 return false;
1663 uint64_t Val = getShiftExtendAmount();
1664 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1665 }
1666
1667 bool isLogicalVecShifter() const {
1668 if (!isShifter())
1669 return false;
1670
1671 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1672 unsigned Shift = getShiftExtendAmount();
1673 return getShiftExtendType() == AArch64_AM::LSL &&
1674 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1675 }
1676
1677 bool isLogicalVecHalfWordShifter() const {
1678 if (!isLogicalVecShifter())
1679 return false;
1680
1681 // A logical vector half-word shifter is a left shift by 0 or 8.
1682 unsigned Shift = getShiftExtendAmount();
1683 return getShiftExtendType() == AArch64_AM::LSL &&
1684 (Shift == 0 || Shift == 8);
1685 }
1686
1687 bool isMoveVecShifter() const {
1688 if (!isShiftExtend())
1689 return false;
1690
1691 // A move vector shifter is an MSL left shift by 8 or 16.
1692 unsigned Shift = getShiftExtendAmount();
1693 return getShiftExtendType() == AArch64_AM::MSL &&
1694 (Shift == 8 || Shift == 16);
1695 }
1696
1697 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1698 // to LDUR/STUR when the offset is not legal for the former but is for
1699 // the latter. As such, in addition to checking for being a legal unscaled
1700 // address, also check that it is not a legal scaled address. This avoids
1701 // ambiguity in the matcher.
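 // Illustrative example (not from the original source): "ldr x0, [x1, #1]"
 // has an offset that is not a multiple of 8, so it only matches the
 // fallback form and is emitted as "ldur x0, [x1, #1]".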
1702 template<int Width>
1703 bool isSImm9OffsetFB() const {
1704 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1705 }
1706
1707 bool isAdrpLabel() const {
1708 // Validation was handled during parsing, so we just verify that
1709 // something didn't go haywire.
1710 if (!isImm())
1711 return false;
1712
1713 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1714 int64_t Val = CE->getValue();
1715 int64_t Min = - (4096 * (1LL << (21 - 1)));
1716 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1717 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1718 }
1719
1720 return true;
1721 }
1722
1723 bool isAdrLabel() const {
1724 // Validation was handled during parsing, so we just verify that
1725 // something didn't go haywire.
1726 if (!isImm())
1727 return false;
1728
1729 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1730 int64_t Val = CE->getValue();
1731 int64_t Min = - (1LL << (21 - 1));
1732 int64_t Max = ((1LL << (21 - 1)) - 1);
1733 return Val >= Min && Val <= Max;
1734 }
1735
1736 return true;
1737 }
1738
1739 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1740 DiagnosticPredicate isMatrixRegOperand() const {
1741 if (!isMatrix())
1742 return DiagnosticPredicateTy::NoMatch;
1743 if (getMatrixKind() != Kind ||
1744 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1745 EltSize != getMatrixElementWidth())
1746 return DiagnosticPredicateTy::NearMatch;
1747 return DiagnosticPredicateTy::Match;
1748 }
1749
1750 bool isPAuthPCRelLabel16Operand() const {
1751 // PAuth PCRel16 operands are similar to regular branch targets, but only
1752 // negative values are allowed for concrete immediates, as the signing
1753 // instruction is expected to be at a lower address than the use.
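 // For instance (illustrative, not from the original comment), a concrete
 // immediate of -4 is accepted while +4 is rejected by the checks below.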
1754 if (!isImm())
1755 return false;
1756 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1757 if (!MCE)
1758 return true;
1759 int64_t Val = MCE->getValue();
1760 if (Val & 0b11)
1761 return false;
1762 return (Val <= 0) && (Val > -(1 << 18));
1763 }
1764
1765 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1766 // Add as immediates when possible. Null MCExpr = 0.
1767 if (!Expr)
1768 Inst.addOperand(MCOperand::createImm(0));
1769 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1770 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1771 else
1772 Inst.addOperand(MCOperand::createExpr(Expr));
1773 }
1774
1775 void addRegOperands(MCInst &Inst, unsigned N) const {
1776 assert(N == 1 && "Invalid number of operands!");
1777 Inst.addOperand(MCOperand::createReg(getReg()));
1778 }
1779
1780 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!");
1782 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1783 }
1784
1785 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1786 assert(N == 1 && "Invalid number of operands!");
1787 assert(
1788 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1789
1790 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1791 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1792 RI->getEncodingValue(getReg()));
1793
1794 Inst.addOperand(MCOperand::createReg(Reg));
1795 }
1796
1797 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1798 assert(N == 1 && "Invalid number of operands!");
1799 assert(
1800 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1801
1802 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1803 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1804 RI->getEncodingValue(getReg()));
1805
1806 Inst.addOperand(MCOperand::createReg(Reg));
1807 }
1808
1809 template <int Width>
1810 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1811 unsigned Base;
1812 switch (Width) {
1813 case 8: Base = AArch64::B0; break;
1814 case 16: Base = AArch64::H0; break;
1815 case 32: Base = AArch64::S0; break;
1816 case 64: Base = AArch64::D0; break;
1817 case 128: Base = AArch64::Q0; break;
1818 default:
1819 llvm_unreachable("Unsupported width");
1820 }
1821 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1822 }
1823
1824 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1825 assert(N == 1 && "Invalid number of operands!");
1826 unsigned Reg = getReg();
1827 // Normalise to PPR
1828 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
 1829 Reg = Reg - AArch64::PN0 + AArch64::P0;
 1830 Inst.addOperand(MCOperand::createReg(Reg));
1831 }
1832
1833 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1834 assert(N == 1 && "Invalid number of operands!");
1835 Inst.addOperand(
1836 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1837 }
1838
1839 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1840 assert(N == 1 && "Invalid number of operands!");
1841 assert(
1842 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1843 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1844 }
1845
1846 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1847 assert(N == 1 && "Invalid number of operands!");
1848 assert(
 1849 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
 1850 Inst.addOperand(MCOperand::createReg(getReg()));
1851 }
1852
1853 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
 1854 assert(N == 1 && "Invalid number of operands!");
 1855 Inst.addOperand(MCOperand::createReg(getReg()));
1856 }
1857
1858 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
 1859 assert(N == 1 && "Invalid number of operands!");
 1860 Inst.addOperand(MCOperand::createReg(getReg()));
1861 }
1862
1863 enum VecListIndexType {
1864 VecListIdx_DReg = 0,
1865 VecListIdx_QReg = 1,
1866 VecListIdx_ZReg = 2,
1867 VecListIdx_PReg = 3,
1868 };
1869
1870 template <VecListIndexType RegTy, unsigned NumRegs,
1871 bool IsConsecutive = false>
1872 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1873 assert(N == 1 && "Invalid number of operands!");
1874 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1875 "Expected consecutive registers");
1876 static const unsigned FirstRegs[][5] = {
1877 /* DReg */ { AArch64::Q0,
1878 AArch64::D0, AArch64::D0_D1,
1879 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1880 /* QReg */ { AArch64::Q0,
1881 AArch64::Q0, AArch64::Q0_Q1,
1882 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1883 /* ZReg */ { AArch64::Z0,
1884 AArch64::Z0, AArch64::Z0_Z1,
1885 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1886 /* PReg */ { AArch64::P0,
1887 AArch64::P0, AArch64::P0_P1 }
1888 };
1889
1890 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1891 " NumRegs must be <= 4 for ZRegs");
1892
1893 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1894 " NumRegs must be <= 2 for PRegs");
1895
1896 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1897 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1898 FirstRegs[(unsigned)RegTy][0]));
1899 }
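 // Example (illustrative, not from the source): a NEON list "{ v2.8h, v3.8h }"
 // matched as a two-register Q list uses FirstRegs[VecListIdx_QReg][2] == Q0_Q1,
 // so the operand emitted is Q0_Q1 + (Q2 - Q0), i.e. the sequential Q-register
 // tuple starting at Q2.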
1900
1901 template <unsigned NumRegs>
1902 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1903 assert(N == 1 && "Invalid number of operands!");
1904 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1905
1906 switch (NumRegs) {
1907 case 2:
1908 if (getVectorListStart() < AArch64::Z16) {
1909 assert((getVectorListStart() < AArch64::Z8) &&
 1910 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
 1911 Inst.addOperand(MCOperand::createReg(
1912 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1913 } else {
1914 assert((getVectorListStart() < AArch64::Z24) &&
 1915 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
 1916 Inst.addOperand(MCOperand::createReg(
1917 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1918 }
1919 break;
1920 case 4:
1921 if (getVectorListStart() < AArch64::Z16) {
1922 assert((getVectorListStart() < AArch64::Z4) &&
 1923 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
 1924 Inst.addOperand(MCOperand::createReg(
1925 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1926 } else {
1927 assert((getVectorListStart() < AArch64::Z20) &&
 1928 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
 1929 Inst.addOperand(MCOperand::createReg(
1930 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1931 }
1932 break;
1933 default:
1934 llvm_unreachable("Unsupported number of registers for strided vec list");
1935 }
1936 }
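 // Example (illustrative, not from the source): a strided two-register list
 // starting at Z3 (e.g. "{ z3.d, z11.d }") takes the first branch above and is
 // emitted as Z0_Z8 + (Z3 - Z0), i.e. the Z3/Z11 pair; a four-register list
 // starting at Z19 would instead select Z16_Z20_Z24_Z28 + (Z19 - Z16).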
1937
1938 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1939 assert(N == 1 && "Invalid number of operands!");
1940 unsigned RegMask = getMatrixTileListRegMask();
1941 assert(RegMask <= 0xFF && "Invalid mask!");
1942 Inst.addOperand(MCOperand::createImm(RegMask));
1943 }
1944
1945 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1946 assert(N == 1 && "Invalid number of operands!");
1947 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1948 }
1949
1950 template <unsigned ImmIs0, unsigned ImmIs1>
1951 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1952 assert(N == 1 && "Invalid number of operands!");
1953 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1954 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1955 }
1956
1957 void addImmOperands(MCInst &Inst, unsigned N) const {
1958 assert(N == 1 && "Invalid number of operands!");
1959 // If this is a pageoff symrefexpr with an addend, adjust the addend
1960 // to be only the page-offset portion. Otherwise, just add the expr
1961 // as-is.
1962 addExpr(Inst, getImm());
1963 }
1964
1965 template <int Shift>
1966 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1967 assert(N == 2 && "Invalid number of operands!");
1968 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1969 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1970 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1971 } else if (isShiftedImm()) {
1972 addExpr(Inst, getShiftedImmVal());
1973 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1974 } else {
 1975 addExpr(Inst, getImm());
 1976 Inst.addOperand(MCOperand::createImm(0));
1977 }
1978 }
1979
1980 template <int Shift>
1981 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1982 assert(N == 2 && "Invalid number of operands!");
1983 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1984 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1985 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1986 } else
1987 llvm_unreachable("Not a shifted negative immediate");
1988 }
1989
1990 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
 1991 assert(N == 1 && "Invalid number of operands!");
 1992 Inst.addOperand(MCOperand::createImm(getCondCode()));
1993 }
1994
1995 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1996 assert(N == 1 && "Invalid number of operands!");
1997 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1998 if (!MCE)
1999 addExpr(Inst, getImm());
2000 else
2001 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2002 }
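 // Example (illustrative, not from the source): if the parsed expression folds
 // to the constant 0x12345, the ADRP immediate emitted above is
 // 0x12345 >> 12 == 0x12, i.e. the 4 KiB page number rather than the raw byte
 // offset.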
2003
2004 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2005 addImmOperands(Inst, N);
2006 }
2007
2008 template<int Scale>
2009 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2010 assert(N == 1 && "Invalid number of operands!");
2011 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2012
2013 if (!MCE) {
2014 Inst.addOperand(MCOperand::createExpr(getImm()));
2015 return;
2016 }
2017 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2018 }
2019
2020 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2021 assert(N == 1 && "Invalid number of operands!");
 2022 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
 2023 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2024 }
2025
2026 template <int Scale>
2027 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2028 assert(N == 1 && "Invalid number of operands!");
2029 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2030 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2031 }
2032
2033 template <int Scale>
2034 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2035 assert(N == 1 && "Invalid number of operands!");
2036 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2037 }
2038
2039 template <typename T>
2040 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2041 assert(N == 1 && "Invalid number of operands!");
2042 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2043 std::make_unsigned_t<T> Val = MCE->getValue();
2044 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2045 Inst.addOperand(MCOperand::createImm(encoding));
2046 }
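 // Example (illustrative, not from the source): for "and w0, w1, #0xf0f0f0f0"
 // the 32-bit value is a repeating pattern of four ones per byte, so
 // encodeLogicalImmediate can express it as an N:immr:imms bitmask immediate;
 // arbitrary constants such as 0x12345678 are not encodable and are rejected
 // earlier by the operand predicate.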
2047
2048 template <typename T>
2049 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2050 assert(N == 1 && "Invalid number of operands!");
2051 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2052 std::make_unsigned_t<T> Val = ~MCE->getValue();
2053 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2054 Inst.addOperand(MCOperand::createImm(encoding));
2055 }
2056
2057 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2058 assert(N == 1 && "Invalid number of operands!");
 2059 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
 2060 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2061 Inst.addOperand(MCOperand::createImm(encoding));
2062 }
2063
2064 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2065 // Branch operands don't encode the low bits, so shift them off
2066 // here. If it's a label, however, just put it on directly as there's
2067 // not enough information now to do anything.
2068 assert(N == 1 && "Invalid number of operands!");
2069 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2070 if (!MCE) {
2071 addExpr(Inst, getImm());
2072 return;
2073 }
2074 assert(MCE && "Invalid constant immediate operand!");
2075 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2076 }
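 // Example (illustrative, not from the source): a resolved branch offset of +8
 // bytes becomes the immediate 2 after the ">> 2" above, since B/BL targets are
 // always word-aligned and the low two bits are implicit.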
2077
2078 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2079 // PC-relative operands don't encode the low bits, so shift them off
2080 // here. If it's a label, however, just put it on directly as there's
2081 // not enough information now to do anything.
2082 assert(N == 1 && "Invalid number of operands!");
2083 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2084 if (!MCE) {
2085 addExpr(Inst, getImm());
2086 return;
2087 }
2088 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2089 }
2090
2091 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2092 // Branch operands don't encode the low bits, so shift them off
2093 // here. If it's a label, however, just put it on directly as there's
2094 // not enough information now to do anything.
2095 assert(N == 1 && "Invalid number of operands!");
2096 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2097 if (!MCE) {
2098 addExpr(Inst, getImm());
2099 return;
2100 }
2101 assert(MCE && "Invalid constant immediate operand!");
2102 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2103 }
2104
2105 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2106 // Branch operands don't encode the low bits, so shift them off
2107 // here. If it's a label, however, just put it on directly as there's
2108 // not enough information now to do anything.
2109 assert(N == 1 && "Invalid number of operands!");
2110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2111 if (!MCE) {
2112 addExpr(Inst, getImm());
2113 return;
2114 }
2115 assert(MCE && "Invalid constant immediate operand!");
2116 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2117 }
2118
2119 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2120 // Branch operands don't encode the low bits, so shift them off
2121 // here. If it's a label, however, just put it on directly as there's
2122 // not enough information now to do anything.
2123 assert(N == 1 && "Invalid number of operands!");
2124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2125 if (!MCE) {
2126 addExpr(Inst, getImm());
2127 return;
2128 }
2129 assert(MCE && "Invalid constant immediate operand!");
2130 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2131 }
2132
2133 void addFPImmOperands(MCInst &Inst, unsigned N) const {
 2134 assert(N == 1 && "Invalid number of operands!");
 2135 Inst.addOperand(MCOperand::createImm(
2136 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2137 }
2138
2139 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(MCOperand::createImm(getBarrier()));
2142 }
2143
2144 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146 Inst.addOperand(MCOperand::createImm(getBarrier()));
2147 }
2148
2149 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2150 assert(N == 1 && "Invalid number of operands!");
2151
2152 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2153 }
2154
2155 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2156 assert(N == 1 && "Invalid number of operands!");
2157
2158 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2159 }
2160
2161 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2162 assert(N == 1 && "Invalid number of operands!");
2163
2164 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2165 }
2166
2167 void addSVCROperands(MCInst &Inst, unsigned N) const {
2168 assert(N == 1 && "Invalid number of operands!");
2169
2170 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2171 }
2172
2173 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2174 assert(N == 1 && "Invalid number of operands!");
2175
2176 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2177 }
2178
2179 void addSysCROperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(MCOperand::createImm(getSysCR()));
2182 }
2183
2184 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2187 }
2188
2189 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2192 }
2193
2194 void addPHintOperands(MCInst &Inst, unsigned N) const {
2195 assert(N == 1 && "Invalid number of operands!");
2196 Inst.addOperand(MCOperand::createImm(getPHint()));
2197 }
2198
2199 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2200 assert(N == 1 && "Invalid number of operands!");
2201 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2202 }
2203
2204 void addShifterOperands(MCInst &Inst, unsigned N) const {
2205 assert(N == 1 && "Invalid number of operands!");
2206 unsigned Imm =
 2207 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
 2208 Inst.addOperand(MCOperand::createImm(Imm));
2209 }
2210
2211 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2212 assert(N == 1 && "Invalid number of operands!");
 2213 unsigned Imm = getShiftExtendAmount();
 2214 Inst.addOperand(MCOperand::createImm(Imm));
2215 }
2216
2217 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2218 assert(N == 1 && "Invalid number of operands!");
2219
2220 if (!isScalarReg())
2221 return;
2222
2223 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
 2224 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
 2225 .getRegister(RI->getEncodingValue(getReg()));
2226 if (Reg != AArch64::XZR)
2227 llvm_unreachable("wrong register");
2228
2229 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2230 }
2231
2232 void addExtendOperands(MCInst &Inst, unsigned N) const {
2233 assert(N == 1 && "Invalid number of operands!");
2234 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2235 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
 2236 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
 2237 Inst.addOperand(MCOperand::createImm(Imm));
2238 }
2239
2240 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2241 assert(N == 1 && "Invalid number of operands!");
2242 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2243 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
 2244 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
 2245 Inst.addOperand(MCOperand::createImm(Imm));
2246 }
2247
2248 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2249 assert(N == 2 && "Invalid number of operands!");
2250 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2251 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2252 Inst.addOperand(MCOperand::createImm(IsSigned));
2253 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2254 }
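 // Example (illustrative, not from the source): for "ldr x0, [x1, w2, sxtw #3]"
 // the extend is SXTW with a non-zero amount, so the two immediates added above
 // are IsSigned = 1 and DoShift = 1; a plain "uxtw" with no amount yields 0, 0.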
2255
2256 // For 8-bit load/store instructions with a register offset, both the
2257 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2258 // they're disambiguated by whether the shift was explicit or implicit rather
2259 // than its size.
2260 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2261 assert(N == 2 && "Invalid number of operands!");
2262 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2263 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2264 Inst.addOperand(MCOperand::createImm(IsSigned));
2265 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2266 }
2267
2268 template<int Shift>
2269 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2270 assert(N == 1 && "Invalid number of operands!");
2271
2272 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2273 if (CE) {
2274 uint64_t Value = CE->getValue();
2275 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2276 } else {
2277 addExpr(Inst, getImm());
2278 }
2279 }
2280
2281 template<int Shift>
2282 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2283 assert(N == 1 && "Invalid number of operands!");
2284
2285 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2286 uint64_t Value = CE->getValue();
2287 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2288 }
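 // Example (illustrative, not from the source): the alias "mov x0, #0x12340000"
 // is matched as a MOVZ with Shift == 16, so addMOVZMovAliasOperands emits
 // (0x12340000 >> 16) & 0xffff == 0x1234; the MOVN form does the same after
 // inverting the value.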
2289
2290 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2291 assert(N == 1 && "Invalid number of operands!");
2292 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2293 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2294 }
2295
2296 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2297 assert(N == 1 && "Invalid number of operands!");
2298 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2299 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2300 }
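 // Example (illustrative, not from the source): FCMLA accepts rotations
 // 0/90/180/270, which the "even" conversion above maps to 0..3 via "/ 90";
 // FCADD accepts 90 or 270, which the "odd" conversion maps to 0 or 1 via
 // "(rot - 90) / 180".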
2301
2302 void print(raw_ostream &OS) const override;
2303
2304 static std::unique_ptr<AArch64Operand>
2305 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2306 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2307 Op->Tok.Data = Str.data();
2308 Op->Tok.Length = Str.size();
2309 Op->Tok.IsSuffix = IsSuffix;
2310 Op->StartLoc = S;
2311 Op->EndLoc = S;
2312 return Op;
2313 }
2314
2315 static std::unique_ptr<AArch64Operand>
2316 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
 2317 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
 2318 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2319 unsigned ShiftAmount = 0,
2320 unsigned HasExplicitAmount = false) {
2321 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2322 Op->Reg.RegNum = RegNum;
2323 Op->Reg.Kind = Kind;
2324 Op->Reg.ElementWidth = 0;
2325 Op->Reg.EqualityTy = EqTy;
2326 Op->Reg.ShiftExtend.Type = ExtTy;
2327 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2328 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2329 Op->StartLoc = S;
2330 Op->EndLoc = E;
2331 return Op;
2332 }
2333
2334 static std::unique_ptr<AArch64Operand>
2335 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
 2336 SMLoc S, SMLoc E, MCContext &Ctx,
 2337 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2338 unsigned ShiftAmount = 0,
2339 unsigned HasExplicitAmount = false) {
2340 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2341 Kind == RegKind::SVEPredicateVector ||
2342 Kind == RegKind::SVEPredicateAsCounter) &&
2343 "Invalid vector kind");
2344 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2345 HasExplicitAmount);
2346 Op->Reg.ElementWidth = ElementWidth;
2347 return Op;
2348 }
2349
2350 static std::unique_ptr<AArch64Operand>
2351 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2352 unsigned NumElements, unsigned ElementWidth,
2353 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2354 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2355 Op->VectorList.RegNum = RegNum;
2356 Op->VectorList.Count = Count;
2357 Op->VectorList.Stride = Stride;
2358 Op->VectorList.NumElements = NumElements;
2359 Op->VectorList.ElementWidth = ElementWidth;
2360 Op->VectorList.RegisterKind = RegisterKind;
2361 Op->StartLoc = S;
2362 Op->EndLoc = E;
2363 return Op;
2364 }
2365
2366 static std::unique_ptr<AArch64Operand>
2367 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2368 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2369 Op->VectorIndex.Val = Idx;
2370 Op->StartLoc = S;
2371 Op->EndLoc = E;
2372 return Op;
2373 }
2374
2375 static std::unique_ptr<AArch64Operand>
2376 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2377 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2378 Op->MatrixTileList.RegMask = RegMask;
2379 Op->StartLoc = S;
2380 Op->EndLoc = E;
2381 return Op;
2382 }
2383
2384 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2385 const unsigned ElementWidth) {
2386 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2387 RegMap = {
2388 {{0, AArch64::ZAB0},
2389 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2390 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2391 {{8, AArch64::ZAB0},
2392 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2393 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2394 {{16, AArch64::ZAH0},
2395 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2396 {{16, AArch64::ZAH1},
2397 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2398 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2399 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2400 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2401 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2402 };
2403
2404 if (ElementWidth == 64)
2405 OutRegs.insert(Reg);
2406 else {
2407 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2408 assert(!Regs.empty() && "Invalid tile or element width!");
2409 for (auto OutReg : Regs)
2410 OutRegs.insert(OutReg);
2411 }
2412 }
2413
2414 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2415 SMLoc E, MCContext &Ctx) {
2416 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2417 Op->Imm.Val = Val;
2418 Op->StartLoc = S;
2419 Op->EndLoc = E;
2420 return Op;
2421 }
2422
2423 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2424 unsigned ShiftAmount,
2425 SMLoc S, SMLoc E,
2426 MCContext &Ctx) {
2427 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
 2428 Op->ShiftedImm.Val = Val;
2429 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2430 Op->StartLoc = S;
2431 Op->EndLoc = E;
2432 return Op;
2433 }
2434
2435 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2436 unsigned Last, SMLoc S,
2437 SMLoc E,
2438 MCContext &Ctx) {
2439 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2440 Op->ImmRange.First = First;
2441 Op->ImmRange.Last = Last;
2442 Op->EndLoc = E;
2443 return Op;
2444 }
2445
2446 static std::unique_ptr<AArch64Operand>
2447 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2448 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2449 Op->CondCode.Code = Code;
2450 Op->StartLoc = S;
2451 Op->EndLoc = E;
2452 return Op;
2453 }
2454
2455 static std::unique_ptr<AArch64Operand>
2456 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2457 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2458 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2459 Op->FPImm.IsExact = IsExact;
2460 Op->StartLoc = S;
2461 Op->EndLoc = S;
2462 return Op;
2463 }
2464
2465 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2466 StringRef Str,
2467 SMLoc S,
2468 MCContext &Ctx,
2469 bool HasnXSModifier) {
2470 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2471 Op->Barrier.Val = Val;
2472 Op->Barrier.Data = Str.data();
2473 Op->Barrier.Length = Str.size();
2474 Op->Barrier.HasnXSModifier = HasnXSModifier;
2475 Op->StartLoc = S;
2476 Op->EndLoc = S;
2477 return Op;
2478 }
2479
2480 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2481 uint32_t MRSReg,
2482 uint32_t MSRReg,
2483 uint32_t PStateField,
2484 MCContext &Ctx) {
2485 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2486 Op->SysReg.Data = Str.data();
2487 Op->SysReg.Length = Str.size();
2488 Op->SysReg.MRSReg = MRSReg;
2489 Op->SysReg.MSRReg = MSRReg;
2490 Op->SysReg.PStateField = PStateField;
2491 Op->StartLoc = S;
2492 Op->EndLoc = S;
2493 return Op;
2494 }
2495
2496 static std::unique_ptr<AArch64Operand>
2497 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2498 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2499 Op->PHint.Val = Val;
2500 Op->PHint.Data = Str.data();
2501 Op->PHint.Length = Str.size();
2502 Op->StartLoc = S;
2503 Op->EndLoc = S;
2504 return Op;
2505 }
2506
2507 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2508 SMLoc E, MCContext &Ctx) {
2509 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2510 Op->SysCRImm.Val = Val;
2511 Op->StartLoc = S;
2512 Op->EndLoc = E;
2513 return Op;
2514 }
2515
2516 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2517 StringRef Str,
2518 SMLoc S,
2519 MCContext &Ctx) {
2520 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2521 Op->Prefetch.Val = Val;
2522 Op->Barrier.Data = Str.data();
2523 Op->Barrier.Length = Str.size();
2524 Op->StartLoc = S;
2525 Op->EndLoc = S;
2526 return Op;
2527 }
2528
2529 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2530 StringRef Str,
2531 SMLoc S,
2532 MCContext &Ctx) {
2533 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2534 Op->PSBHint.Val = Val;
2535 Op->PSBHint.Data = Str.data();
2536 Op->PSBHint.Length = Str.size();
2537 Op->StartLoc = S;
2538 Op->EndLoc = S;
2539 return Op;
2540 }
2541
2542 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2543 StringRef Str,
2544 SMLoc S,
2545 MCContext &Ctx) {
2546 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2547 Op->BTIHint.Val = Val | 32;
2548 Op->BTIHint.Data = Str.data();
2549 Op->BTIHint.Length = Str.size();
2550 Op->StartLoc = S;
2551 Op->EndLoc = S;
2552 return Op;
2553 }
2554
2555 static std::unique_ptr<AArch64Operand>
2556 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2557 SMLoc S, SMLoc E, MCContext &Ctx) {
2558 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2559 Op->MatrixReg.RegNum = RegNum;
2560 Op->MatrixReg.ElementWidth = ElementWidth;
2561 Op->MatrixReg.Kind = Kind;
2562 Op->StartLoc = S;
2563 Op->EndLoc = E;
2564 return Op;
2565 }
2566
2567 static std::unique_ptr<AArch64Operand>
2568 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2569 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2570 Op->SVCR.PStateField = PStateField;
2571 Op->SVCR.Data = Str.data();
2572 Op->SVCR.Length = Str.size();
2573 Op->StartLoc = S;
2574 Op->EndLoc = S;
2575 return Op;
2576 }
2577
2578 static std::unique_ptr<AArch64Operand>
2579 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2580 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2581 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2582 Op->ShiftExtend.Type = ShOp;
2583 Op->ShiftExtend.Amount = Val;
2584 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2585 Op->StartLoc = S;
2586 Op->EndLoc = E;
2587 return Op;
2588 }
2589};
2590
2591} // end anonymous namespace.
2592
2593void AArch64Operand::print(raw_ostream &OS) const {
2594 switch (Kind) {
2595 case k_FPImm:
2596 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2597 if (!getFPImmIsExact())
2598 OS << " (inexact)";
2599 OS << ">";
2600 break;
2601 case k_Barrier: {
2602 StringRef Name = getBarrierName();
2603 if (!Name.empty())
2604 OS << "<barrier " << Name << ">";
2605 else
2606 OS << "<barrier invalid #" << getBarrier() << ">";
2607 break;
2608 }
2609 case k_Immediate:
2610 OS << *getImm();
2611 break;
2612 case k_ShiftedImm: {
2613 unsigned Shift = getShiftedImmShift();
2614 OS << "<shiftedimm ";
2615 OS << *getShiftedImmVal();
2616 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2617 break;
2618 }
2619 case k_ImmRange: {
2620 OS << "<immrange ";
2621 OS << getFirstImmVal();
2622 OS << ":" << getLastImmVal() << ">";
2623 break;
2624 }
2625 case k_CondCode:
2626 OS << "<condcode " << getCondCode() << ">";
2627 break;
2628 case k_VectorList: {
2629 OS << "<vectorlist ";
2630 unsigned Reg = getVectorListStart();
2631 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2632 OS << Reg + i * getVectorListStride() << " ";
2633 OS << ">";
2634 break;
2635 }
2636 case k_VectorIndex:
2637 OS << "<vectorindex " << getVectorIndex() << ">";
2638 break;
2639 case k_SysReg:
2640 OS << "<sysreg: " << getSysReg() << '>';
2641 break;
2642 case k_Token:
2643 OS << "'" << getToken() << "'";
2644 break;
2645 case k_SysCR:
2646 OS << "c" << getSysCR();
2647 break;
2648 case k_Prefetch: {
2649 StringRef Name = getPrefetchName();
2650 if (!Name.empty())
2651 OS << "<prfop " << Name << ">";
2652 else
2653 OS << "<prfop invalid #" << getPrefetch() << ">";
2654 break;
2655 }
2656 case k_PSBHint:
2657 OS << getPSBHintName();
2658 break;
2659 case k_PHint:
2660 OS << getPHintName();
2661 break;
2662 case k_BTIHint:
2663 OS << getBTIHintName();
2664 break;
2665 case k_MatrixRegister:
2666 OS << "<matrix " << getMatrixReg() << ">";
2667 break;
2668 case k_MatrixTileList: {
2669 OS << "<matrixlist ";
2670 unsigned RegMask = getMatrixTileListRegMask();
2671 unsigned MaxBits = 8;
2672 for (unsigned I = MaxBits; I > 0; --I)
2673 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2674 OS << '>';
2675 break;
2676 }
2677 case k_SVCR: {
2678 OS << getSVCR();
2679 break;
2680 }
2681 case k_Register:
2682 OS << "<register " << getReg() << ">";
2683 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2684 break;
2685 [[fallthrough]];
2686 case k_ShiftExtend:
2687 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2688 << getShiftExtendAmount();
2689 if (!hasShiftExtendAmount())
2690 OS << "<imp>";
2691 OS << '>';
2692 break;
2693 }
2694}
2695
2696/// @name Auto-generated Match Functions
2697/// {
 2698
 2699static unsigned MatchRegisterName(StringRef Name);
2700
2701/// }
 2702
 2703static unsigned MatchNeonVectorRegName(StringRef Name) {
2704 return StringSwitch<unsigned>(Name.lower())
2705 .Case("v0", AArch64::Q0)
2706 .Case("v1", AArch64::Q1)
2707 .Case("v2", AArch64::Q2)
2708 .Case("v3", AArch64::Q3)
2709 .Case("v4", AArch64::Q4)
2710 .Case("v5", AArch64::Q5)
2711 .Case("v6", AArch64::Q6)
2712 .Case("v7", AArch64::Q7)
2713 .Case("v8", AArch64::Q8)
2714 .Case("v9", AArch64::Q9)
2715 .Case("v10", AArch64::Q10)
2716 .Case("v11", AArch64::Q11)
2717 .Case("v12", AArch64::Q12)
2718 .Case("v13", AArch64::Q13)
2719 .Case("v14", AArch64::Q14)
2720 .Case("v15", AArch64::Q15)
2721 .Case("v16", AArch64::Q16)
2722 .Case("v17", AArch64::Q17)
2723 .Case("v18", AArch64::Q18)
2724 .Case("v19", AArch64::Q19)
2725 .Case("v20", AArch64::Q20)
2726 .Case("v21", AArch64::Q21)
2727 .Case("v22", AArch64::Q22)
2728 .Case("v23", AArch64::Q23)
2729 .Case("v24", AArch64::Q24)
2730 .Case("v25", AArch64::Q25)
2731 .Case("v26", AArch64::Q26)
2732 .Case("v27", AArch64::Q27)
2733 .Case("v28", AArch64::Q28)
2734 .Case("v29", AArch64::Q29)
2735 .Case("v30", AArch64::Q30)
2736 .Case("v31", AArch64::Q31)
2737 .Default(0);
2738}
2739
2740/// Returns an optional pair of (#elements, element-width) if Suffix
2741/// is a valid vector kind. Where the number of elements in a vector
2742/// or the vector width is implicit or explicitly unknown (but still a
2743/// valid suffix kind), 0 is used.
2744static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2745 RegKind VectorKind) {
2746 std::pair<int, int> Res = {-1, -1};
2747
2748 switch (VectorKind) {
 2749 case RegKind::NeonVector:
 2750 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2751 .Case("", {0, 0})
2752 .Case(".1d", {1, 64})
2753 .Case(".1q", {1, 128})
2754 // '.2h' needed for fp16 scalar pairwise reductions
2755 .Case(".2h", {2, 16})
2756 .Case(".2b", {2, 8})
2757 .Case(".2s", {2, 32})
2758 .Case(".2d", {2, 64})
2759 // '.4b' is another special case for the ARMv8.2a dot product
2760 // operand
2761 .Case(".4b", {4, 8})
2762 .Case(".4h", {4, 16})
2763 .Case(".4s", {4, 32})
2764 .Case(".8b", {8, 8})
2765 .Case(".8h", {8, 16})
2766 .Case(".16b", {16, 8})
2767 // Accept the width neutral ones, too, for verbose syntax. If
2768 // those aren't used in the right places, the token operand won't
2769 // match so all will work out.
2770 .Case(".b", {0, 8})
2771 .Case(".h", {0, 16})
2772 .Case(".s", {0, 32})
2773 .Case(".d", {0, 64})
2774 .Default({-1, -1});
2775 break;
2776 case RegKind::SVEPredicateAsCounter:
2777 case RegKind::SVEPredicateVector:
2778 case RegKind::SVEDataVector:
 2779 case RegKind::Matrix:
 2780 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2781 .Case("", {0, 0})
2782 .Case(".b", {0, 8})
2783 .Case(".h", {0, 16})
2784 .Case(".s", {0, 32})
2785 .Case(".d", {0, 64})
2786 .Case(".q", {0, 128})
2787 .Default({-1, -1});
2788 break;
2789 default:
2790 llvm_unreachable("Unsupported RegKind");
2791 }
2792
2793 if (Res == std::make_pair(-1, -1))
2794 return std::nullopt;
2795
2796 return std::optional<std::pair<int, int>>(Res);
2797}
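// For example (illustrative, not from the source),
// parseVectorKind(".4s", RegKind::NeonVector) yields {4, 32}, the width-neutral
// ".s" yields {0, 32}, and an unknown suffix such as ".3s" returns
// std::nullopt, so isValidVectorKind() is false for it.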
2798
2799static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2800 return parseVectorKind(Suffix, VectorKind).has_value();
2801}
 2802
 2803static unsigned matchSVEDataVectorRegName(StringRef Name) {
2804 return StringSwitch<unsigned>(Name.lower())
2805 .Case("z0", AArch64::Z0)
2806 .Case("z1", AArch64::Z1)
2807 .Case("z2", AArch64::Z2)
2808 .Case("z3", AArch64::Z3)
2809 .Case("z4", AArch64::Z4)
2810 .Case("z5", AArch64::Z5)
2811 .Case("z6", AArch64::Z6)
2812 .Case("z7", AArch64::Z7)
2813 .Case("z8", AArch64::Z8)
2814 .Case("z9", AArch64::Z9)
2815 .Case("z10", AArch64::Z10)
2816 .Case("z11", AArch64::Z11)
2817 .Case("z12", AArch64::Z12)
2818 .Case("z13", AArch64::Z13)
2819 .Case("z14", AArch64::Z14)
2820 .Case("z15", AArch64::Z15)
2821 .Case("z16", AArch64::Z16)
2822 .Case("z17", AArch64::Z17)
2823 .Case("z18", AArch64::Z18)
2824 .Case("z19", AArch64::Z19)
2825 .Case("z20", AArch64::Z20)
2826 .Case("z21", AArch64::Z21)
2827 .Case("z22", AArch64::Z22)
2828 .Case("z23", AArch64::Z23)
2829 .Case("z24", AArch64::Z24)
2830 .Case("z25", AArch64::Z25)
2831 .Case("z26", AArch64::Z26)
2832 .Case("z27", AArch64::Z27)
2833 .Case("z28", AArch64::Z28)
2834 .Case("z29", AArch64::Z29)
2835 .Case("z30", AArch64::Z30)
2836 .Case("z31", AArch64::Z31)
2837 .Default(0);
2838}
 2839
 2840static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2841 return StringSwitch<unsigned>(Name.lower())
2842 .Case("p0", AArch64::P0)
2843 .Case("p1", AArch64::P1)
2844 .Case("p2", AArch64::P2)
2845 .Case("p3", AArch64::P3)
2846 .Case("p4", AArch64::P4)
2847 .Case("p5", AArch64::P5)
2848 .Case("p6", AArch64::P6)
2849 .Case("p7", AArch64::P7)
2850 .Case("p8", AArch64::P8)
2851 .Case("p9", AArch64::P9)
2852 .Case("p10", AArch64::P10)
2853 .Case("p11", AArch64::P11)
2854 .Case("p12", AArch64::P12)
2855 .Case("p13", AArch64::P13)
2856 .Case("p14", AArch64::P14)
2857 .Case("p15", AArch64::P15)
2858 .Default(0);
2859}
 2860
 2861static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2862 return StringSwitch<unsigned>(Name.lower())
2863 .Case("pn0", AArch64::PN0)
2864 .Case("pn1", AArch64::PN1)
2865 .Case("pn2", AArch64::PN2)
2866 .Case("pn3", AArch64::PN3)
2867 .Case("pn4", AArch64::PN4)
2868 .Case("pn5", AArch64::PN5)
2869 .Case("pn6", AArch64::PN6)
2870 .Case("pn7", AArch64::PN7)
2871 .Case("pn8", AArch64::PN8)
2872 .Case("pn9", AArch64::PN9)
2873 .Case("pn10", AArch64::PN10)
2874 .Case("pn11", AArch64::PN11)
2875 .Case("pn12", AArch64::PN12)
2876 .Case("pn13", AArch64::PN13)
2877 .Case("pn14", AArch64::PN14)
2878 .Case("pn15", AArch64::PN15)
2879 .Default(0);
2880}
 2881
 2882static unsigned matchMatrixTileListRegName(StringRef Name) {
2883 return StringSwitch<unsigned>(Name.lower())
2884 .Case("za0.d", AArch64::ZAD0)
2885 .Case("za1.d", AArch64::ZAD1)
2886 .Case("za2.d", AArch64::ZAD2)
2887 .Case("za3.d", AArch64::ZAD3)
2888 .Case("za4.d", AArch64::ZAD4)
2889 .Case("za5.d", AArch64::ZAD5)
2890 .Case("za6.d", AArch64::ZAD6)
2891 .Case("za7.d", AArch64::ZAD7)
2892 .Case("za0.s", AArch64::ZAS0)
2893 .Case("za1.s", AArch64::ZAS1)
2894 .Case("za2.s", AArch64::ZAS2)
2895 .Case("za3.s", AArch64::ZAS3)
2896 .Case("za0.h", AArch64::ZAH0)
2897 .Case("za1.h", AArch64::ZAH1)
2898 .Case("za0.b", AArch64::ZAB0)
2899 .Default(0);
2900}
 2901
 2902static unsigned matchMatrixRegName(StringRef Name) {
2903 return StringSwitch<unsigned>(Name.lower())
2904 .Case("za", AArch64::ZA)
2905 .Case("za0.q", AArch64::ZAQ0)
2906 .Case("za1.q", AArch64::ZAQ1)
2907 .Case("za2.q", AArch64::ZAQ2)
2908 .Case("za3.q", AArch64::ZAQ3)
2909 .Case("za4.q", AArch64::ZAQ4)
2910 .Case("za5.q", AArch64::ZAQ5)
2911 .Case("za6.q", AArch64::ZAQ6)
2912 .Case("za7.q", AArch64::ZAQ7)
2913 .Case("za8.q", AArch64::ZAQ8)
2914 .Case("za9.q", AArch64::ZAQ9)
2915 .Case("za10.q", AArch64::ZAQ10)
2916 .Case("za11.q", AArch64::ZAQ11)
2917 .Case("za12.q", AArch64::ZAQ12)
2918 .Case("za13.q", AArch64::ZAQ13)
2919 .Case("za14.q", AArch64::ZAQ14)
2920 .Case("za15.q", AArch64::ZAQ15)
2921 .Case("za0.d", AArch64::ZAD0)
2922 .Case("za1.d", AArch64::ZAD1)
2923 .Case("za2.d", AArch64::ZAD2)
2924 .Case("za3.d", AArch64::ZAD3)
2925 .Case("za4.d", AArch64::ZAD4)
2926 .Case("za5.d", AArch64::ZAD5)
2927 .Case("za6.d", AArch64::ZAD6)
2928 .Case("za7.d", AArch64::ZAD7)
2929 .Case("za0.s", AArch64::ZAS0)
2930 .Case("za1.s", AArch64::ZAS1)
2931 .Case("za2.s", AArch64::ZAS2)
2932 .Case("za3.s", AArch64::ZAS3)
2933 .Case("za0.h", AArch64::ZAH0)
2934 .Case("za1.h", AArch64::ZAH1)
2935 .Case("za0.b", AArch64::ZAB0)
2936 .Case("za0h.q", AArch64::ZAQ0)
2937 .Case("za1h.q", AArch64::ZAQ1)
2938 .Case("za2h.q", AArch64::ZAQ2)
2939 .Case("za3h.q", AArch64::ZAQ3)
2940 .Case("za4h.q", AArch64::ZAQ4)
2941 .Case("za5h.q", AArch64::ZAQ5)
2942 .Case("za6h.q", AArch64::ZAQ6)
2943 .Case("za7h.q", AArch64::ZAQ7)
2944 .Case("za8h.q", AArch64::ZAQ8)
2945 .Case("za9h.q", AArch64::ZAQ9)
2946 .Case("za10h.q", AArch64::ZAQ10)
2947 .Case("za11h.q", AArch64::ZAQ11)
2948 .Case("za12h.q", AArch64::ZAQ12)
2949 .Case("za13h.q", AArch64::ZAQ13)
2950 .Case("za14h.q", AArch64::ZAQ14)
2951 .Case("za15h.q", AArch64::ZAQ15)
2952 .Case("za0h.d", AArch64::ZAD0)
2953 .Case("za1h.d", AArch64::ZAD1)
2954 .Case("za2h.d", AArch64::ZAD2)
2955 .Case("za3h.d", AArch64::ZAD3)
2956 .Case("za4h.d", AArch64::ZAD4)
2957 .Case("za5h.d", AArch64::ZAD5)
2958 .Case("za6h.d", AArch64::ZAD6)
2959 .Case("za7h.d", AArch64::ZAD7)
2960 .Case("za0h.s", AArch64::ZAS0)
2961 .Case("za1h.s", AArch64::ZAS1)
2962 .Case("za2h.s", AArch64::ZAS2)
2963 .Case("za3h.s", AArch64::ZAS3)
2964 .Case("za0h.h", AArch64::ZAH0)
2965 .Case("za1h.h", AArch64::ZAH1)
2966 .Case("za0h.b", AArch64::ZAB0)
2967 .Case("za0v.q", AArch64::ZAQ0)
2968 .Case("za1v.q", AArch64::ZAQ1)
2969 .Case("za2v.q", AArch64::ZAQ2)
2970 .Case("za3v.q", AArch64::ZAQ3)
2971 .Case("za4v.q", AArch64::ZAQ4)
2972 .Case("za5v.q", AArch64::ZAQ5)
2973 .Case("za6v.q", AArch64::ZAQ6)
2974 .Case("za7v.q", AArch64::ZAQ7)
2975 .Case("za8v.q", AArch64::ZAQ8)
2976 .Case("za9v.q", AArch64::ZAQ9)
2977 .Case("za10v.q", AArch64::ZAQ10)
2978 .Case("za11v.q", AArch64::ZAQ11)
2979 .Case("za12v.q", AArch64::ZAQ12)
2980 .Case("za13v.q", AArch64::ZAQ13)
2981 .Case("za14v.q", AArch64::ZAQ14)
2982 .Case("za15v.q", AArch64::ZAQ15)
2983 .Case("za0v.d", AArch64::ZAD0)
2984 .Case("za1v.d", AArch64::ZAD1)
2985 .Case("za2v.d", AArch64::ZAD2)
2986 .Case("za3v.d", AArch64::ZAD3)
2987 .Case("za4v.d", AArch64::ZAD4)
2988 .Case("za5v.d", AArch64::ZAD5)
2989 .Case("za6v.d", AArch64::ZAD6)
2990 .Case("za7v.d", AArch64::ZAD7)
2991 .Case("za0v.s", AArch64::ZAS0)
2992 .Case("za1v.s", AArch64::ZAS1)
2993 .Case("za2v.s", AArch64::ZAS2)
2994 .Case("za3v.s", AArch64::ZAS3)
2995 .Case("za0v.h", AArch64::ZAH0)
2996 .Case("za1v.h", AArch64::ZAH1)
2997 .Case("za0v.b", AArch64::ZAB0)
2998 .Default(0);
2999}
3000
3001bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3002 SMLoc &EndLoc) {
3003 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3004}
3005
3006ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3007 SMLoc &EndLoc) {
3008 StartLoc = getLoc();
3009 ParseStatus Res = tryParseScalarRegister(Reg);
3010 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 return Res;
3012}
3013
3014// Matches a register name or register alias previously defined by '.req'
3015unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3016 RegKind Kind) {
3017 unsigned RegNum = 0;
3018 if ((RegNum = matchSVEDataVectorRegName(Name)))
3019 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3020
3021 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3022 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
 3023
 3024 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3025 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3026
3027 if ((RegNum = MatchNeonVectorRegName(Name)))
3028 return Kind == RegKind::NeonVector ? RegNum : 0;
3029
3030 if ((RegNum = matchMatrixRegName(Name)))
3031 return Kind == RegKind::Matrix ? RegNum : 0;
3032
3033 if (Name.equals_insensitive("zt0"))
3034 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3035
3036 // The parsed register must be of RegKind Scalar
3037 if ((RegNum = MatchRegisterName(Name)))
3038 return (Kind == RegKind::Scalar) ? RegNum : 0;
3039
3040 if (!RegNum) {
3041 // Handle a few common aliases of registers.
3042 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3043 .Case("fp", AArch64::FP)
3044 .Case("lr", AArch64::LR)
3045 .Case("x31", AArch64::XZR)
3046 .Case("w31", AArch64::WZR)
3047 .Default(0))
3048 return Kind == RegKind::Scalar ? RegNum : 0;
3049
3050 // Check for aliases registered via .req. Canonicalize to lower case.
3051 // That's more consistent since register names are case insensitive, and
3052 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3053 auto Entry = RegisterReqs.find(Name.lower());
3054 if (Entry == RegisterReqs.end())
3055 return 0;
3056
3057 // set RegNum if the match is the right kind of register
3058 if (Kind == Entry->getValue().first)
3059 RegNum = Entry->getValue().second;
3060 }
3061 return RegNum;
3062}
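// For example (illustrative, not from the source),
// matchRegisterNameAlias("x31", RegKind::Scalar) resolves to XZR through the
// alias table above, while an alias created with ".req" only matches when the
// requested RegKind agrees with the kind it was registered with.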
3063
3064unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3065 switch (K) {
3066 case RegKind::Scalar:
3067 case RegKind::NeonVector:
3068 case RegKind::SVEDataVector:
3069 return 32;
3070 case RegKind::Matrix:
3071 case RegKind::SVEPredicateVector:
3072 case RegKind::SVEPredicateAsCounter:
3073 return 16;
3074 case RegKind::LookupTable:
3075 return 1;
3076 }
3077 llvm_unreachable("Unsupported RegKind");
3078}
3079
3080/// tryParseScalarRegister - Try to parse a register name. The token must be an
3081/// Identifier when called, and if it is a register name the token is eaten and
3082/// the register is added to the operand list.
3083ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3084 const AsmToken &Tok = getTok();
3085 if (Tok.isNot(AsmToken::Identifier))
3086 return ParseStatus::NoMatch;
3087
3088 std::string lowerCase = Tok.getString().lower();
3089 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3090 if (Reg == 0)
3091 return ParseStatus::NoMatch;
3092
3093 RegNum = Reg;
3094 Lex(); // Eat identifier token.
3095 return ParseStatus::Success;
3096}
3097
3098/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3099ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3100 SMLoc S = getLoc();
3101
3102 if (getTok().isNot(AsmToken::Identifier))
3103 return Error(S, "Expected cN operand where 0 <= N <= 15");
3104
3105 StringRef Tok = getTok().getIdentifier();
3106 if (Tok[0] != 'c' && Tok[0] != 'C')
3107 return Error(S, "Expected cN operand where 0 <= N <= 15");
3108
3109 uint32_t CRNum;
3110 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3111 if (BadNum || CRNum > 15)
3112 return Error(S, "Expected cN operand where 0 <= N <= 15");
3113
3114 Lex(); // Eat identifier token.
3115 Operands.push_back(
3116 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3117 return ParseStatus::Success;
3118}
3119
3120// Either an identifier for named values or a 6-bit immediate.
3121ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3122 SMLoc S = getLoc();
3123 const AsmToken &Tok = getTok();
3124
3125 unsigned MaxVal = 63;
3126
3127 // Immediate case, with optional leading hash:
3128 if (parseOptionalToken(AsmToken::Hash) ||
3129 Tok.is(AsmToken::Integer)) {
3130 const MCExpr *ImmVal;
3131 if (getParser().parseExpression(ImmVal))
3132 return ParseStatus::Failure;
3133
3134 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3135 if (!MCE)
3136 return TokError("immediate value expected for prefetch operand");
3137 unsigned prfop = MCE->getValue();
3138 if (prfop > MaxVal)
3139 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3140 "] expected");
3141
3142 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3143 Operands.push_back(AArch64Operand::CreatePrefetch(
3144 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3145 return ParseStatus::Success;
3146 }
3147
3148 if (Tok.isNot(AsmToken::Identifier))
3149 return TokError("prefetch hint expected");
3150
3151 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3152 if (!RPRFM)
3153 return TokError("prefetch hint expected");
3154
3155 Operands.push_back(AArch64Operand::CreatePrefetch(
3156 RPRFM->Encoding, Tok.getString(), S, getContext()));
3157 Lex(); // Eat identifier token.
3158 return ParseStatus::Success;
3159}
3160
3161/// tryParsePrefetch - Try to parse a prefetch operand.
3162template <bool IsSVEPrefetch>
3163ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3164 SMLoc S = getLoc();
3165 const AsmToken &Tok = getTok();
3166
3167 auto LookupByName = [](StringRef N) {
3168 if (IsSVEPrefetch) {
3169 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3170 return std::optional<unsigned>(Res->Encoding);
3171 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3172 return std::optional<unsigned>(Res->Encoding);
3173 return std::optional<unsigned>();
3174 };
3175
3176 auto LookupByEncoding = [](unsigned E) {
3177 if (IsSVEPrefetch) {
3178 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3179 return std::optional<StringRef>(Res->Name);
3180 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3181 return std::optional<StringRef>(Res->Name);
3182 return std::optional<StringRef>();
3183 };
3184 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3185
3186 // Either an identifier for named values or a 5-bit immediate.
3187 // Eat optional hash.
3188 if (parseOptionalToken(AsmToken::Hash) ||
3189 Tok.is(AsmToken::Integer)) {
3190 const MCExpr *ImmVal;
3191 if (getParser().parseExpression(ImmVal))
3192 return ParseStatus::Failure;
3193
3194 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3195 if (!MCE)
3196 return TokError("immediate value expected for prefetch operand");
3197 unsigned prfop = MCE->getValue();
3198 if (prfop > MaxVal)
3199 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3200 "] expected");
3201
3202 auto PRFM = LookupByEncoding(MCE->getValue());
3203 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3204 S, getContext()));
3205 return ParseStatus::Success;
3206 }
3207
3208 if (Tok.isNot(AsmToken::Identifier))
3209 return TokError("prefetch hint expected");
3210
3211 auto PRFM = LookupByName(Tok.getString());
3212 if (!PRFM)
3213 return TokError("prefetch hint expected");
3214
3215 Operands.push_back(AArch64Operand::CreatePrefetch(
3216 *PRFM, Tok.getString(), S, getContext()));
3217 Lex(); // Eat identifier token.
3218 return ParseStatus::Success;
3219}
3220
3221/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3222ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3223 SMLoc S = getLoc();
3224 const AsmToken &Tok = getTok();
3225 if (Tok.isNot(AsmToken::Identifier))
3226 return TokError("invalid operand for instruction");
3227
3228 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3229 if (!PSB)
3230 return TokError("invalid operand for instruction");
3231
3232 Operands.push_back(AArch64Operand::CreatePSBHint(
3233 PSB->Encoding, Tok.getString(), S, getContext()));
3234 Lex(); // Eat identifier token.
3235 return ParseStatus::Success;
3236}
3237
3238ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3239 SMLoc StartLoc = getLoc();
3240
3241 MCRegister RegNum;
3242
3243 // The case where xzr, xzr is not present is handled by an InstAlias.
3244
3245 auto RegTok = getTok(); // in case we need to backtrack
3246 if (!tryParseScalarRegister(RegNum).isSuccess())
3247 return ParseStatus::NoMatch;
3248
3249 if (RegNum != AArch64::XZR) {
3250 getLexer().UnLex(RegTok);
3251 return ParseStatus::NoMatch;
3252 }
3253
3254 if (parseComma())
3255 return ParseStatus::Failure;
3256
3257 if (!tryParseScalarRegister(RegNum).isSuccess())
3258 return TokError("expected register operand");
3259
3260 if (RegNum != AArch64::XZR)
3261 return TokError("xzr must be followed by xzr");
3262
3263 // We need to push something, since we claim this is an operand in .td.
3264 // See also AArch64AsmParser::parseKeywordOperand.
3265 Operands.push_back(AArch64Operand::CreateReg(
3266 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3267
3268 return ParseStatus::Success;
3269}
3270
3271/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3272ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3273 SMLoc S = getLoc();
3274 const AsmToken &Tok = getTok();
3275 if (Tok.isNot(AsmToken::Identifier))
3276 return TokError("invalid operand for instruction");
3277
3278 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3279 if (!BTI)
3280 return TokError("invalid operand for instruction");
3281
3282 Operands.push_back(AArch64Operand::CreateBTIHint(
3283 BTI->Encoding, Tok.getString(), S, getContext()));
3284 Lex(); // Eat identifier token.
3285 return ParseStatus::Success;
3286}
3287
3288/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3289/// instruction.
3290ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3291 SMLoc S = getLoc();
3292 const MCExpr *Expr = nullptr;
3293
3294 if (getTok().is(AsmToken::Hash)) {
3295 Lex(); // Eat hash token.
3296 }
3297
3298 if (parseSymbolicImmVal(Expr))
3299 return ParseStatus::Failure;
3300
3301 AArch64MCExpr::VariantKind ELFRefKind;
3302 MCSymbolRefExpr::VariantKind DarwinRefKind;
3303 int64_t Addend;
3304 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3305 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3306 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3307 // No modifier was specified at all; this is the syntax for an ELF basic
3308 // ADRP relocation (unfortunately).
 3309 Expr =
 3310 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3311 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3312 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3313 Addend != 0) {
3314 return Error(S, "gotpage label reference not allowed an addend");
3315 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3316 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3317 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3318 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3319 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3320 ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
3321 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3322 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
 3323 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE &&
 3324 ELFRefKind != AArch64MCExpr::VK_TLSDESC_AUTH_PAGE) {
3325 // The operand must be an @page or @gotpage qualified symbolref.
3326 return Error(S, "page or gotpage label reference expected");
3327 }
3328 }
3329
3330 // We have either a label reference possibly with addend or an immediate. The
3331 // addend is a raw value here. The linker will adjust it to only reference the
3332 // page.
3333 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3334 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3335
3336 return ParseStatus::Success;
3337}
3338
3339/// tryParseAdrLabel - Parse and validate a source label for the ADR
3340/// instruction.
3341ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3342 SMLoc S = getLoc();
3343 const MCExpr *Expr = nullptr;
3344
3345 // Leave anything with a bracket to the default for SVE
3346 if (getTok().is(AsmToken::LBrac))
3347 return ParseStatus::NoMatch;
3348
3349 if (getTok().is(AsmToken::Hash))
3350 Lex(); // Eat hash token.
3351
3352 if (parseSymbolicImmVal(Expr))
3353 return ParseStatus::Failure;
3354
3355 AArch64MCExpr::VariantKind ELFRefKind;
3356 MCSymbolRefExpr::VariantKind DarwinRefKind;
3357 int64_t Addend;
3358 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3359 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3360 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3361 // No modifier was specified at all; this is the syntax for an ELF basic
3362 // ADR relocation (unfortunately).
3363 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3364 } else if (ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE) {
3365 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3366 // adr. It's not actually GOT entry page address but the GOT address
3367 // itself - we just share the same variant kind with :got_auth: operator
3368 // applied for adrp.
3369 // TODO: can we somehow get current TargetMachine object to call
3370 // getCodeModel() on it to ensure we are using tiny code model?
3371 return Error(S, "unexpected adr label");
3372 }
3373 }
3374
3375 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3376 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3377 return ParseStatus::Success;
3378}
3379
3380/// tryParseFPImm - A floating point immediate expression operand.
3381template <bool AddFPZeroAsLiteral>
3382ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3383 SMLoc S = getLoc();
3384
3385 bool Hash = parseOptionalToken(AsmToken::Hash);
3386
3387 // Handle negation, as that still comes through as a separate token.
3388 bool isNegative = parseOptionalToken(AsmToken::Minus);
3389
3390 const AsmToken &Tok = getTok();
3391 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3392 if (!Hash)
3393 return ParseStatus::NoMatch;
3394 return TokError("invalid floating point immediate");
3395 }
3396
3397 // Parse hexadecimal representation.
3398 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3399 if (Tok.getIntVal() > 255 || isNegative)
3400 return TokError("encoded floating point value out of range");
 3401
 3402 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3403 Operands.push_back(
3404 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3405 } else {
3406 // Parse FP representation.
3407 APFloat RealVal(APFloat::IEEEdouble());
3408 auto StatusOrErr =
3409 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3410 if (errorToBool(StatusOrErr.takeError()))
3411 return TokError("invalid floating point representation");
3412
3413 if (isNegative)
3414 RealVal.changeSign();
3415
3416 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3417 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3418 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3419 } else
3420 Operands.push_back(AArch64Operand::CreateFPImm(
3421 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3422 }
3423
3424 Lex(); // Eat the token.
3425
3426 return ParseStatus::Success;
3427}
3428
3429/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
 3430
 3431ParseStatus
3432AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3433 SMLoc S = getLoc();
3434
3435 if (getTok().is(AsmToken::Hash))
3436 Lex(); // Eat '#'
3437 else if (getTok().isNot(AsmToken::Integer))
3438 // Operand should start from # or should be integer, emit error otherwise.
3439 return ParseStatus::NoMatch;
3440
3441 if (getTok().is(AsmToken::Integer) &&
3442 getLexer().peekTok().is(AsmToken::Colon))
3443 return tryParseImmRange(Operands);
3444
3445 const MCExpr *Imm = nullptr;
3446 if (parseSymbolicImmVal(Imm))
3447 return ParseStatus::Failure;
3448 else if (getTok().isNot(AsmToken::Comma)) {
3449 Operands.push_back(
3450 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3451 return ParseStatus::Success;
3452 }
3453
3454 // Eat ','
3455 Lex();
3456 StringRef VecGroup;
3457 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3458 Operands.push_back(
3459 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3460 Operands.push_back(
3461 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3462 return ParseStatus::Success;
3463 }
3464
3465 // The optional operand must be "lsl #N" where N is non-negative.
3466 if (!getTok().is(AsmToken::Identifier) ||
3467 !getTok().getIdentifier().equals_insensitive("lsl"))
3468 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3469
3470 // Eat 'lsl'
3471 Lex();
3472
3473 parseOptionalToken(AsmToken::Hash);
3474
3475 if (getTok().isNot(AsmToken::Integer))
3476 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3477
3478 int64_t ShiftAmount = getTok().getIntVal();
3479
3480 if (ShiftAmount < 0)
3481 return Error(getLoc(), "positive shift amount required");
3482 Lex(); // Eat the number
3483
3484 // Just in case the optional lsl #0 is used for immediates other than zero.
3485 if (ShiftAmount == 0 && Imm != nullptr) {
3486 Operands.push_back(
3487 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3488 return ParseStatus::Success;
3489 }
3490
3491 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3492 getLoc(), getContext()));
3493 return ParseStatus::Success;
3494}
3495
3496/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3497/// suggestion to help common typos.
3498 AArch64CC::CondCode
3499 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3500 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3501 .Case("eq", AArch64CC::EQ)
3502 .Case("ne", AArch64CC::NE)
3503 .Case("cs", AArch64CC::HS)
3504 .Case("hs", AArch64CC::HS)
3505 .Case("cc", AArch64CC::LO)
3506 .Case("lo", AArch64CC::LO)
3507 .Case("mi", AArch64CC::MI)
3508 .Case("pl", AArch64CC::PL)
3509 .Case("vs", AArch64CC::VS)
3510 .Case("vc", AArch64CC::VC)
3511 .Case("hi", AArch64CC::HI)
3512 .Case("ls", AArch64CC::LS)
3513 .Case("ge", AArch64CC::GE)
3514 .Case("lt", AArch64CC::LT)
3515 .Case("gt", AArch64CC::GT)
3516 .Case("le", AArch64CC::LE)
3517 .Case("al", AArch64CC::AL)
3518 .Case("nv", AArch64CC::NV)
3519 .Default(AArch64CC::Invalid);
3520
3521 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3522 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3523 .Case("none", AArch64CC::EQ)
3524 .Case("any", AArch64CC::NE)
3525 .Case("nlast", AArch64CC::HS)
3526 .Case("last", AArch64CC::LO)
3527 .Case("first", AArch64CC::MI)
3528 .Case("nfrst", AArch64CC::PL)
3529 .Case("pmore", AArch64CC::HI)
3530 .Case("plast", AArch64CC::LS)
3531 .Case("tcont", AArch64CC::GE)
3532 .Case("tstop", AArch64CC::LT)
3533 .Default(AArch64CC::Invalid);
3534
3535 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3536 Suggestion = "nfrst";
3537 }
3538 return CC;
3539}
3540
3541/// parseCondCode - Parse a Condition Code operand.
3542bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3543 bool invertCondCode) {
3544 SMLoc S = getLoc();
3545 const AsmToken &Tok = getTok();
3546 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3547
3548 StringRef Cond = Tok.getString();
3549 std::string Suggestion;
3550 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3551 if (CC == AArch64CC::Invalid) {
3552 std::string Msg = "invalid condition code";
3553 if (!Suggestion.empty())
3554 Msg += ", did you mean " + Suggestion + "?";
3555 return TokError(Msg);
3556 }
3557 Lex(); // Eat identifier token.
3558
3559 if (invertCondCode) {
3560 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3561 return TokError("condition codes AL and NV are invalid for this instruction");
3562 CC = AArch64CC::getInvertedCondCode(CC);
3563 }
3564
3565 Operands.push_back(
3566 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3567 return false;
3568}
3569
3570ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3571 const AsmToken &Tok = getTok();
3572 SMLoc S = getLoc();
3573
3574 if (Tok.isNot(AsmToken::Identifier))
3575 return TokError("invalid operand for instruction");
3576
3577 unsigned PStateImm = -1;
3578 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3579 if (!SVCR)
3580 return ParseStatus::NoMatch;
3581 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3582 PStateImm = SVCR->Encoding;
3583
3584 Operands.push_back(
3585 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3586 Lex(); // Eat identifier token.
3587 return ParseStatus::Success;
3588}
3589
3590ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3591 const AsmToken &Tok = getTok();
3592 SMLoc S = getLoc();
3593
3594 StringRef Name = Tok.getString();
3595
3596 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3597 Lex(); // eat "za[.(b|h|s|d)]"
3598 unsigned ElementWidth = 0;
3599 auto DotPosition = Name.find('.');
3600 if (DotPosition != StringRef::npos) {
3601 const auto &KindRes =
3602 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3603 if (!KindRes)
3604 return TokError(
3605 "Expected the register to be followed by element width suffix");
3606 ElementWidth = KindRes->second;
3607 }
3608 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3609 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3610 getContext()));
3611 if (getLexer().is(AsmToken::LBrac)) {
3612 // There's no comma after matrix operand, so we can parse the next operand
3613 // immediately.
3614 if (parseOperand(Operands, false, false))
3615 return ParseStatus::NoMatch;
3616 }
3617 return ParseStatus::Success;
3618 }
3619
3620 // Try to parse matrix register.
3621 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3622 if (!Reg)
3623 return ParseStatus::NoMatch;
3624
3625 size_t DotPosition = Name.find('.');
3626 assert(DotPosition != StringRef::npos && "Unexpected register");
3627
3628 StringRef Head = Name.take_front(DotPosition);
3629 StringRef Tail = Name.drop_front(DotPosition);
3630 StringRef RowOrColumn = Head.take_back();
3631
3632 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3633 .Case("h", MatrixKind::Row)
3634 .Case("v", MatrixKind::Col)
3635 .Default(MatrixKind::Tile);
3636
3637 // Next up, parsing the suffix
3638 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3639 if (!KindRes)
3640 return TokError(
3641 "Expected the register to be followed by element width suffix");
3642 unsigned ElementWidth = KindRes->second;
3643
3644 Lex();
3645
3646 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3647 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3648
3649 if (getLexer().is(AsmToken::LBrac)) {
3650 // There's no comma after matrix operand, so we can parse the next operand
3651 // immediately.
3652 if (parseOperand(Operands, false, false))
3653 return ParseStatus::NoMatch;
3654 }
3655 return ParseStatus::Success;
3656}
3657
3658 /// tryParseOptionalShiftExtend - Some operands take an optional shift or extend argument. Parse
3659/// them if present.
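/// For example the "lsl #3" in "add x0, x1, x2, lsl #3", or the "uxtw #2" in
/// "ldr x0, [x1, w2, uxtw #2]"; extend operations may omit the immediate, in
/// which case an implicit #0 is used.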
3660 ParseStatus
3661 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3662 const AsmToken &Tok = getTok();
3663 std::string LowerID = Tok.getString().lower();
3664 AArch64_AM::ShiftExtendType ShOp =
3665 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3666 .Case("lsl", AArch64_AM::LSL)
3667 .Case("lsr", AArch64_AM::LSR)
3668 .Case("asr", AArch64_AM::ASR)
3669 .Case("ror", AArch64_AM::ROR)
3670 .Case("msl", AArch64_AM::MSL)
3671 .Case("uxtb", AArch64_AM::UXTB)
3672 .Case("uxth", AArch64_AM::UXTH)
3673 .Case("uxtw", AArch64_AM::UXTW)
3674 .Case("uxtx", AArch64_AM::UXTX)
3675 .Case("sxtb", AArch64_AM::SXTB)
3676 .Case("sxth", AArch64_AM::SXTH)
3677 .Case("sxtw", AArch64_AM::SXTW)
3678 .Case("sxtx", AArch64_AM::SXTX)
3679 .Default(AArch64_AM::InvalidShiftExtend);
3680
3681 if (ShOp == AArch64_AM::InvalidShiftExtend)
3682 return ParseStatus::NoMatch;
3683
3684 SMLoc S = Tok.getLoc();
3685 Lex();
3686
3687 bool Hash = parseOptionalToken(AsmToken::Hash);
3688
3689 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3690 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3691 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3692 ShOp == AArch64_AM::MSL) {
3693 // We expect a number here.
3694 return TokError("expected #imm after shift specifier");
3695 }
3696
3697 // "extend" type operations don't need an immediate, #0 is implicit.
3698 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3699 Operands.push_back(
3700 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3701 return ParseStatus::Success;
3702 }
3703
3704 // Make sure we do actually have a number, identifier or a parenthesized
3705 // expression.
3706 SMLoc E = getLoc();
3707 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3708 !getTok().is(AsmToken::Identifier))
3709 return Error(E, "expected integer shift amount");
3710
3711 const MCExpr *ImmVal;
3712 if (getParser().parseExpression(ImmVal))
3713 return ParseStatus::Failure;
3714
3715 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3716 if (!MCE)
3717 return Error(E, "expected constant '#imm' after shift specifier");
3718
3719 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3720 Operands.push_back(AArch64Operand::CreateShiftExtend(
3721 ShOp, MCE->getValue(), true, S, E, getContext()));
3722 return ParseStatus::Success;
3723}
3724
3725static const struct Extension {
3726 const char *Name;
3727 FeatureBitset Features;
3728 } ExtensionMap[] = {
3729 {"crc", {AArch64::FeatureCRC}},
3730 {"sm4", {AArch64::FeatureSM4}},
3731 {"sha3", {AArch64::FeatureSHA3}},
3732 {"sha2", {AArch64::FeatureSHA2}},
3733 {"aes", {AArch64::FeatureAES}},
3734 {"crypto", {AArch64::FeatureCrypto}},
3735 {"fp", {AArch64::FeatureFPARMv8}},
3736 {"simd", {AArch64::FeatureNEON}},
3737 {"ras", {AArch64::FeatureRAS}},
3738 {"rasv2", {AArch64::FeatureRASv2}},
3739 {"lse", {AArch64::FeatureLSE}},
3740 {"predres", {AArch64::FeaturePredRes}},
3741 {"predres2", {AArch64::FeatureSPECRES2}},
3742 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3743 {"mte", {AArch64::FeatureMTE}},
3744 {"memtag", {AArch64::FeatureMTE}},
3745 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3746 {"pan", {AArch64::FeaturePAN}},
3747 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3748 {"ccpp", {AArch64::FeatureCCPP}},
3749 {"rcpc", {AArch64::FeatureRCPC}},
3750 {"rng", {AArch64::FeatureRandGen}},
3751 {"sve", {AArch64::FeatureSVE}},
3752 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3753 {"sve2", {AArch64::FeatureSVE2}},
3754 {"sve-aes", {AArch64::FeatureSVEAES}},
3755 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3756 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3757 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3758 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3759 {"sve2p1", {AArch64::FeatureSVE2p1}},
3760 {"ls64", {AArch64::FeatureLS64}},
3761 {"xs", {AArch64::FeatureXS}},
3762 {"pauth", {AArch64::FeaturePAuth}},
3763 {"flagm", {AArch64::FeatureFlagM}},
3764 {"rme", {AArch64::FeatureRME}},
3765 {"sme", {AArch64::FeatureSME}},
3766 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3767 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3768 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3769 {"sme2", {AArch64::FeatureSME2}},
3770 {"sme2p1", {AArch64::FeatureSME2p1}},
3771 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3772 {"hbc", {AArch64::FeatureHBC}},
3773 {"mops", {AArch64::FeatureMOPS}},
3774 {"mec", {AArch64::FeatureMEC}},
3775 {"the", {AArch64::FeatureTHE}},
3776 {"d128", {AArch64::FeatureD128}},
3777 {"lse128", {AArch64::FeatureLSE128}},
3778 {"ite", {AArch64::FeatureITE}},
3779 {"cssc", {AArch64::FeatureCSSC}},
3780 {"rcpc3", {AArch64::FeatureRCPC3}},
3781 {"gcs", {AArch64::FeatureGCS}},
3782 {"bf16", {AArch64::FeatureBF16}},
3783 {"compnum", {AArch64::FeatureComplxNum}},
3784 {"dotprod", {AArch64::FeatureDotProd}},
3785 {"f32mm", {AArch64::FeatureMatMulFP32}},
3786 {"f64mm", {AArch64::FeatureMatMulFP64}},
3787 {"fp16", {AArch64::FeatureFullFP16}},
3788 {"fp16fml", {AArch64::FeatureFP16FML}},
3789 {"i8mm", {AArch64::FeatureMatMulInt8}},
3790 {"lor", {AArch64::FeatureLOR}},
3791 {"profile", {AArch64::FeatureSPE}},
3792 // "rdma" is the name documented by binutils for the feature, but
3793 // binutils also accepts incomplete prefixes of features, so "rdm"
3794 // works too. Support both spellings here.
3795 {"rdm", {AArch64::FeatureRDM}},
3796 {"rdma", {AArch64::FeatureRDM}},
3797 {"sb", {AArch64::FeatureSB}},
3798 {"ssbs", {AArch64::FeatureSSBS}},
3799 {"tme", {AArch64::FeatureTME}},
3800 {"fp8", {AArch64::FeatureFP8}},
3801 {"faminmax", {AArch64::FeatureFAMINMAX}},
3802 {"fp8fma", {AArch64::FeatureFP8FMA}},
3803 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3804 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3805 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3806 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3807 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3808 {"lut", {AArch64::FeatureLUT}},
3809 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3810 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3811 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3812 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3813 {"cpa", {AArch64::FeatureCPA}},
3814 {"tlbiw", {AArch64::FeatureTLBIW}},
3815 {"pops", {AArch64::FeaturePoPS}},
3816 {"cmpbr", {AArch64::FeatureCMPBR}},
3817 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3818 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3819 {"fprcvt", {AArch64::FeatureFPRCVT}},
3820 {"lsfe", {AArch64::FeatureLSFE}},
3821 {"sme2p2", {AArch64::FeatureSME2p2}},
3822 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3823 {"sve2p2", {AArch64::FeatureSVE2p2}},
3824 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3825 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3826 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3827 {"lsui", {AArch64::FeatureLSUI}},
3828 {"occmo", {AArch64::FeatureOCCMO}},
3829 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3830 };
3831
3832static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3833 if (FBS[AArch64::HasV8_0aOps])
3834 Str += "ARMv8a";
3835 if (FBS[AArch64::HasV8_1aOps])
3836 Str += "ARMv8.1a";
3837 else if (FBS[AArch64::HasV8_2aOps])
3838 Str += "ARMv8.2a";
3839 else if (FBS[AArch64::HasV8_3aOps])
3840 Str += "ARMv8.3a";
3841 else if (FBS[AArch64::HasV8_4aOps])
3842 Str += "ARMv8.4a";
3843 else if (FBS[AArch64::HasV8_5aOps])
3844 Str += "ARMv8.5a";
3845 else if (FBS[AArch64::HasV8_6aOps])
3846 Str += "ARMv8.6a";
3847 else if (FBS[AArch64::HasV8_7aOps])
3848 Str += "ARMv8.7a";
3849 else if (FBS[AArch64::HasV8_8aOps])
3850 Str += "ARMv8.8a";
3851 else if (FBS[AArch64::HasV8_9aOps])
3852 Str += "ARMv8.9a";
3853 else if (FBS[AArch64::HasV9_0aOps])
3854 Str += "ARMv9-a";
3855 else if (FBS[AArch64::HasV9_1aOps])
3856 Str += "ARMv9.1a";
3857 else if (FBS[AArch64::HasV9_2aOps])
3858 Str += "ARMv9.2a";
3859 else if (FBS[AArch64::HasV9_3aOps])
3860 Str += "ARMv9.3a";
3861 else if (FBS[AArch64::HasV9_4aOps])
3862 Str += "ARMv9.4a";
3863 else if (FBS[AArch64::HasV9_5aOps])
3864 Str += "ARMv9.5a";
3865 else if (FBS[AArch64::HasV9_6aOps])
3866 Str += "ARMv9.6a";
3867 else if (FBS[AArch64::HasV8_0rOps])
3868 Str += "ARMv8r";
3869 else {
3870 SmallVector<std::string, 2> ExtMatches;
3871 for (const auto& Ext : ExtensionMap) {
3872 // Use & in case multiple features are enabled
3873 if ((FBS & Ext.Features) != FeatureBitset())
3874 ExtMatches.push_back(Ext.Name);
3875 }
3876 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3877 }
3878}
3879
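// The Encoding value used below packs the SYS operands as op1 in bits
// [13:11], CRn in bits [10:7], CRm in bits [6:3] and op2 in bits [2:0];
// createSysAlias unpacks them into the individual SYS operands.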
3880void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3881 SMLoc S) {
3882 const uint16_t Op2 = Encoding & 7;
3883 const uint16_t Cm = (Encoding & 0x78) >> 3;
3884 const uint16_t Cn = (Encoding & 0x780) >> 7;
3885 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3886
3887 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3888
3889 Operands.push_back(
3890 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3891 Operands.push_back(
3892 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3893 Operands.push_back(
3894 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3895 Expr = MCConstantExpr::create(Op2, getContext());
3896 Operands.push_back(
3897 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3898}
3899
3900/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3901/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
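/// For example "ic ialluis" is assembled as if written "sys #0, c7, c1, #0",
/// and a trailing register ("tlbi vae1, x0") becomes the optional Xt operand
/// of the SYS instruction.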
3902bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3903 OperandVector &Operands) {
3904 if (Name.contains('.'))
3905 return TokError("invalid operand");
3906
3907 Mnemonic = Name;
3908 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3909
3910 const AsmToken &Tok = getTok();
3911 StringRef Op = Tok.getString();
3912 SMLoc S = Tok.getLoc();
3913
3914 if (Mnemonic == "ic") {
3915 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3916 if (!IC)
3917 return TokError("invalid operand for IC instruction");
3918 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3919 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3920 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3921 return TokError(Str);
3922 }
3923 createSysAlias(IC->Encoding, Operands, S);
3924 } else if (Mnemonic == "dc") {
3925 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3926 if (!DC)
3927 return TokError("invalid operand for DC instruction");
3928 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3929 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3930 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3931 return TokError(Str);
3932 }
3933 createSysAlias(DC->Encoding, Operands, S);
3934 } else if (Mnemonic == "at") {
3935 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3936 if (!AT)
3937 return TokError("invalid operand for AT instruction");
3938 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3939 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3940 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3941 return TokError(Str);
3942 }
3943 createSysAlias(AT->Encoding, Operands, S);
3944 } else if (Mnemonic == "tlbi") {
3945 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3946 if (!TLBI)
3947 return TokError("invalid operand for TLBI instruction");
3948 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3949 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3950 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3951 return TokError(Str);
3952 }
3953 createSysAlias(TLBI->Encoding, Operands, S);
3954 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3955
3956 if (Op.lower() != "rctx")
3957 return TokError("invalid operand for prediction restriction instruction");
3958
3959 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3960 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3961 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3962
3963 if (Mnemonic == "cosp" && !hasSpecres2)
3964 return TokError("COSP requires: predres2");
3965 if (!hasPredres)
3966 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3967
3968 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3969 : Mnemonic == "dvp" ? 0b101
3970 : Mnemonic == "cosp" ? 0b110
3971 : Mnemonic == "cpp" ? 0b111
3972 : 0;
3973 assert(PRCTX_Op2 &&
3974 "Invalid mnemonic for prediction restriction instruction");
3975 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3976 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
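// e.g. "cfp rctx, x0" (op2 == 0b100) is equivalent to "sys #3, c7, c3, #4, x0".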
3977
3978 createSysAlias(Encoding, Operands, S);
3979 }
3980
3981 Lex(); // Eat operand.
3982
3983 bool ExpectRegister = !Op.contains_insensitive("all");
3984 bool HasRegister = false;
3985
3986 // Check for the optional register operand.
3987 if (parseOptionalToken(AsmToken::Comma)) {
3988 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3989 return TokError("expected register operand");
3990 HasRegister = true;
3991 }
3992
3993 if (ExpectRegister && !HasRegister)
3994 return TokError("specified " + Mnemonic + " op requires a register");
3995 else if (!ExpectRegister && HasRegister)
3996 return TokError("specified " + Mnemonic + " op does not use a register");
3997
3998 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3999 return true;
4000
4001 return false;
4002}
4003
4004/// parseSyspAlias - The TLBIP instructions are simple aliases for
4005/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4006bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4007 OperandVector &Operands) {
4008 if (Name.contains('.'))
4009 return TokError("invalid operand");
4010
4011 Mnemonic = Name;
4012 Operands.push_back(
4013 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4014
4015 const AsmToken &Tok = getTok();
4016 StringRef Op = Tok.getString();
4017 SMLoc S = Tok.getLoc();
4018
4019 if (Mnemonic == "tlbip") {
4020 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4021 if (HasnXSQualifier) {
4022 Op = Op.drop_back(3);
4023 }
4024 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4025 if (!TLBIorig)
4026 return TokError("invalid operand for TLBIP instruction");
4027 const AArch64TLBI::TLBI TLBI(
4028 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4029 TLBIorig->NeedsReg,
4030 HasnXSQualifier
4031 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4032 : TLBIorig->FeaturesRequired);
4033 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4034 std::string Name =
4035 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4036 std::string Str("TLBIP " + Name + " requires: ");
4037 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
4038 return TokError(Str);
4039 }
4040 createSysAlias(TLBI.Encoding, Operands, S);
4041 }
4042
4043 Lex(); // Eat operand.
4044
4045 if (parseComma())
4046 return true;
4047
4048 if (Tok.isNot(AsmToken::Identifier))
4049 return TokError("expected register identifier");
4050 auto Result = tryParseSyspXzrPair(Operands);
4051 if (Result.isNoMatch())
4052 Result = tryParseGPRSeqPair(Operands);
4053 if (!Result.isSuccess())
4054 return TokError("specified " + Mnemonic +
4055 " op requires a pair of registers");
4056
4057 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4058 return true;
4059
4060 return false;
4061}
4062
4063ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4064 MCAsmParser &Parser = getParser();
4065 const AsmToken &Tok = getTok();
4066
4067 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4068 return TokError("'csync' operand expected");
4069 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4070 // Immediate operand.
4071 const MCExpr *ImmVal;
4072 SMLoc ExprLoc = getLoc();
4073 AsmToken IntTok = Tok;
4074 if (getParser().parseExpression(ImmVal))
4075 return ParseStatus::Failure;
4076 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4077 if (!MCE)
4078 return Error(ExprLoc, "immediate value expected for barrier operand");
4079 int64_t Value = MCE->getValue();
4080 if (Mnemonic == "dsb" && Value > 15) {
4081 // This case is a no match here, but it might be matched by the nXS
4082 // variant. We deliberately do not unlex the optional '#', as it is not
4083 // needed to characterize an integer immediate.
4084 Parser.getLexer().UnLex(IntTok);
4085 return ParseStatus::NoMatch;
4086 }
4087 if (Value < 0 || Value > 15)
4088 return Error(ExprLoc, "barrier operand out of range");
4089 auto DB = AArch64DB::lookupDBByEncoding(Value);
4090 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4091 ExprLoc, getContext(),
4092 false /*hasnXSModifier*/));
4093 return ParseStatus::Success;
4094 }
4095
4096 if (Tok.isNot(AsmToken::Identifier))
4097 return TokError("invalid operand for instruction");
4098
4099 StringRef Operand = Tok.getString();
4100 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4101 auto DB = AArch64DB::lookupDBByName(Operand);
4102 // The only valid named option for ISB is 'sy'
4103 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4104 return TokError("'sy' or #imm operand expected");
4105 // The only valid named option for TSB is 'csync'
4106 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4107 return TokError("'csync' operand expected");
4108 if (!DB && !TSB) {
4109 if (Mnemonic == "dsb") {
4110 // This case is a no match here, but it might be matched by the nXS
4111 // variant.
4112 return ParseStatus::NoMatch;
4113 }
4114 return TokError("invalid barrier option name");
4115 }
4116
4117 Operands.push_back(AArch64Operand::CreateBarrier(
4118 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4119 getContext(), false /*hasnXSModifier*/));
4120 Lex(); // Consume the option
4121
4122 return ParseStatus::Success;
4123}
4124
4125 ParseStatus
4126 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4127 const AsmToken &Tok = getTok();
4128
4129 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4130 if (Mnemonic != "dsb")
4131 return ParseStatus::Failure;
4132
4133 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4134 // Immediate operand.
4135 const MCExpr *ImmVal;
4136 SMLoc ExprLoc = getLoc();
4137 if (getParser().parseExpression(ImmVal))
4138 return ParseStatus::Failure;
4139 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4140 if (!MCE)
4141 return Error(ExprLoc, "immediate value expected for barrier operand");
4142 int64_t Value = MCE->getValue();
4143 // v8.7-A DSB in the nXS variant accepts only the following immediate
4144 // values: 16, 20, 24, 28.
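// These correspond to the named barrier forms oshnxs, nshnxs, ishnxs and
// synxs respectively, looked up via the DBnXS table below.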
4145 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4146 return Error(ExprLoc, "barrier operand out of range");
4147 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4148 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4149 ExprLoc, getContext(),
4150 true /*hasnXSModifier*/));
4151 return ParseStatus::Success;
4152 }
4153
4154 if (Tok.isNot(AsmToken::Identifier))
4155 return TokError("invalid operand for instruction");
4156
4157 StringRef Operand = Tok.getString();
4158 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4159
4160 if (!DB)
4161 return TokError("invalid barrier option name");
4162
4163 Operands.push_back(
4164 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4165 getContext(), true /*hasnXSModifier*/));
4166 Lex(); // Consume the option
4167
4168 return ParseStatus::Success;
4169}
4170
4171ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4172 const AsmToken &Tok = getTok();
4173
4174 if (Tok.isNot(AsmToken::Identifier))
4175 return ParseStatus::NoMatch;
4176
4177 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4178 return ParseStatus::NoMatch;
4179
4180 int MRSReg, MSRReg;
4181 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4182 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4183 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4184 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4185 } else
4186 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4187
4188 unsigned PStateImm = -1;
4189 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4190 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4191 PStateImm = PState15->Encoding;
4192 if (!PState15) {
4193 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4194 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4195 PStateImm = PState1->Encoding;
4196 }
4197
4198 Operands.push_back(
4199 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4200 PStateImm, getContext()));
4201 Lex(); // Eat identifier
4202
4203 return ParseStatus::Success;
4204}
4205
4206 ParseStatus
4207 AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4208 SMLoc S = getLoc();
4209 const AsmToken &Tok = getTok();
4210 if (Tok.isNot(AsmToken::Identifier))
4211 return TokError("invalid operand for instruction");
4212
4213 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4214 if (!PH)
4215 return TokError("invalid operand for instruction");
4216
4217 Operands.push_back(AArch64Operand::CreatePHintInst(
4218 PH->Encoding, Tok.getString(), S, getContext()));
4219 Lex(); // Eat identifier token.
4220 return ParseStatus::Success;
4221}
4222
4223/// tryParseNeonVectorRegister - Parse a vector register operand.
4224bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4225 if (getTok().isNot(AsmToken::Identifier))
4226 return true;
4227
4228 SMLoc S = getLoc();
4229 // Check for a vector register specifier first.
4230 StringRef Kind;
4231 MCRegister Reg;
4232 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4233 if (!Res.isSuccess())
4234 return true;
4235
4236 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4237 if (!KindRes)
4238 return true;
4239
4240 unsigned ElementWidth = KindRes->second;
4241 Operands.push_back(
4242 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4243 S, getLoc(), getContext()));
4244
4245 // If there was an explicit qualifier, that goes on as a literal text
4246 // operand.
4247 if (!Kind.empty())
4248 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4249
4250 return tryParseVectorIndex(Operands).isFailure();
4251}
4252
4253ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4254 SMLoc SIdx = getLoc();
4255 if (parseOptionalToken(AsmToken::LBrac)) {
4256 const MCExpr *ImmVal;
4257 if (getParser().parseExpression(ImmVal))
4258 return ParseStatus::NoMatch;
4259 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4260 if (!MCE)
4261 return TokError("immediate value expected for vector index");
4262
4263 SMLoc E = getLoc();
4264
4265 if (parseToken(AsmToken::RBrac, "']' expected"))
4266 return ParseStatus::Failure;
4267
4268 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4269 E, getContext()));
4270 return ParseStatus::Success;
4271 }
4272
4273 return ParseStatus::NoMatch;
4274}
4275
4276// tryParseVectorRegister - Try to parse a vector register name with
4277// optional kind specifier. If it is a register specifier, eat the token
4278// and return it.
4279ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4280 StringRef &Kind,
4281 RegKind MatchKind) {
4282 const AsmToken &Tok = getTok();
4283
4284 if (Tok.isNot(AsmToken::Identifier))
4285 return ParseStatus::NoMatch;
4286
4287 StringRef Name = Tok.getString();
4288 // If there is a kind specifier, it's separated from the register name by
4289 // a '.'.
4290 size_t Start = 0, Next = Name.find('.');
4291 StringRef Head = Name.slice(Start, Next);
4292 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4293
4294 if (RegNum) {
4295 if (Next != StringRef::npos) {
4296 Kind = Name.substr(Next);
4297 if (!isValidVectorKind(Kind, MatchKind))
4298 return TokError("invalid vector kind qualifier");
4299 }
4300 Lex(); // Eat the register token.
4301
4302 Reg = RegNum;
4303 return ParseStatus::Success;
4304 }
4305
4306 return ParseStatus::NoMatch;
4307}
4308
4309ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4310 OperandVector &Operands) {
4311 ParseStatus Status =
4312 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4313 if (!Status.isSuccess())
4314 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4315 return Status;
4316}
4317
4318/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
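/// Handles forms such as "p0.b", and the predicated forms "p0/z" (zeroing)
/// and "p0/m" (merging); predicate-as-counter registers accept only "/z".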
4319template <RegKind RK>
4320 ParseStatus
4321 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4322 // Check for a SVE predicate register specifier first.
4323 const SMLoc S = getLoc();
4324 StringRef Kind;
4325 MCRegister RegNum;
4326 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4327 if (!Res.isSuccess())
4328 return Res;
4329
4330 const auto &KindRes = parseVectorKind(Kind, RK);
4331 if (!KindRes)
4332 return ParseStatus::NoMatch;
4333
4334 unsigned ElementWidth = KindRes->second;
4335 Operands.push_back(AArch64Operand::CreateVectorReg(
4336 RegNum, RK, ElementWidth, S,
4337 getLoc(), getContext()));
4338
4339 if (getLexer().is(AsmToken::LBrac)) {
4340 if (RK == RegKind::SVEPredicateAsCounter) {
4341 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4342 if (ResIndex.isSuccess())
4343 return ParseStatus::Success;
4344 } else {
4345 // Indexed predicate, there's no comma so try parse the next operand
4346 // immediately.
4347 if (parseOperand(Operands, false, false))
4348 return ParseStatus::NoMatch;
4349 }
4350 }
4351
4352 // Not all predicates are followed by a '/m' or '/z'.
4353 if (getTok().isNot(AsmToken::Slash))
4354 return ParseStatus::Success;
4355
4356 // But when they do they shouldn't have an element type suffix.
4357 if (!Kind.empty())
4358 return Error(S, "not expecting size suffix");
4359
4360 // Add a literal slash as operand
4361 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4362
4363 Lex(); // Eat the slash.
4364
4365 // Zeroing or merging?
4366 auto Pred = getTok().getString().lower();
4367 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4368 return Error(getLoc(), "expecting 'z' predication");
4369
4370 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4371 return Error(getLoc(), "expecting 'm' or 'z' predication");
4372
4373 // Add zero/merge token.
4374 const char *ZM = Pred == "z" ? "z" : "m";
4375 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4376
4377 Lex(); // Eat zero/merge token.
4378 return ParseStatus::Success;
4379}
4380
4381/// parseRegister - Parse a register operand.
4382bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4383 // Try for a Neon vector register.
4384 if (!tryParseNeonVectorRegister(Operands))
4385 return false;
4386
4387 if (tryParseZTOperand(Operands).isSuccess())
4388 return false;
4389
4390 // Otherwise try for a scalar register.
4391 if (tryParseGPROperand<false>(Operands).isSuccess())
4392 return false;
4393
4394 return true;
4395}
4396
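// parseSymbolicImmVal parses an optional ":specifier:" relocation prefix on
// an immediate, e.g. the ":lo12:" in "add x0, x0, :lo12:sym" or the ":got:"
// in "adrp x0, :got:sym", and wraps the expression in an AArch64MCExpr
// carrying that variant kind.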
4397bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4398 bool HasELFModifier = false;
4399 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4400
4401 if (parseOptionalToken(AsmToken::Colon)) {
4402 HasELFModifier = true;
4403
4404 if (getTok().isNot(AsmToken::Identifier))
4405 return TokError("expect relocation specifier in operand after ':'");
4406
4407 std::string LowerCase = getTok().getIdentifier().lower();
4408 RefKind =
4409 StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4410 .Case("lo12", AArch64MCExpr::VK_LO12)
4411 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4412 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4413 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4414 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4415 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4416 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4417 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4418 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4419 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4420 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4421 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4422 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4423 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4424 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4425 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4426 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4427 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4428 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4429 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4430 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4431 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4432 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4433 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4434 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4435 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4436 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4437 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4438 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4439 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4440 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4441 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4442 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4443 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4444 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4445 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4446 .Case("tlsdesc_auth_lo12", AArch64MCExpr::VK_TLSDESC_AUTH_LO12)
4447 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4448 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4449 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4450 .Case("got_auth", AArch64MCExpr::VK_GOT_AUTH_PAGE)
4451 .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
4452 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4453 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4454 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4455 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4456 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4457 .Case("tlsdesc_auth", AArch64MCExpr::VK_TLSDESC_AUTH_PAGE)
4458 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4459 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4460 .Default(AArch64MCExpr::VK_INVALID);
4461
4462 if (RefKind == AArch64MCExpr::VK_INVALID)
4463 return TokError("expect relocation specifier in operand after ':'");
4464
4465 Lex(); // Eat identifier
4466
4467 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4468 return true;
4469 }
4470
4471 if (getParser().parseExpression(ImmVal))
4472 return true;
4473
4474 if (HasELFModifier)
4475 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4476
4477 return false;
4478}
4479
4480ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4481 if (getTok().isNot(AsmToken::LCurly))
4482 return ParseStatus::NoMatch;
4483
4484 auto ParseMatrixTile = [this](unsigned &Reg,
4485 unsigned &ElementWidth) -> ParseStatus {
4486 StringRef Name = getTok().getString();
4487 size_t DotPosition = Name.find('.');
4488 if (DotPosition == StringRef::npos)
4489 return ParseStatus::NoMatch;
4490
4491 unsigned RegNum = matchMatrixTileListRegName(Name);
4492 if (!RegNum)
4493 return ParseStatus::NoMatch;
4494
4495 StringRef Tail = Name.drop_front(DotPosition);
4496 const std::optional<std::pair<int, int>> &KindRes =
4497 parseVectorKind(Tail, RegKind::Matrix);
4498 if (!KindRes)
4499 return TokError(
4500 "Expected the register to be followed by element width suffix");
4501 ElementWidth = KindRes->second;
4502 Reg = RegNum;
4503 Lex(); // Eat the register.
4504 return ParseStatus::Success;
4505 };
4506
4507 SMLoc S = getLoc();
4508 auto LCurly = getTok();
4509 Lex(); // Eat left bracket token.
4510
4511 // Empty matrix list
4512 if (parseOptionalToken(AsmToken::RCurly)) {
4513 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4514 /*RegMask=*/0, S, getLoc(), getContext()));
4515 return ParseStatus::Success;
4516 }
4517
4518 // Try parse {za} alias early
4519 if (getTok().getString().equals_insensitive("za")) {
4520 Lex(); // Eat 'za'
4521
4522 if (parseToken(AsmToken::RCurly, "'}' expected"))
4523 return ParseStatus::Failure;
4524
4525 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4526 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4527 return ParseStatus::Success;
4528 }
4529
4530 SMLoc TileLoc = getLoc();
4531
4532 unsigned FirstReg, ElementWidth;
4533 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4534 if (!ParseRes.isSuccess()) {
4535 getLexer().UnLex(LCurly);
4536 return ParseRes;
4537 }
4538
4539 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4540
4541 unsigned PrevReg = FirstReg;
4542
4543 SmallSet<unsigned, 8> DRegs;
4544 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4545
4546 SmallSet<unsigned, 8> SeenRegs;
4547 SeenRegs.insert(FirstReg);
4548
4549 while (parseOptionalToken(AsmToken::Comma)) {
4550 TileLoc = getLoc();
4551 unsigned Reg, NextElementWidth;
4552 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4553 if (!ParseRes.isSuccess())
4554 return ParseRes;
4555
4556 // Element size must match on all regs in the list.
4557 if (ElementWidth != NextElementWidth)
4558 return Error(TileLoc, "mismatched register size suffix");
4559
4560 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4561 Warning(TileLoc, "tile list not in ascending order");
4562
4563 if (SeenRegs.contains(Reg))
4564 Warning(TileLoc, "duplicate tile in list");
4565 else {
4566 SeenRegs.insert(Reg);
4567 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4568 }
4569
4570 PrevReg = Reg;
4571 }
4572
4573 if (parseToken(AsmToken::RCurly, "'}' expected"))
4574 return ParseStatus::Failure;
4575
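// The tile list is encoded as a mask with one bit per 64-bit tile, bit 0
// corresponding to ZAD0; wider tiles were expanded into their ZAD aliases by
// ComputeRegsForAlias above (e.g. za0.s covers ZAD0 and ZAD4).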
4576 unsigned RegMask = 0;
4577 for (auto Reg : DRegs)
4578 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4579 RI->getEncodingValue(AArch64::ZAD0));
4580 Operands.push_back(
4581 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4582
4583 return ParseStatus::Success;
4584}
4585
4586template <RegKind VectorKind>
4587ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4588 bool ExpectMatch) {
4589 MCAsmParser &Parser = getParser();
4590 if (!getTok().is(AsmToken::LCurly))
4591 return ParseStatus::NoMatch;
4592
4593 // Wrapper around parse function
4594 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4595 bool NoMatchIsError) -> ParseStatus {
4596 auto RegTok = getTok();
4597 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4598 if (ParseRes.isSuccess()) {
4599 if (parseVectorKind(Kind, VectorKind))
4600 return ParseRes;
4601 llvm_unreachable("Expected a valid vector kind");
4602 }
4603
4604 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4605 RegTok.getString().equals_insensitive("zt0"))
4606 return ParseStatus::NoMatch;
4607
4608 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4609 (ParseRes.isNoMatch() && NoMatchIsError &&
4610 !RegTok.getString().starts_with_insensitive("za")))
4611 return Error(Loc, "vector register expected");
4612
4613 return ParseStatus::NoMatch;
4614 };
4615
4616 int NumRegs = getNumRegsForRegKind(VectorKind);
4617 SMLoc S = getLoc();
4618 auto LCurly = getTok();
4619 Lex(); // Eat left bracket token.
4620
4621 StringRef Kind;
4622 MCRegister FirstReg;
4623 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4624
4625 // Put back the original left bracket if there was no match, so that
4626 // different types of list-operands can be matched (e.g. SVE, Neon).
4627 if (ParseRes.isNoMatch())
4628 Parser.getLexer().UnLex(LCurly);
4629
4630 if (!ParseRes.isSuccess())
4631 return ParseRes;
4632
4633 int64_t PrevReg = FirstReg;
4634 unsigned Count = 1;
4635
4636 int Stride = 1;
4637 if (parseOptionalToken(AsmToken::Minus)) {
4638 SMLoc Loc = getLoc();
4639 StringRef NextKind;
4640
4641 MCRegister Reg;
4642 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4643 if (!ParseRes.isSuccess())
4644 return ParseRes;
4645
4646 // Any Kind suffixes must match on all regs in the list.
4647 if (Kind != NextKind)
4648 return Error(Loc, "mismatched register size suffix");
4649
4650 unsigned Space =
4651 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4652
4653 if (Space == 0 || Space > 3)
4654 return Error(Loc, "invalid number of vectors");
4655
4656 Count += Space;
4657 }
4658 else {
4659 bool HasCalculatedStride = false;
4660 while (parseOptionalToken(AsmToken::Comma)) {
4661 SMLoc Loc = getLoc();
4662 StringRef NextKind;
4663 MCRegister Reg;
4664 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4665 if (!ParseRes.isSuccess())
4666 return ParseRes;
4667
4668 // Any Kind suffixes must match on all regs in the list.
4669 if (Kind != NextKind)
4670 return Error(Loc, "mismatched register size suffix");
4671
4672 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4673 unsigned PrevRegVal =
4674 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4675 if (!HasCalculatedStride) {
4676 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4677 : (RegVal + NumRegs - PrevRegVal);
4678 HasCalculatedStride = true;
4679 }
4680
4681 // Register must be incremental (with a wraparound at last register).
4682 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4683 return Error(Loc, "registers must have the same sequential stride");
4684
4685 PrevReg = Reg;
4686 ++Count;
4687 }
4688 }
4689
4690 if (parseToken(AsmToken::RCurly, "'}' expected"))
4691 return ParseStatus::Failure;
4692
4693 if (Count > 4)
4694 return Error(S, "invalid number of vectors");
4695
4696 unsigned NumElements = 0;
4697 unsigned ElementWidth = 0;
4698 if (!Kind.empty()) {
4699 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4700 std::tie(NumElements, ElementWidth) = *VK;
4701 }
4702
4703 Operands.push_back(AArch64Operand::CreateVectorList(
4704 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4705 getLoc(), getContext()));
4706
4707 return ParseStatus::Success;
4708}
4709
4710/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
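/// e.g. "{ v0.16b, v1.16b }" or the range form "{ v0.2d - v3.2d }", optionally
/// followed by a lane index such as "{ v0.b, v1.b }[3]".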
4711bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4712 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4713 if (!ParseRes.isSuccess())
4714 return true;
4715
4716 return tryParseVectorIndex(Operands).isFailure();
4717}
4718
4719ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4720 SMLoc StartLoc = getLoc();
4721
4722 MCRegister RegNum;
4723 ParseStatus Res = tryParseScalarRegister(RegNum);
4724 if (!Res.isSuccess())
4725 return Res;
4726
4727 if (!parseOptionalToken(AsmToken::Comma)) {
4728 Operands.push_back(AArch64Operand::CreateReg(
4729 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4730 return ParseStatus::Success;
4731 }
4732
4733 parseOptionalToken(AsmToken::Hash);
4734
4735 if (getTok().isNot(AsmToken::Integer))
4736 return Error(getLoc(), "index must be absent or #0");
4737
4738 const MCExpr *ImmVal;
4739 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4740 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4741 return Error(getLoc(), "index must be absent or #0");
4742
4743 Operands.push_back(AArch64Operand::CreateReg(
4744 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4745 return ParseStatus::Success;
4746}
4747
4748ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4749 SMLoc StartLoc = getLoc();
4750 const AsmToken &Tok = getTok();
4751 std::string Name = Tok.getString().lower();
4752
4753 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4754
4755 if (RegNum == 0)
4756 return ParseStatus::NoMatch;
4757
4758 Operands.push_back(AArch64Operand::CreateReg(
4759 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4760 Lex(); // Eat register.
4761
4762 // Check if register is followed by an index
4763 if (parseOptionalToken(AsmToken::LBrac)) {
4764 Operands.push_back(
4765 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4766 const MCExpr *ImmVal;
4767 if (getParser().parseExpression(ImmVal))
4768 return ParseStatus::NoMatch;
4769 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4770 if (!MCE)
4771 return TokError("immediate value expected for vector index");
4772 Operands.push_back(AArch64Operand::CreateImm(
4773 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4774 getLoc(), getContext()));
4775 if (parseOptionalToken(AsmToken::Comma))
4776 if (parseOptionalMulOperand(Operands))
4777 return ParseStatus::Failure;
4778 if (parseToken(AsmToken::RBrac, "']' expected"))
4779 return ParseStatus::Failure;
4780 Operands.push_back(
4781 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4782 }
4783 return ParseStatus::Success;
4784}
4785
4786template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4787ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4788 SMLoc StartLoc = getLoc();
4789
4790 MCRegister RegNum;
4791 ParseStatus Res = tryParseScalarRegister(RegNum);
4792 if (!Res.isSuccess())
4793 return Res;
4794
4795 // No shift/extend is the default.
4796 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4797 Operands.push_back(AArch64Operand::CreateReg(
4798 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4799 return ParseStatus::Success;
4800 }
4801
4802 // Eat the comma
4803 Lex();
4804
4805 // Match the shift
4806 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4807 Res = tryParseOptionalShiftExtend(ExtOpnd);
4808 if (!Res.isSuccess())
4809 return Res;
4810
4811 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4812 Operands.push_back(AArch64Operand::CreateReg(
4813 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4814 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4815 Ext->hasShiftExtendAmount()));
4816
4817 return ParseStatus::Success;
4818}
4819
4820bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4821 MCAsmParser &Parser = getParser();
4822
4823 // Some SVE instructions have a decoration after the immediate, i.e.
4824 // "mul vl". We parse them here and add tokens, which must be present in the
4825 // asm string in the tablegen instruction.
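// e.g. the "#1, mul vl" in "ld1b { z0.b }, p0/z, [x0, #1, mul vl]", or the
// "mul #4" in "cntb x0, all, mul #4".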
4826 bool NextIsVL =
4827 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4828 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4829 if (!getTok().getString().equals_insensitive("mul") ||
4830 !(NextIsVL || NextIsHash))
4831 return true;
4832
4833 Operands.push_back(
4834 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4835 Lex(); // Eat the "mul"
4836
4837 if (NextIsVL) {
4838 Operands.push_back(
4839 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4840 Lex(); // Eat the "vl"
4841 return false;
4842 }
4843
4844 if (NextIsHash) {
4845 Lex(); // Eat the #
4846 SMLoc S = getLoc();
4847
4848 // Parse immediate operand.
4849 const MCExpr *ImmVal;
4850 if (!Parser.parseExpression(ImmVal))
4851 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4852 Operands.push_back(AArch64Operand::CreateImm(
4853 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4854 getContext()));
4855 return false;
4856 }
4857 }
4858
4859 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4860}
4861
4862bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4863 StringRef &VecGroup) {
4864 MCAsmParser &Parser = getParser();
4865 auto Tok = Parser.getTok();
4866 if (Tok.isNot(AsmToken::Identifier))
4867 return true;
4868
4869 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4870 .Case("vgx2", "vgx2")
4871 .Case("vgx4", "vgx4")
4872 .Default("");
4873
4874 if (VG.empty())
4875 return true;
4876
4877 VecGroup = VG;
4878 Parser.Lex(); // Eat vgx[2|4]
4879 return false;
4880}
4881
4882bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4883 auto Tok = getTok();
4884 if (Tok.isNot(AsmToken::Identifier))
4885 return true;
4886
4887 auto Keyword = Tok.getString();
4888 Keyword = StringSwitch<StringRef>(Keyword.lower())
4889 .Case("sm", "sm")
4890 .Case("za", "za")
4891 .Default(Keyword);
4892 Operands.push_back(
4893 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4894
4895 Lex();
4896 return false;
4897}
4898
4899 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4900/// operand regardless of the mnemonic.
4901bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4902 bool invertCondCode) {
4903 MCAsmParser &Parser = getParser();
4904
4905 ParseStatus ResTy =
4906 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4907
4908 // Check if the current operand has a custom associated parser, if so, try to
4909 // custom parse the operand, or fallback to the general approach.
4910 if (ResTy.isSuccess())
4911 return false;
4912 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4913 // there was a match, but an error occurred, in which case, just return that
4914 // the operand parsing failed.
4915 if (ResTy.isFailure())
4916 return true;
4917
4918 // Nothing custom, so do general case parsing.
4919 SMLoc S, E;
4920 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4921 if (parseOptionalToken(AsmToken::Comma)) {
4922 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4923 if (!Res.isNoMatch())
4924 return Res.isFailure();
4925 getLexer().UnLex(SavedTok);
4926 }
4927 return false;
4928 };
4929 switch (getLexer().getKind()) {
4930 default: {
4931 SMLoc S = getLoc();
4932 const MCExpr *Expr;
4933 if (parseSymbolicImmVal(Expr))
4934 return Error(S, "invalid operand");
4935
4936 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4937 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4938 return parseOptionalShiftExtend(getTok());
4939 }
4940 case AsmToken::LBrac: {
4941 Operands.push_back(
4942 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4943 Lex(); // Eat '['
4944
4945 // There's no comma after a '[', so we can parse the next operand
4946 // immediately.
4947 return parseOperand(Operands, false, false);
4948 }
4949 case AsmToken::LCurly: {
4950 if (!parseNeonVectorList(Operands))
4951 return false;
4952
4953 Operands.push_back(
4954 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4955 Lex(); // Eat '{'
4956
4957 // There's no comma after a '{', so we can parse the next operand
4958 // immediately.
4959 return parseOperand(Operands, false, false);
4960 }
4961 case AsmToken::Identifier: {
4962 // See if this is a "VG" decoration used by SME instructions.
4963 StringRef VecGroup;
4964 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4965 Operands.push_back(
4966 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4967 return false;
4968 }
4969 // If we're expecting a Condition Code operand, then just parse that.
4970 if (isCondCode)
4971 return parseCondCode(Operands, invertCondCode);
4972
4973 // If it's a register name, parse it.
4974 if (!parseRegister(Operands)) {
4975 // Parse an optional shift/extend modifier.
4976 AsmToken SavedTok = getTok();
4977 if (parseOptionalToken(AsmToken::Comma)) {
4978 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4979 // such cases and don't report an error when <label> happens to match a
4980 // shift/extend modifier.
4981 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4982 /*ParseForAllFeatures=*/true);
4983 if (!Res.isNoMatch())
4984 return Res.isFailure();
4985 Res = tryParseOptionalShiftExtend(Operands);
4986 if (!Res.isNoMatch())
4987 return Res.isFailure();
4988 getLexer().UnLex(SavedTok);
4989 }
4990 return false;
4991 }
4992
4993 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4994 // by SVE instructions.
4995 if (!parseOptionalMulOperand(Operands))
4996 return false;
4997
4998 // If this is a two-word mnemonic, parse its special keyword
4999 // operand as an identifier.
5000 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5001 Mnemonic == "gcsb")
5002 return parseKeywordOperand(Operands);
5003
5004 // This was not a register so parse other operands that start with an
5005 // identifier (like labels) as expressions and create them as immediates.
5006 const MCExpr *IdVal;
5007 S = getLoc();
5008 if (getParser().parseExpression(IdVal))
5009 return true;
5010 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5011 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5012 return false;
5013 }
5014 case AsmToken::Integer:
5015 case AsmToken::Real:
5016 case AsmToken::Hash: {
5017 // #42 -> immediate.
5018 S = getLoc();
5019
5020 parseOptionalToken(AsmToken::Hash);
5021
5022 // Parse a negative sign
5023 bool isNegative = false;
5024 if (getTok().is(AsmToken::Minus)) {
5025 isNegative = true;
5026 // We need to consume this token only when we have a Real, otherwise
5027 // we let parseSymbolicImmVal take care of it
5028 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5029 Lex();
5030 }
5031
5032 // The only Real that should come through here is a literal #0.0 for
5033 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5034 // so convert the value.
5035 const AsmToken &Tok = getTok();
5036 if (Tok.is(AsmToken::Real)) {
5037 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5038 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5039 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5040 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5041 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5042 return TokError("unexpected floating point literal");
5043 else if (IntVal != 0 || isNegative)
5044 return TokError("expected floating-point constant #0.0");
5045 Lex(); // Eat the token.
5046
5047 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5048 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5049 return false;
5050 }
5051
5052 const MCExpr *ImmVal;
5053 if (parseSymbolicImmVal(ImmVal))
5054 return true;
5055
5056 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5057 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5058
5059 // Parse an optional shift/extend modifier.
5060 return parseOptionalShiftExtend(Tok);
5061 }
5062 case AsmToken::Equal: {
5063 SMLoc Loc = getLoc();
5064 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5065 return TokError("unexpected token in operand");
5066 Lex(); // Eat '='
5067 const MCExpr *SubExprVal;
5068 if (getParser().parseExpression(SubExprVal))
5069 return true;
5070
5071 if (Operands.size() < 2 ||
5072 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5073 return Error(Loc, "Only valid when first operand is register");
5074
5075 bool IsXReg =
5076 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5077 Operands[1]->getReg());
5078
5079 MCContext& Ctx = getContext();
5080 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5081 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
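// e.g. "ldr x0, =0x20000" becomes "movz x0, #2, lsl #16", while values that
// do not fit a single movz fall through to the constant pool path below.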
5082 if (isa<MCConstantExpr>(SubExprVal)) {
5083 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5084 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5085 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5086 ShiftAmt += 16;
5087 Imm >>= 16;
5088 }
5089 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5090 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5091 Operands.push_back(AArch64Operand::CreateImm(
5092 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5093 if (ShiftAmt)
5094 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5095 ShiftAmt, true, S, E, Ctx));
5096 return false;
5097 }
5098 APInt Simm = APInt(64, Imm << ShiftAmt);
5099 // check if the immediate is an unsigned or signed 32-bit int for W regs
5100 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5101 return Error(Loc, "Immediate too large for register");
5102 }
5103 // If it is a label or an imm that cannot fit in a movz, put it into CP.
5104 const MCExpr *CPLoc =
5105 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5106 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5107 return false;
5108 }
5109 }
5110}
5111
5112bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5113 const MCExpr *Expr = nullptr;
5114 SMLoc L = getLoc();
5115 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5116 return true;
5117 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5118 if (check(!Value, L, "expected constant expression"))
5119 return true;
5120 Out = Value->getValue();
5121 return false;
5122}
5123
5124bool AArch64AsmParser::parseComma() {
5125 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5126 return true;
5127 // Eat the comma
5128 Lex();
5129 return false;
5130}
5131
5132bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5133 unsigned First, unsigned Last) {
5134 MCRegister Reg;
5135 SMLoc Start, End;
5136 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5137 return true;
5138
5139 // Special handling for FP and LR; they aren't linearly after x28 in
5140 // the registers enum.
5141 unsigned RangeEnd = Last;
5142 if (Base == AArch64::X0) {
5143 if (Last == AArch64::FP) {
5144 RangeEnd = AArch64::X28;
5145 if (Reg == AArch64::FP) {
5146 Out = 29;
5147 return false;
5148 }
5149 }
5150 if (Last == AArch64::LR) {
5151 RangeEnd = AArch64::X28;
5152 if (Reg == AArch64::FP) {
5153 Out = 29;
5154 return false;
5155 } else if (Reg == AArch64::LR) {
5156 Out = 30;
5157 return false;
5158 }
5159 }
5160 }
5161
5162 if (check(Reg < First || Reg > RangeEnd, Start,
5163 Twine("expected register in range ") +
5164 AArch64InstPrinter::getRegisterName(First) + " to " +
5165 AArch64InstPrinter::getRegisterName(Last)))
5166 return true;
5167 Out = Reg - Base;
5168 return false;
5169}
5170
5171bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5172 const MCParsedAsmOperand &Op2) const {
5173 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5174 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5175
5176 if (AOp1.isVectorList() && AOp2.isVectorList())
5177 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5178 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5179 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5180
5181 if (!AOp1.isReg() || !AOp2.isReg())
5182 return false;
5183
5184 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5185 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5186 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5187
5188 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5189 "Testing equality of non-scalar registers not supported");
5190
5191 // Check whether the registers match via their sub/super register classes.
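// e.g. with an EqualsSuperReg constraint on the first operand, w3 is
// considered equal to x3 because x3 is the 64-bit super-register of w3.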
5192 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5193 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5194 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5195 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5196 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5197 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5198 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5199 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5200
5201 return false;
5202}
5203
5204/// Parse an AArch64 instruction mnemonic followed by its operands.
5205bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5206 StringRef Name, SMLoc NameLoc,
5207 OperandVector &Operands) {
5208 Name = StringSwitch<StringRef>(Name.lower())
5209 .Case("beq", "b.eq")
5210 .Case("bne", "b.ne")
5211 .Case("bhs", "b.hs")
5212 .Case("bcs", "b.cs")
5213 .Case("blo", "b.lo")
5214 .Case("bcc", "b.cc")
5215 .Case("bmi", "b.mi")
5216 .Case("bpl", "b.pl")
5217 .Case("bvs", "b.vs")
5218 .Case("bvc", "b.vc")
5219 .Case("bhi", "b.hi")
5220 .Case("bls", "b.ls")
5221 .Case("bge", "b.ge")
5222 .Case("blt", "b.lt")
5223 .Case("bgt", "b.gt")
5224 .Case("ble", "b.le")
5225 .Case("bal", "b.al")
5226 .Case("bnv", "b.nv")
5227 .Default(Name);
5228
5229 // First check for the AArch64-specific .req directive.
5230 if (getTok().is(AsmToken::Identifier) &&
5231 getTok().getIdentifier().lower() == ".req") {
5232 parseDirectiveReq(Name, NameLoc);
5233 // We always return 'error' for this, as we're done with this
5234 // statement and don't need to match the instruction.
5235 return true;
5236 }
5237
5238 // Create the leading tokens for the mnemonic, split by '.' characters.
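// e.g. for "b.eq" the head is "b"; the remaining ".eq" (and any further
// '.'-separated pieces) are appended as suffix tokens further down.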
5239 size_t Start = 0, Next = Name.find('.');
5240 StringRef Head = Name.slice(Start, Next);
5241
5242 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5243 // the SYS instruction.
5244 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5245 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5246 return parseSysAlias(Head, NameLoc, Operands);
5247
5248 // TLBIP instructions are aliases for the SYSP instruction.
5249 if (Head == "tlbip")
5250 return parseSyspAlias(Head, NameLoc, Operands);
5251
5252 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5253 Mnemonic = Head;
5254
5255 // Handle condition codes for a branch mnemonic
5256 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5257 Start = Next;
5258 Next = Name.find('.', Start + 1);
5259 Head = Name.slice(Start + 1, Next);
5260
5261 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5262 (Head.data() - Name.data()));
5263 std::string Suggestion;
5264 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5265 if (CC == AArch64CC::Invalid) {
5266 std::string Msg = "invalid condition code";
5267 if (!Suggestion.empty())
5268 Msg += ", did you mean " + Suggestion + "?";
5269 return Error(SuffixLoc, Msg);
5270 }
5271 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5272 /*IsSuffix=*/true));
5273 Operands.push_back(
5274 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5275 }
5276
5277 // Add the remaining tokens in the mnemonic.
5278 while (Next != StringRef::npos) {
5279 Start = Next;
5280 Next = Name.find('.', Start + 1);
5281 Head = Name.slice(Start, Next);
5282 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5283 (Head.data() - Name.data()) + 1);
5284 Operands.push_back(AArch64Operand::CreateToken(
5285 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5286 }
5287
5288 // Conditional compare instructions have a Condition Code operand, which needs
5289 // to be parsed and an immediate operand created.
5290 bool condCodeFourthOperand =
5291 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5292 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5293 Head == "csinc" || Head == "csinv" || Head == "csneg");
5294
5295 // These instructions are aliases to some of the conditional select
5296 // instructions. However, the condition code is inverted in the aliased
5297 // instruction.
5298 //
5299 // FIXME: Is this the correct way to handle these? Or should the parser
5300 // generate the aliased instructions directly?
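// e.g. "cset w0, eq" corresponds to "csinc w0, wzr, wzr, ne", so the parsed
// condition code ends up inverted when the alias is matched.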
5301 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5302 bool condCodeThirdOperand =
5303 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5304
5305 // Read the remaining operands.
5306 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5307
5308 unsigned N = 1;
5309 do {
5310 // Parse and remember the operand.
5311 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5312 (N == 3 && condCodeThirdOperand) ||
5313 (N == 2 && condCodeSecondOperand),
5314 condCodeSecondOperand || condCodeThirdOperand)) {
5315 return true;
5316 }
5317
5318 // After successfully parsing some operands there are three special cases
5319 // to consider (i.e. notional operands not separated by commas). Two are
5320 // due to memory specifiers:
5321 // + An RBrac will end an address for load/store/prefetch
5322 // + An '!' will indicate a pre-indexed operation.
5323 //
5324 // And a further case is '}', which ends a group of tokens specifying the
5325 // SME accumulator array 'ZA' or tile vector, i.e.
5326 //
5327 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5328 //
5329 // It's someone else's responsibility to make sure these tokens are sane
5330 // in the given context!
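// e.g. "ldr x0, [x1, #8]!" produces the "]" and "!" tokens here, and
// "zero {za}" closes its accumulator group with the "}" token.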
5331
5332 if (parseOptionalToken(AsmToken::RBrac))
5333 Operands.push_back(
5334 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5335 if (parseOptionalToken(AsmToken::Exclaim))
5336 Operands.push_back(
5337 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5338 if (parseOptionalToken(AsmToken::RCurly))
5339 Operands.push_back(
5340 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5341
5342 ++N;
5343 } while (parseOptionalToken(AsmToken::Comma));
5344 }
5345
5346 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5347 return true;
5348
5349 return false;
5350}
5351
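// Returns true if Reg names the B/H/S/D/Q (or Z) view of the same
// architectural vector register as ZReg, i.e. the two registers alias.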
5352static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5353 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5354 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5355 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5356 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5357 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5358 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5359 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5360}
5361
5362// FIXME: This entire function is a giant hack to provide us with decent
5363// operand range validation/diagnostics until TableGen/MC can be extended
5364// to support autogeneration of this kind of validation.
5365bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5366 SmallVectorImpl<SMLoc> &Loc) {
5367 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5368 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5369
5370 // A prefix only applies to the instruction following it. Here we extract
5371 // prefix information for the next instruction before validating the current
5372 // one so that in the case of failure we don't erroneously continue using the
5373 // current prefix.
5374 PrefixInfo Prefix = NextPrefix;
5375 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5376
5377 // Before validating the instruction in isolation we run through the rules
5378 // applicable when it follows a prefix instruction.
5379 // NOTE: brk & hlt can be prefixed but require no additional validation.
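// e.g. after "movprfx z0, z7" the following instruction must be destructive,
// must write z0, and must not also read z0 as a non-tied source operand.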
5380 if (Prefix.isActive() &&
5381 (Inst.getOpcode() != AArch64::BRK) &&
5382 (Inst.getOpcode() != AArch64::HLT)) {
5383
5384 // Prefixed instructions must have a destructive operand.
5385 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5386 AArch64::NotDestructive)
5387 return Error(IDLoc, "instruction is unpredictable when following a"
5388 " movprfx, suggest replacing movprfx with mov");
5389
5390 // Destination operands must match.
5391 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5392 return Error(Loc[0], "instruction is unpredictable when following a"
5393 " movprfx writing to a different destination");
5394
5395 // Destination operand must not be used in any other location.
5396 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5397 if (Inst.getOperand(i).isReg() &&
5398 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5399 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5400 return Error(Loc[0], "instruction is unpredictable when following a"
5401 " movprfx and destination also used as non-destructive"
5402 " source");
5403 }
5404
5405 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5406 if (Prefix.isPredicated()) {
5407 int PgIdx = -1;
5408
5409 // Find the instruction's general predicate.
5410 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5411 if (Inst.getOperand(i).isReg() &&
5412 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5413 PgIdx = i;
5414 break;
5415 }
5416
5417 // Instruction must be predicated if the movprfx is predicated.
5418 if (PgIdx == -1 ||
5419 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5420 return Error(IDLoc, "instruction is unpredictable when following a"
5421 " predicated movprfx, suggest using unpredicated movprfx");
5422
5423 // Instruction must use same general predicate as the movprfx.
5424 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5425 return Error(IDLoc, "instruction is unpredictable when following a"
5426 " predicated movprfx using a different general predicate");
5427
5428 // Instruction element type must match the movprfx.
5429 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5430 return Error(IDLoc, "instruction is unpredictable when following a"
5431 " predicated movprfx with a different element size");
5432 }
5433 }
5434
5435 // On ARM64EC, only valid registers may be used. Warn against using
5436 // explicitly disallowed registers.
5437 if (IsWindowsArm64EC) {
5438 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5439 if (Inst.getOperand(i).isReg()) {
5440 MCRegister Reg = Inst.getOperand(i).getReg();
5441 // At this point, vector registers are matched to their
5442 // appropriately sized alias.
5443 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5444 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5445 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5446 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5447 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5448 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5449 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5450 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5451 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5452 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5453 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5454 " is disallowed on ARM64EC.");
5455 }
5456 }
5457 }
5458 }
5459
5460 // Check for indexed addressing modes w/ the base register being the
5461 // same as a destination/source register or pair load where
5462 // the Rt == Rt2. All of those are undefined behaviour.
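// e.g. "ldp x0, x0, [x1]" (Rt == Rt2) and "ldr x0, [x0], #8" (writeback base
// equal to the destination) are rejected below.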
5463 switch (Inst.getOpcode()) {
5464 case AArch64::LDPSWpre:
5465 case AArch64::LDPWpost:
5466 case AArch64::LDPWpre:
5467 case AArch64::LDPXpost:
5468 case AArch64::LDPXpre: {
5469 MCRegister Rt = Inst.getOperand(1).getReg();
5470 MCRegister Rt2 = Inst.getOperand(2).getReg();
5471 MCRegister Rn = Inst.getOperand(3).getReg();
5472 if (RI->isSubRegisterEq(Rn, Rt))
5473 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5474 "is also a destination");
5475 if (RI->isSubRegisterEq(Rn, Rt2))
5476 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5477 "is also a destination");
5478 [[fallthrough]];
5479 }
5480 case AArch64::LDR_ZA:
5481 case AArch64::STR_ZA: {
5482 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5483 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5484 return Error(Loc[1],
5485 "unpredictable instruction, immediate and offset mismatch.");
5486 break;
5487 }
5488 case AArch64::LDPDi:
5489 case AArch64::LDPQi:
5490 case AArch64::LDPSi:
5491 case AArch64::LDPSWi:
5492 case AArch64::LDPWi:
5493 case AArch64::LDPXi: {
5494 MCRegister Rt = Inst.getOperand(0).getReg();
5495 MCRegister Rt2 = Inst.getOperand(1).getReg();
5496 if (Rt == Rt2)
5497 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5498 break;
5499 }
5500 case AArch64::LDPDpost:
5501 case AArch64::LDPDpre:
5502 case AArch64::LDPQpost:
5503 case AArch64::LDPQpre:
5504 case AArch64::LDPSpost:
5505 case AArch64::LDPSpre:
5506 case AArch64::LDPSWpost: {
5507 MCRegister Rt = Inst.getOperand(1).getReg();
5508 MCRegister Rt2 = Inst.getOperand(2).getReg();
5509 if (Rt == Rt2)
5510 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5511 break;
5512 }
5513 case AArch64::STPDpost:
5514 case AArch64::STPDpre:
5515 case AArch64::STPQpost:
5516 case AArch64::STPQpre:
5517 case AArch64::STPSpost:
5518 case AArch64::STPSpre:
5519 case AArch64::STPWpost:
5520 case AArch64::STPWpre:
5521 case AArch64::STPXpost:
5522 case AArch64::STPXpre: {
5523 MCRegister Rt = Inst.getOperand(1).getReg();
5524 MCRegister Rt2 = Inst.getOperand(2).getReg();
5525 MCRegister Rn = Inst.getOperand(3).getReg();
5526 if (RI->isSubRegisterEq(Rn, Rt))
5527 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5528 "is also a source");
5529 if (RI->isSubRegisterEq(Rn, Rt2))
5530 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5531 "is also a source");
5532 break;
5533 }
5534 case AArch64::LDRBBpre:
5535 case AArch64::LDRBpre:
5536 case AArch64::LDRHHpre:
5537 case AArch64::LDRHpre:
5538 case AArch64::LDRSBWpre:
5539 case AArch64::LDRSBXpre:
5540 case AArch64::LDRSHWpre:
5541 case AArch64::LDRSHXpre:
5542 case AArch64::LDRSWpre:
5543 case AArch64::LDRWpre:
5544 case AArch64::LDRXpre:
5545 case AArch64::LDRBBpost:
5546 case AArch64::LDRBpost:
5547 case AArch64::LDRHHpost:
5548 case AArch64::LDRHpost:
5549 case AArch64::LDRSBWpost:
5550 case AArch64::LDRSBXpost:
5551 case AArch64::LDRSHWpost:
5552 case AArch64::LDRSHXpost:
5553 case AArch64::LDRSWpost:
5554 case AArch64::LDRWpost:
5555 case AArch64::LDRXpost: {
5556 MCRegister Rt = Inst.getOperand(1).getReg();
5557 MCRegister Rn = Inst.getOperand(2).getReg();
5558 if (RI->isSubRegisterEq(Rn, Rt))
5559 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5560 "is also a source");
5561 break;
5562 }
5563 case AArch64::STRBBpost:
5564 case AArch64::STRBpost:
5565 case AArch64::STRHHpost:
5566 case AArch64::STRHpost:
5567 case AArch64::STRWpost:
5568 case AArch64::STRXpost:
5569 case AArch64::STRBBpre:
5570 case AArch64::STRBpre:
5571 case AArch64::STRHHpre:
5572 case AArch64::STRHpre:
5573 case AArch64::STRWpre:
5574 case AArch64::STRXpre: {
5575 MCRegister Rt = Inst.getOperand(1).getReg();
5576 MCRegister Rn = Inst.getOperand(2).getReg();
5577 if (RI->isSubRegisterEq(Rn, Rt))
5578 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5579 "is also a source");
5580 break;
5581 }
5582 case AArch64::STXRB:
5583 case AArch64::STXRH:
5584 case AArch64::STXRW:
5585 case AArch64::STXRX:
5586 case AArch64::STLXRB:
5587 case AArch64::STLXRH:
5588 case AArch64::STLXRW:
5589 case AArch64::STLXRX: {
5590 MCRegister Rs = Inst.getOperand(0).getReg();
5591 MCRegister Rt = Inst.getOperand(1).getReg();
5592 MCRegister Rn = Inst.getOperand(2).getReg();
5593 if (RI->isSubRegisterEq(Rt, Rs) ||
5594 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5595 return Error(Loc[0],
5596 "unpredictable STXR instruction, status is also a source");
5597 break;
5598 }
5599 case AArch64::STXPW:
5600 case AArch64::STXPX:
5601 case AArch64::STLXPW:
5602 case AArch64::STLXPX: {
5603 MCRegister Rs = Inst.getOperand(0).getReg();
5604 MCRegister Rt1 = Inst.getOperand(1).getReg();
5605 MCRegister Rt2 = Inst.getOperand(2).getReg();
5606 MCRegister Rn = Inst.getOperand(3).getReg();
5607 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5608 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5609 return Error(Loc[0],
5610 "unpredictable STXP instruction, status is also a source");
5611 break;
5612 }
5613 case AArch64::LDRABwriteback:
5614 case AArch64::LDRAAwriteback: {
5615 MCRegister Xt = Inst.getOperand(0).getReg();
5616 MCRegister Xn = Inst.getOperand(1).getReg();
5617 if (Xt == Xn)
5618 return Error(Loc[0],
5619 "unpredictable LDRA instruction, writeback base"
5620 " is also a destination");
5621 break;
5622 }
5623 }
5624
5625 // Check v8.8-A memops instructions.
5626 switch (Inst.getOpcode()) {
5627 case AArch64::CPYFP:
5628 case AArch64::CPYFPWN:
5629 case AArch64::CPYFPRN:
5630 case AArch64::CPYFPN:
5631 case AArch64::CPYFPWT:
5632 case AArch64::CPYFPWTWN:
5633 case AArch64::CPYFPWTRN:
5634 case AArch64::CPYFPWTN:
5635 case AArch64::CPYFPRT:
5636 case AArch64::CPYFPRTWN:
5637 case AArch64::CPYFPRTRN:
5638 case AArch64::CPYFPRTN:
5639 case AArch64::CPYFPT:
5640 case AArch64::CPYFPTWN:
5641 case AArch64::CPYFPTRN:
5642 case AArch64::CPYFPTN:
5643 case AArch64::CPYFM:
5644 case AArch64::CPYFMWN:
5645 case AArch64::CPYFMRN:
5646 case AArch64::CPYFMN:
5647 case AArch64::CPYFMWT:
5648 case AArch64::CPYFMWTWN:
5649 case AArch64::CPYFMWTRN:
5650 case AArch64::CPYFMWTN:
5651 case AArch64::CPYFMRT:
5652 case AArch64::CPYFMRTWN:
5653 case AArch64::CPYFMRTRN:
5654 case AArch64::CPYFMRTN:
5655 case AArch64::CPYFMT:
5656 case AArch64::CPYFMTWN:
5657 case AArch64::CPYFMTRN:
5658 case AArch64::CPYFMTN:
5659 case AArch64::CPYFE:
5660 case AArch64::CPYFEWN:
5661 case AArch64::CPYFERN:
5662 case AArch64::CPYFEN:
5663 case AArch64::CPYFEWT:
5664 case AArch64::CPYFEWTWN:
5665 case AArch64::CPYFEWTRN:
5666 case AArch64::CPYFEWTN:
5667 case AArch64::CPYFERT:
5668 case AArch64::CPYFERTWN:
5669 case AArch64::CPYFERTRN:
5670 case AArch64::CPYFERTN:
5671 case AArch64::CPYFET:
5672 case AArch64::CPYFETWN:
5673 case AArch64::CPYFETRN:
5674 case AArch64::CPYFETN:
5675 case AArch64::CPYP:
5676 case AArch64::CPYPWN:
5677 case AArch64::CPYPRN:
5678 case AArch64::CPYPN:
5679 case AArch64::CPYPWT:
5680 case AArch64::CPYPWTWN:
5681 case AArch64::CPYPWTRN:
5682 case AArch64::CPYPWTN:
5683 case AArch64::CPYPRT:
5684 case AArch64::CPYPRTWN:
5685 case AArch64::CPYPRTRN:
5686 case AArch64::CPYPRTN:
5687 case AArch64::CPYPT:
5688 case AArch64::CPYPTWN:
5689 case AArch64::CPYPTRN:
5690 case AArch64::CPYPTN:
5691 case AArch64::CPYM:
5692 case AArch64::CPYMWN:
5693 case AArch64::CPYMRN:
5694 case AArch64::CPYMN:
5695 case AArch64::CPYMWT:
5696 case AArch64::CPYMWTWN:
5697 case AArch64::CPYMWTRN:
5698 case AArch64::CPYMWTN:
5699 case AArch64::CPYMRT:
5700 case AArch64::CPYMRTWN:
5701 case AArch64::CPYMRTRN:
5702 case AArch64::CPYMRTN:
5703 case AArch64::CPYMT:
5704 case AArch64::CPYMTWN:
5705 case AArch64::CPYMTRN:
5706 case AArch64::CPYMTN:
5707 case AArch64::CPYE:
5708 case AArch64::CPYEWN:
5709 case AArch64::CPYERN:
5710 case AArch64::CPYEN:
5711 case AArch64::CPYEWT:
5712 case AArch64::CPYEWTWN:
5713 case AArch64::CPYEWTRN:
5714 case AArch64::CPYEWTN:
5715 case AArch64::CPYERT:
5716 case AArch64::CPYERTWN:
5717 case AArch64::CPYERTRN:
5718 case AArch64::CPYERTN:
5719 case AArch64::CPYET:
5720 case AArch64::CPYETWN:
5721 case AArch64::CPYETRN:
5722 case AArch64::CPYETN: {
5723 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5724 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5725 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5726 MCRegister Xd = Inst.getOperand(3).getReg();
5727 MCRegister Xs = Inst.getOperand(4).getReg();
5728 MCRegister Xn = Inst.getOperand(5).getReg();
5729 if (Xd_wb != Xd)
5730 return Error(Loc[0],
5731 "invalid CPY instruction, Xd_wb and Xd do not match");
5732 if (Xs_wb != Xs)
5733 return Error(Loc[0],
5734 "invalid CPY instruction, Xs_wb and Xs do not match");
5735 if (Xn_wb != Xn)
5736 return Error(Loc[0],
5737 "invalid CPY instruction, Xn_wb and Xn do not match");
5738 if (Xd == Xs)
5739 return Error(Loc[0], "invalid CPY instruction, destination and source"
5740 " registers are the same");
5741 if (Xd == Xn)
5742 return Error(Loc[0], "invalid CPY instruction, destination and size"
5743 " registers are the same");
5744 if (Xs == Xn)
5745 return Error(Loc[0], "invalid CPY instruction, source and size"
5746 " registers are the same");
5747 break;
5748 }
5749 case AArch64::SETP:
5750 case AArch64::SETPT:
5751 case AArch64::SETPN:
5752 case AArch64::SETPTN:
5753 case AArch64::SETM:
5754 case AArch64::SETMT:
5755 case AArch64::SETMN:
5756 case AArch64::SETMTN:
5757 case AArch64::SETE:
5758 case AArch64::SETET:
5759 case AArch64::SETEN:
5760 case AArch64::SETETN:
5761 case AArch64::SETGP:
5762 case AArch64::SETGPT:
5763 case AArch64::SETGPN:
5764 case AArch64::SETGPTN:
5765 case AArch64::SETGM:
5766 case AArch64::SETGMT:
5767 case AArch64::SETGMN:
5768 case AArch64::SETGMTN:
5769 case AArch64::MOPSSETGE:
5770 case AArch64::MOPSSETGET:
5771 case AArch64::MOPSSETGEN:
5772 case AArch64::MOPSSETGETN: {
5773 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5774 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5775 MCRegister Xd = Inst.getOperand(2).getReg();
5776 MCRegister Xn = Inst.getOperand(3).getReg();
5777 MCRegister Xm = Inst.getOperand(4).getReg();
5778 if (Xd_wb != Xd)
5779 return Error(Loc[0],
5780 "invalid SET instruction, Xd_wb and Xd do not match");
5781 if (Xn_wb != Xn)
5782 return Error(Loc[0],
5783 "invalid SET instruction, Xn_wb and Xn do not match");
5784 if (Xd == Xn)
5785 return Error(Loc[0], "invalid SET instruction, destination and size"
5786 " registers are the same");
5787 if (Xd == Xm)
5788 return Error(Loc[0], "invalid SET instruction, destination and source"
5789 " registers are the same");
5790 if (Xn == Xm)
5791 return Error(Loc[0], "invalid SET instruction, source and size"
5792 " registers are the same");
5793 break;
5794 }
5795 }
5796
5797 // Now check immediate ranges. Separate from the above as there is overlap
5798 // in the instructions being checked and this keeps the nested conditionals
5799 // to a minimum.
5800 switch (Inst.getOpcode()) {
5801 case AArch64::ADDSWri:
5802 case AArch64::ADDSXri:
5803 case AArch64::ADDWri:
5804 case AArch64::ADDXri:
5805 case AArch64::SUBSWri:
5806 case AArch64::SUBSXri:
5807 case AArch64::SUBWri:
5808 case AArch64::SUBXri: {
5809 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5810 // some slight duplication here.
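// e.g. "add x0, x1, :lo12:sym" and "add x0, x1, sym@PAGEOFF" are accepted
// here, while a bare symbol reference such as "cmp w0, sym" is diagnosed
// as an invalid immediate expression.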
5811 if (Inst.getOperand(2).isExpr()) {
5812 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5813 AArch64MCExpr::VariantKind ELFRefKind;
5814 MCSymbolRefExpr::VariantKind DarwinRefKind;
5815 int64_t Addend;
5816 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5817
5818 // Only allow these with ADDXri.
5819 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5820 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5821 Inst.getOpcode() == AArch64::ADDXri)
5822 return false;
5823
5824 // Only allow these with ADDXri/ADDWri
5825 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5826 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
5827 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5828 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5829 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5830 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5831 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5832 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5833 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5834 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
5835 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5836 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5837 (Inst.getOpcode() == AArch64::ADDXri ||
5838 Inst.getOpcode() == AArch64::ADDWri))
5839 return false;
5840
5841 // Don't allow symbol refs in the immediate field otherwise
5842 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5843 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5844 // 'cmp w0, borked')
5845 return Error(Loc.back(), "invalid immediate expression");
5846 }
5847 // We don't validate more complex expressions here
5848 }
5849 return false;
5850 }
5851 default:
5852 return false;
5853 }
5854}
5855
5856static std::string AArch64MnemonicSpellCheck(StringRef S,
5857 const FeatureBitset &FBS,
5858 unsigned VariantID = 0);
5859
5860bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5861 uint64_t ErrorInfo,
5862 OperandVector &Operands) {
5863 switch (ErrCode) {
5864 case Match_InvalidTiedOperand: {
5865 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5866 if (Op.isVectorList())
5867 return Error(Loc, "operand must match destination register list");
5868
5869 assert(Op.isReg() && "Unexpected operand type");
5870 switch (Op.getRegEqualityTy()) {
5871 case RegConstraintEqualityTy::EqualsSubReg:
5872 return Error(Loc, "operand must be 64-bit form of destination register");
5873 case RegConstraintEqualityTy::EqualsSuperReg:
5874 return Error(Loc, "operand must be 32-bit form of destination register");
5875 case RegConstraintEqualityTy::EqualsReg:
5876 return Error(Loc, "operand must match destination register");
5877 }
5878 llvm_unreachable("Unknown RegConstraintEqualityTy");
5879 }
5880 case Match_MissingFeature:
5881 return Error(Loc,
5882 "instruction requires a CPU feature not currently enabled");
5883 case Match_InvalidOperand:
5884 return Error(Loc, "invalid operand for instruction");
5885 case Match_InvalidSuffix:
5886 return Error(Loc, "invalid type suffix for instruction");
5887 case Match_InvalidCondCode:
5888 return Error(Loc, "expected AArch64 condition code");
5889 case Match_AddSubRegExtendSmall:
5890 return Error(Loc,
5891 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5892 case Match_AddSubRegExtendLarge:
5893 return Error(Loc,
5894 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5895 case Match_AddSubSecondSource:
5896 return Error(Loc,
5897 "expected compatible register, symbol or integer in range [0, 4095]");
5898 case Match_LogicalSecondSource:
5899 return Error(Loc, "expected compatible register or logical immediate");
5900 case Match_InvalidMovImm32Shift:
5901 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5902 case Match_InvalidMovImm64Shift:
5903 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5904 case Match_AddSubRegShift32:
5905 return Error(Loc,
5906 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5907 case Match_AddSubRegShift64:
5908 return Error(Loc,
5909 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5910 case Match_InvalidFPImm:
5911 return Error(Loc,
5912 "expected compatible register or floating-point constant");
5913 case Match_InvalidMemoryIndexedSImm6:
5914 return Error(Loc, "index must be an integer in range [-32, 31].");
5915 case Match_InvalidMemoryIndexedSImm5:
5916 return Error(Loc, "index must be an integer in range [-16, 15].");
5917 case Match_InvalidMemoryIndexed1SImm4:
5918 return Error(Loc, "index must be an integer in range [-8, 7].");
5919 case Match_InvalidMemoryIndexed2SImm4:
5920 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5921 case Match_InvalidMemoryIndexed3SImm4:
5922 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5923 case Match_InvalidMemoryIndexed4SImm4:
5924 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5925 case Match_InvalidMemoryIndexed16SImm4:
5926 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5927 case Match_InvalidMemoryIndexed32SImm4:
5928 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5929 case Match_InvalidMemoryIndexed1SImm6:
5930 return Error(Loc, "index must be an integer in range [-32, 31].");
5931 case Match_InvalidMemoryIndexedSImm8:
5932 return Error(Loc, "index must be an integer in range [-128, 127].");
5933 case Match_InvalidMemoryIndexedSImm9:
5934 return Error(Loc, "index must be an integer in range [-256, 255].");
5935 case Match_InvalidMemoryIndexed16SImm9:
5936 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5937 case Match_InvalidMemoryIndexed8SImm10:
5938 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5939 case Match_InvalidMemoryIndexed4SImm7:
5940 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5941 case Match_InvalidMemoryIndexed8SImm7:
5942 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5943 case Match_InvalidMemoryIndexed16SImm7:
5944 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5945 case Match_InvalidMemoryIndexed8UImm5:
5946 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5947 case Match_InvalidMemoryIndexed8UImm3:
5948 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5949 case Match_InvalidMemoryIndexed4UImm5:
5950 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5951 case Match_InvalidMemoryIndexed2UImm5:
5952 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5953 case Match_InvalidMemoryIndexed8UImm6:
5954 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5955 case Match_InvalidMemoryIndexed16UImm6:
5956 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5957 case Match_InvalidMemoryIndexed4UImm6:
5958 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5959 case Match_InvalidMemoryIndexed2UImm6:
5960 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5961 case Match_InvalidMemoryIndexed1UImm6:
5962 return Error(Loc, "index must be in range [0, 63].");
5963 case Match_InvalidMemoryWExtend8:
5964 return Error(Loc,
5965 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5966 case Match_InvalidMemoryWExtend16:
5967 return Error(Loc,
5968 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5969 case Match_InvalidMemoryWExtend32:
5970 return Error(Loc,
5971 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5972 case Match_InvalidMemoryWExtend64:
5973 return Error(Loc,
5974 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5975 case Match_InvalidMemoryWExtend128:
5976 return Error(Loc,
5977 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5978 case Match_InvalidMemoryXExtend8:
5979 return Error(Loc,
5980 "expected 'lsl' or 'sxtx' with optional shift of #0");
5981 case Match_InvalidMemoryXExtend16:
5982 return Error(Loc,
5983 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5984 case Match_InvalidMemoryXExtend32:
5985 return Error(Loc,
5986 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5987 case Match_InvalidMemoryXExtend64:
5988 return Error(Loc,
5989 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5990 case Match_InvalidMemoryXExtend128:
5991 return Error(Loc,
5992 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5993 case Match_InvalidMemoryIndexed1:
5994 return Error(Loc, "index must be an integer in range [0, 4095].");
5995 case Match_InvalidMemoryIndexed2:
5996 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5997 case Match_InvalidMemoryIndexed4:
5998 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5999 case Match_InvalidMemoryIndexed8:
6000 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6001 case Match_InvalidMemoryIndexed16:
6002 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6003 case Match_InvalidImm0_0:
6004 return Error(Loc, "immediate must be 0.");
6005 case Match_InvalidImm0_1:
6006 return Error(Loc, "immediate must be an integer in range [0, 1].");
6007 case Match_InvalidImm0_3:
6008 return Error(Loc, "immediate must be an integer in range [0, 3].");
6009 case Match_InvalidImm0_7:
6010 return Error(Loc, "immediate must be an integer in range [0, 7].");
6011 case Match_InvalidImm0_15:
6012 return Error(Loc, "immediate must be an integer in range [0, 15].");
6013 case Match_InvalidImm0_31:
6014 return Error(Loc, "immediate must be an integer in range [0, 31].");
6015 case Match_InvalidImm0_63:
6016 return Error(Loc, "immediate must be an integer in range [0, 63].");
6017 case Match_InvalidImm0_127:
6018 return Error(Loc, "immediate must be an integer in range [0, 127].");
6019 case Match_InvalidImm0_255:
6020 return Error(Loc, "immediate must be an integer in range [0, 255].");
6021 case Match_InvalidImm0_65535:
6022 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6023 case Match_InvalidImm1_8:
6024 return Error(Loc, "immediate must be an integer in range [1, 8].");
6025 case Match_InvalidImm1_16:
6026 return Error(Loc, "immediate must be an integer in range [1, 16].");
6027 case Match_InvalidImm1_32:
6028 return Error(Loc, "immediate must be an integer in range [1, 32].");
6029 case Match_InvalidImm1_64:
6030 return Error(Loc, "immediate must be an integer in range [1, 64].");
6031 case Match_InvalidImmM1_62:
6032 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6033 case Match_InvalidMemoryIndexedRange2UImm0:
6034 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6035 case Match_InvalidMemoryIndexedRange2UImm1:
6036 return Error(Loc, "vector select offset must be an immediate range of the "
6037 "form <immf>:<imml>, where the first "
6038 "immediate is a multiple of 2 in the range [0, 2], and "
6039 "the second immediate is immf + 1.");
6040 case Match_InvalidMemoryIndexedRange2UImm2:
6041 case Match_InvalidMemoryIndexedRange2UImm3:
6042 return Error(
6043 Loc,
6044 "vector select offset must be an immediate range of the form "
6045 "<immf>:<imml>, "
6046 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6047 "[0, 14] "
6048 "depending on the instruction, and the second immediate is immf + 1.");
6049 case Match_InvalidMemoryIndexedRange4UImm0:
6050 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6051 case Match_InvalidMemoryIndexedRange4UImm1:
6052 case Match_InvalidMemoryIndexedRange4UImm2:
6053 return Error(
6054 Loc,
6055 "vector select offset must be an immediate range of the form "
6056 "<immf>:<imml>, "
6057 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6058 "[0, 12] "
6059 "depending on the instruction, and the second immediate is immf + 3.");
6060 case Match_InvalidSVEAddSubImm8:
6061 return Error(Loc, "immediate must be an integer in range [0, 255]"
6062 " with a shift amount of 0");
6063 case Match_InvalidSVEAddSubImm16:
6064 case Match_InvalidSVEAddSubImm32:
6065 case Match_InvalidSVEAddSubImm64:
6066 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6067 "multiple of 256 in range [256, 65280]");
6068 case Match_InvalidSVECpyImm8:
6069 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6070 " with a shift amount of 0");
6071 case Match_InvalidSVECpyImm16:
6072 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6073 "multiple of 256 in range [-32768, 65280]");
6074 case Match_InvalidSVECpyImm32:
6075 case Match_InvalidSVECpyImm64:
6076 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6077 "multiple of 256 in range [-32768, 32512]");
6078 case Match_InvalidIndexRange0_0:
6079 return Error(Loc, "expected lane specifier '[0]'");
6080 case Match_InvalidIndexRange1_1:
6081 return Error(Loc, "expected lane specifier '[1]'");
6082 case Match_InvalidIndexRange0_15:
6083 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6084 case Match_InvalidIndexRange0_7:
6085 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6086 case Match_InvalidIndexRange0_3:
6087 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6088 case Match_InvalidIndexRange0_1:
6089 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6090 case Match_InvalidSVEIndexRange0_63:
6091 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6092 case Match_InvalidSVEIndexRange0_31:
6093 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6094 case Match_InvalidSVEIndexRange0_15:
6095 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6096 case Match_InvalidSVEIndexRange0_7:
6097 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6098 case Match_InvalidSVEIndexRange0_3:
6099 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6100 case Match_InvalidLabel:
6101 return Error(Loc, "expected label or encodable integer pc offset");
6102 case Match_MRS:
6103 return Error(Loc, "expected readable system register");
6104 case Match_MSR:
6105 case Match_InvalidSVCR:
6106 return Error(Loc, "expected writable system register or pstate");
6107 case Match_InvalidComplexRotationEven:
6108 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6109 case Match_InvalidComplexRotationOdd:
6110 return Error(Loc, "complex rotation must be 90 or 270.");
6111 case Match_MnemonicFail: {
6112 std::string Suggestion = AArch64MnemonicSpellCheck(
6113 ((AArch64Operand &)*Operands[0]).getToken(),
6114 ComputeAvailableFeatures(STI->getFeatureBits()));
6115 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6116 }
6117 case Match_InvalidGPR64shifted8:
6118 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6119 case Match_InvalidGPR64shifted16:
6120 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6121 case Match_InvalidGPR64shifted32:
6122 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6123 case Match_InvalidGPR64shifted64:
6124 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6125 case Match_InvalidGPR64shifted128:
6126 return Error(
6127 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6128 case Match_InvalidGPR64NoXZRshifted8:
6129 return Error(Loc, "register must be x0..x30 without shift");
6130 case Match_InvalidGPR64NoXZRshifted16:
6131 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6132 case Match_InvalidGPR64NoXZRshifted32:
6133 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6134 case Match_InvalidGPR64NoXZRshifted64:
6135 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6136 case Match_InvalidGPR64NoXZRshifted128:
6137 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6138 case Match_InvalidZPR32UXTW8:
6139 case Match_InvalidZPR32SXTW8:
6140 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6141 case Match_InvalidZPR32UXTW16:
6142 case Match_InvalidZPR32SXTW16:
6143 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6144 case Match_InvalidZPR32UXTW32:
6145 case Match_InvalidZPR32SXTW32:
6146 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6147 case Match_InvalidZPR32UXTW64:
6148 case Match_InvalidZPR32SXTW64:
6149 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6150 case Match_InvalidZPR64UXTW8:
6151 case Match_InvalidZPR64SXTW8:
6152 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6153 case Match_InvalidZPR64UXTW16:
6154 case Match_InvalidZPR64SXTW16:
6155 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6156 case Match_InvalidZPR64UXTW32:
6157 case Match_InvalidZPR64SXTW32:
6158 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6159 case Match_InvalidZPR64UXTW64:
6160 case Match_InvalidZPR64SXTW64:
6161 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6162 case Match_InvalidZPR32LSL8:
6163 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6164 case Match_InvalidZPR32LSL16:
6165 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6166 case Match_InvalidZPR32LSL32:
6167 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6168 case Match_InvalidZPR32LSL64:
6169 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6170 case Match_InvalidZPR64LSL8:
6171 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6172 case Match_InvalidZPR64LSL16:
6173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6174 case Match_InvalidZPR64LSL32:
6175 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6176 case Match_InvalidZPR64LSL64:
6177 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6178 case Match_InvalidZPR0:
6179 return Error(Loc, "expected register without element width suffix");
6180 case Match_InvalidZPR8:
6181 case Match_InvalidZPR16:
6182 case Match_InvalidZPR32:
6183 case Match_InvalidZPR64:
6184 case Match_InvalidZPR128:
6185 return Error(Loc, "invalid element width");
6186 case Match_InvalidZPR_3b8:
6187 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6188 case Match_InvalidZPR_3b16:
6189 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6190 case Match_InvalidZPR_3b32:
6191 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6192 case Match_InvalidZPR_4b8:
6193 return Error(Loc,
6194 "Invalid restricted vector register, expected z0.b..z15.b");
6195 case Match_InvalidZPR_4b16:
6196 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6197 case Match_InvalidZPR_4b32:
6198 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6199 case Match_InvalidZPR_4b64:
6200 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6201 case Match_InvalidZPRMul2_Lo8:
6202 return Error(Loc, "Invalid restricted vector register, expected even "
6203 "register in z0.b..z14.b");
6204 case Match_InvalidZPRMul2_Hi8:
6205 return Error(Loc, "Invalid restricted vector register, expected even "
6206 "register in z16.b..z30.b");
6207 case Match_InvalidZPRMul2_Lo16:
6208 return Error(Loc, "Invalid restricted vector register, expected even "
6209 "register in z0.h..z14.h");
6210 case Match_InvalidZPRMul2_Hi16:
6211 return Error(Loc, "Invalid restricted vector register, expected even "
6212 "register in z16.h..z30.h");
6213 case Match_InvalidZPRMul2_Lo32:
6214 return Error(Loc, "Invalid restricted vector register, expected even "
6215 "register in z0.s..z14.s");
6216 case Match_InvalidZPRMul2_Hi32:
6217 return Error(Loc, "Invalid restricted vector register, expected even "
6218 "register in z16.s..z30.s");
6219 case Match_InvalidZPRMul2_Lo64:
6220 return Error(Loc, "Invalid restricted vector register, expected even "
6221 "register in z0.d..z14.d");
6222 case Match_InvalidZPRMul2_Hi64:
6223 return Error(Loc, "Invalid restricted vector register, expected even "
6224 "register in z16.d..z30.d");
6225 case Match_InvalidZPR_K0:
6226 return Error(Loc, "invalid restricted vector register, expected register "
6227 "in z20..z23 or z28..z31");
6228 case Match_InvalidSVEPattern:
6229 return Error(Loc, "invalid predicate pattern");
6230 case Match_InvalidSVEPPRorPNRAnyReg:
6231 case Match_InvalidSVEPPRorPNRBReg:
6232 case Match_InvalidSVEPredicateAnyReg:
6233 case Match_InvalidSVEPredicateBReg:
6234 case Match_InvalidSVEPredicateHReg:
6235 case Match_InvalidSVEPredicateSReg:
6236 case Match_InvalidSVEPredicateDReg:
6237 return Error(Loc, "invalid predicate register.");
6238 case Match_InvalidSVEPredicate3bAnyReg:
6239 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6240 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6241 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6242 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6243 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6244 return Error(Loc, "Invalid predicate register, expected PN in range "
6245 "pn8..pn15 with element suffix.");
6246 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6247 return Error(Loc, "invalid restricted predicate-as-counter register "
6248 "expected pn8..pn15");
6249 case Match_InvalidSVEPNPredicateBReg:
6250 case Match_InvalidSVEPNPredicateHReg:
6251 case Match_InvalidSVEPNPredicateSReg:
6252 case Match_InvalidSVEPNPredicateDReg:
6253 return Error(Loc, "Invalid predicate register, expected PN in range "
6254 "pn0..pn15 with element suffix.");
6255 case Match_InvalidSVEVecLenSpecifier:
6256 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6257 case Match_InvalidSVEPredicateListMul2x8:
6258 case Match_InvalidSVEPredicateListMul2x16:
6259 case Match_InvalidSVEPredicateListMul2x32:
6260 case Match_InvalidSVEPredicateListMul2x64:
6261 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6262 "predicate registers, where the first vector is a multiple of 2 "
6263 "and with correct element type");
6264 case Match_InvalidSVEExactFPImmOperandHalfOne:
6265 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6266 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6267 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6268 case Match_InvalidSVEExactFPImmOperandZeroOne:
6269 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6270 case Match_InvalidMatrixTileVectorH8:
6271 case Match_InvalidMatrixTileVectorV8:
6272 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6273 case Match_InvalidMatrixTileVectorH16:
6274 case Match_InvalidMatrixTileVectorV16:
6275 return Error(Loc,
6276 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6277 case Match_InvalidMatrixTileVectorH32:
6278 case Match_InvalidMatrixTileVectorV32:
6279 return Error(Loc,
6280 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6281 case Match_InvalidMatrixTileVectorH64:
6282 case Match_InvalidMatrixTileVectorV64:
6283 return Error(Loc,
6284 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6285 case Match_InvalidMatrixTileVectorH128:
6286 case Match_InvalidMatrixTileVectorV128:
6287 return Error(Loc,
6288 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6289 case Match_InvalidMatrixTile16:
6290 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6291 case Match_InvalidMatrixTile32:
6292 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6293 case Match_InvalidMatrixTile64:
6294 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6295 case Match_InvalidMatrix:
6296 return Error(Loc, "invalid matrix operand, expected za");
6297 case Match_InvalidMatrix8:
6298 return Error(Loc, "invalid matrix operand, expected suffix .b");
6299 case Match_InvalidMatrix16:
6300 return Error(Loc, "invalid matrix operand, expected suffix .h");
6301 case Match_InvalidMatrix32:
6302 return Error(Loc, "invalid matrix operand, expected suffix .s");
6303 case Match_InvalidMatrix64:
6304 return Error(Loc, "invalid matrix operand, expected suffix .d");
6305 case Match_InvalidMatrixIndexGPR32_12_15:
6306 return Error(Loc, "operand must be a register in range [w12, w15]");
6307 case Match_InvalidMatrixIndexGPR32_8_11:
6308 return Error(Loc, "operand must be a register in range [w8, w11]");
6309 case Match_InvalidSVEVectorList2x8Mul2:
6310 case Match_InvalidSVEVectorList2x16Mul2:
6311 case Match_InvalidSVEVectorList2x32Mul2:
6312 case Match_InvalidSVEVectorList2x64Mul2:
6313 case Match_InvalidSVEVectorList2x128Mul2:
6314 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6315 "SVE vectors, where the first vector is a multiple of 2 "
6316 "and with matching element types");
6317 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6318 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6319 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6320 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6321 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6322 "SVE vectors in the range z0-z14, where the first vector "
6323 "is a multiple of 2 "
6324 "and with matching element types");
6325 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6326 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6327 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6328 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6329 return Error(Loc,
6330 "Invalid vector list, expected list with 2 consecutive "
6331 "SVE vectors in the range z16-z30, where the first vector "
6332 "is a multiple of 2 "
6333 "and with matching element types");
6334 case Match_InvalidSVEVectorList4x8Mul4:
6335 case Match_InvalidSVEVectorList4x16Mul4:
6336 case Match_InvalidSVEVectorList4x32Mul4:
6337 case Match_InvalidSVEVectorList4x64Mul4:
6338 case Match_InvalidSVEVectorList4x128Mul4:
6339 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6340 "SVE vectors, where the first vector is a multiple of 4 "
6341 "and with matching element types");
6342 case Match_InvalidLookupTable:
6343 return Error(Loc, "Invalid lookup table, expected zt0");
6344 case Match_InvalidSVEVectorListStrided2x8:
6345 case Match_InvalidSVEVectorListStrided2x16:
6346 case Match_InvalidSVEVectorListStrided2x32:
6347 case Match_InvalidSVEVectorListStrided2x64:
6348 return Error(
6349 Loc,
6350 "Invalid vector list, expected list with each SVE vector in the list "
6351 "8 registers apart, and the first register in the range [z0, z7] or "
6352 "[z16, z23] and with correct element type");
6353 case Match_InvalidSVEVectorListStrided4x8:
6354 case Match_InvalidSVEVectorListStrided4x16:
6355 case Match_InvalidSVEVectorListStrided4x32:
6356 case Match_InvalidSVEVectorListStrided4x64:
6357 return Error(
6358 Loc,
6359 "Invalid vector list, expected list with each SVE vector in the list "
6360 "4 registers apart, and the first register in the range [z0, z3] or "
6361 "[z16, z19] and with correct element type");
6362 case Match_AddSubLSLImm3ShiftLarge:
6363 return Error(Loc,
6364 "expected 'lsl' with optional integer in range [0, 7]");
6365 default:
6366 llvm_unreachable("unexpected error code!");
6367 }
6368}
6369
6370static const char *getSubtargetFeatureName(uint64_t Val);
6371
6372bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6373 OperandVector &Operands,
6374 MCStreamer &Out,
6375 uint64_t &ErrorInfo,
6376 bool MatchingInlineAsm) {
6377 assert(!Operands.empty() && "Unexpected empty operand list!");
6378 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6379 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6380
6381 StringRef Tok = Op.getToken();
6382 unsigned NumOperands = Operands.size();
6383
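// The immediate-form "lsl" alias is rewritten to UBFM below; for example
// "lsl x0, x1, #3" becomes "ubfm x0, x1, #61, #60".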
6384 if (NumOperands == 4 && Tok == "lsl") {
6385 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6386 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6387 if (Op2.isScalarReg() && Op3.isImm()) {
6388 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6389 if (Op3CE) {
6390 uint64_t Op3Val = Op3CE->getValue();
6391 uint64_t NewOp3Val = 0;
6392 uint64_t NewOp4Val = 0;
6393 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6394 Op2.getReg())) {
6395 NewOp3Val = (32 - Op3Val) & 0x1f;
6396 NewOp4Val = 31 - Op3Val;
6397 } else {
6398 NewOp3Val = (64 - Op3Val) & 0x3f;
6399 NewOp4Val = 63 - Op3Val;
6400 }
6401
6402 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6403 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6404
6405 Operands[0] =
6406 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6407 Operands.push_back(AArch64Operand::CreateImm(
6408 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6409 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6410 Op3.getEndLoc(), getContext());
6411 }
6412 }
6413 } else if (NumOperands == 4 && Tok == "bfc") {
6414 // FIXME: Horrible hack to handle BFC->BFM alias.
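// For example, "bfc x0, #5, #9" is rewritten below to "bfm x0, xzr, #59, #8".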
6415 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6416 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6417 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6418
6419 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6420 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6421 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6422
6423 if (LSBCE && WidthCE) {
6424 uint64_t LSB = LSBCE->getValue();
6425 uint64_t Width = WidthCE->getValue();
6426
6427 uint64_t RegWidth = 0;
6428 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6429 Op1.getReg()))
6430 RegWidth = 64;
6431 else
6432 RegWidth = 32;
6433
6434 if (LSB >= RegWidth)
6435 return Error(LSBOp.getStartLoc(),
6436 "expected integer in range [0, 31]");
6437 if (Width < 1 || Width > RegWidth)
6438 return Error(WidthOp.getStartLoc(),
6439 "expected integer in range [1, 32]");
6440
6441 uint64_t ImmR = 0;
6442 if (RegWidth == 32)
6443 ImmR = (32 - LSB) & 0x1f;
6444 else
6445 ImmR = (64 - LSB) & 0x3f;
6446
6447 uint64_t ImmS = Width - 1;
6448
6449 if (ImmR != 0 && ImmS >= ImmR)
6450 return Error(WidthOp.getStartLoc(),
6451 "requested insert overflows register");
6452
6453 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6454 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6455 Operands[0] =
6456 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6457 Operands[2] = AArch64Operand::CreateReg(
6458 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6459 SMLoc(), SMLoc(), getContext());
6460 Operands[3] = AArch64Operand::CreateImm(
6461 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6462 Operands.emplace_back(
6463 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6464 WidthOp.getEndLoc(), getContext()));
6465 }
6466 }
6467 } else if (NumOperands == 5) {
6468 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6469 // UBFIZ -> UBFM aliases.
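    // For example, "bfi x0, x1, #8, #4" is rewritten as "bfm x0, x1, #56, #3".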
6470 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6471 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6472 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6473 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6474
6475 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6476 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6477 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6478
6479 if (Op3CE && Op4CE) {
6480 uint64_t Op3Val = Op3CE->getValue();
6481 uint64_t Op4Val = Op4CE->getValue();
6482
6483 uint64_t RegWidth = 0;
6484 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6485 Op1.getReg()))
6486 RegWidth = 64;
6487 else
6488 RegWidth = 32;
6489
6490 if (Op3Val >= RegWidth)
6491 return Error(Op3.getStartLoc(),
6492 "expected integer in range [0, 31]");
6493 if (Op4Val < 1 || Op4Val > RegWidth)
6494 return Error(Op4.getStartLoc(),
6495 "expected integer in range [1, 32]");
6496
6497 uint64_t NewOp3Val = 0;
6498 if (RegWidth == 32)
6499 NewOp3Val = (32 - Op3Val) & 0x1f;
6500 else
6501 NewOp3Val = (64 - Op3Val) & 0x3f;
6502
6503 uint64_t NewOp4Val = Op4Val - 1;
6504
6505 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6506 return Error(Op4.getStartLoc(),
6507 "requested insert overflows register");
6508
6509 const MCExpr *NewOp3 =
6510 MCConstantExpr::create(NewOp3Val, getContext());
6511 const MCExpr *NewOp4 =
6512 MCConstantExpr::create(NewOp4Val, getContext());
6513 Operands[3] = AArch64Operand::CreateImm(
6514 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6515 Operands[4] = AArch64Operand::CreateImm(
6516 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6517 if (Tok == "bfi")
6518 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6519 getContext());
6520 else if (Tok == "sbfiz")
6521 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6522 getContext());
6523 else if (Tok == "ubfiz")
6524 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6525 getContext());
6526 else
6527 llvm_unreachable("No valid mnemonic for alias?");
6528 }
6529 }
6530
6531 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6532 // UBFX -> UBFM aliases.
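    // For example, "ubfx x0, x1, #8, #4" is rewritten as "ubfm x0, x1, #8, #11".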
6533 } else if (NumOperands == 5 &&
6534 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6535 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6536 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6537 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6538
6539 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6540 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6541 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6542
6543 if (Op3CE && Op4CE) {
6544 uint64_t Op3Val = Op3CE->getValue();
6545 uint64_t Op4Val = Op4CE->getValue();
6546
6547 uint64_t RegWidth = 0;
6548 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6549 Op1.getReg()))
6550 RegWidth = 64;
6551 else
6552 RegWidth = 32;
6553
6554 if (Op3Val >= RegWidth)
6555 return Error(Op3.getStartLoc(),
6556 "expected integer in range [0, 31]");
6557 if (Op4Val < 1 || Op4Val > RegWidth)
6558 return Error(Op4.getStartLoc(),
6559 "expected integer in range [1, 32]");
6560
6561 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6562
6563 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6564 return Error(Op4.getStartLoc(),
6565 "requested extract overflows register");
6566
6567 const MCExpr *NewOp4 =
6568 MCConstantExpr::create(NewOp4Val, getContext());
6569 Operands[4] = AArch64Operand::CreateImm(
6570 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6571 if (Tok == "bfxil")
6572 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6573 getContext());
6574 else if (Tok == "sbfx")
6575 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6576 getContext());
6577 else if (Tok == "ubfx")
6578 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6579 getContext());
6580 else
6581 llvm_unreachable("No valid mnemonic for alias?");
6582 }
6583 }
6584 }
6585 }
6586
6587 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6588 // instruction for FP registers correctly in some rare circumstances. Convert
6589 // it to a safe instruction and warn (because silently changing someone's
6590 // assembly is rude).
6591 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6592 NumOperands == 4 && Tok == "movi") {
6593 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6594 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6595 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6596 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6597 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6598 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6599 if (Suffix.lower() == ".2d" &&
6600 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6601 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6602 " correctly on this CPU, converting to equivalent movi.16b");
6603 // Switch the suffix to .16b.
6604 unsigned Idx = Op1.isToken() ? 1 : 2;
6605 Operands[Idx] =
6606 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6607 }
6608 }
6609 }
6610
6611 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6612 // InstAlias can't quite handle this since the reg classes aren't
6613 // subclasses.
6614 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6615 // The source register can be Wn here, but the matcher expects a
6616 // GPR64. Twiddle it here if necessary.
6617 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6618 if (Op.isScalarReg()) {
6619 MCRegister Reg = getXRegFromWReg(Op.getReg());
6620 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6621 Op.getStartLoc(), Op.getEndLoc(),
6622 getContext());
6623 }
6624 }
6625 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6626 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6627 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6628 if (Op.isScalarReg() &&
6629 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6630 Op.getReg())) {
6631 // The source register can be Wn here, but the matcher expects a
6632 // GPR64. Twiddle it here if necessary.
6633 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6634 if (Op.isScalarReg()) {
6635 MCRegister Reg = getXRegFromWReg(Op.getReg());
6636 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6637 Op.getStartLoc(),
6638 Op.getEndLoc(), getContext());
6639 }
6640 }
6641 }
6642 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6643 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6644 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6645 if (Op.isScalarReg() &&
6646 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6647 Op.getReg())) {
 6648 // The destination register can be Xd here, but the matcher expects a
 6649 // GPR32 destination. Twiddle it here if necessary.
6650 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6651 if (Op.isScalarReg()) {
6652 MCRegister Reg = getWRegFromXReg(Op.getReg());
6653 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6654 Op.getStartLoc(),
6655 Op.getEndLoc(), getContext());
6656 }
6657 }
6658 }
6659
6660 MCInst Inst;
6661 FeatureBitset MissingFeatures;
6662 // First try to match against the secondary set of tables containing the
6663 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6664 unsigned MatchResult =
6665 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6666 MatchingInlineAsm, 1);
6667
6668 // If that fails, try against the alternate table containing long-form NEON:
6669 // "fadd v0.2s, v1.2s, v2.2s"
6670 if (MatchResult != Match_Success) {
6671 // But first, save the short-form match result: we can use it in case the
6672 // long-form match also fails.
6673 auto ShortFormNEONErrorInfo = ErrorInfo;
6674 auto ShortFormNEONMatchResult = MatchResult;
6675 auto ShortFormNEONMissingFeatures = MissingFeatures;
6676
6677 MatchResult =
6678 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6679 MatchingInlineAsm, 0);
6680
6681 // Now, both matches failed, and the long-form match failed on the mnemonic
6682 // suffix token operand. The short-form match failure is probably more
6683 // relevant: use it instead.
6684 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6685 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6686 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6687 MatchResult = ShortFormNEONMatchResult;
6688 ErrorInfo = ShortFormNEONErrorInfo;
6689 MissingFeatures = ShortFormNEONMissingFeatures;
6690 }
6691 }
6692
6693 switch (MatchResult) {
6694 case Match_Success: {
6695 // Perform range checking and other semantic validations
6696 SmallVector<SMLoc, 8> OperandLocs;
6697 NumOperands = Operands.size();
6698 for (unsigned i = 1; i < NumOperands; ++i)
6699 OperandLocs.push_back(Operands[i]->getStartLoc());
6700 if (validateInstruction(Inst, IDLoc, OperandLocs))
6701 return true;
6702
6703 Inst.setLoc(IDLoc);
6704 Out.emitInstruction(Inst, getSTI());
6705 return false;
6706 }
6707 case Match_MissingFeature: {
6708 assert(MissingFeatures.any() && "Unknown missing feature!");
6709 // Special case the error message for the very common case where only
6710 // a single subtarget feature is missing (neon, e.g.).
6711 std::string Msg = "instruction requires:";
6712 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6713 if (MissingFeatures[i]) {
6714 Msg += " ";
6715 Msg += getSubtargetFeatureName(i);
6716 }
6717 }
6718 return Error(IDLoc, Msg);
6719 }
6720 case Match_MnemonicFail:
6721 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6722 case Match_InvalidOperand: {
6723 SMLoc ErrorLoc = IDLoc;
6724
6725 if (ErrorInfo != ~0ULL) {
6726 if (ErrorInfo >= Operands.size())
6727 return Error(IDLoc, "too few operands for instruction",
6728 SMRange(IDLoc, getTok().getLoc()));
6729
6730 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6731 if (ErrorLoc == SMLoc())
6732 ErrorLoc = IDLoc;
6733 }
6734 // If the match failed on a suffix token operand, tweak the diagnostic
6735 // accordingly.
6736 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6737 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6738 MatchResult = Match_InvalidSuffix;
6739
6740 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6741 }
6742 case Match_InvalidTiedOperand:
6743 case Match_InvalidMemoryIndexed1:
6744 case Match_InvalidMemoryIndexed2:
6745 case Match_InvalidMemoryIndexed4:
6746 case Match_InvalidMemoryIndexed8:
6747 case Match_InvalidMemoryIndexed16:
6748 case Match_InvalidCondCode:
6749 case Match_AddSubLSLImm3ShiftLarge:
6750 case Match_AddSubRegExtendSmall:
6751 case Match_AddSubRegExtendLarge:
6752 case Match_AddSubSecondSource:
6753 case Match_LogicalSecondSource:
6754 case Match_AddSubRegShift32:
6755 case Match_AddSubRegShift64:
6756 case Match_InvalidMovImm32Shift:
6757 case Match_InvalidMovImm64Shift:
6758 case Match_InvalidFPImm:
6759 case Match_InvalidMemoryWExtend8:
6760 case Match_InvalidMemoryWExtend16:
6761 case Match_InvalidMemoryWExtend32:
6762 case Match_InvalidMemoryWExtend64:
6763 case Match_InvalidMemoryWExtend128:
6764 case Match_InvalidMemoryXExtend8:
6765 case Match_InvalidMemoryXExtend16:
6766 case Match_InvalidMemoryXExtend32:
6767 case Match_InvalidMemoryXExtend64:
6768 case Match_InvalidMemoryXExtend128:
6769 case Match_InvalidMemoryIndexed1SImm4:
6770 case Match_InvalidMemoryIndexed2SImm4:
6771 case Match_InvalidMemoryIndexed3SImm4:
6772 case Match_InvalidMemoryIndexed4SImm4:
6773 case Match_InvalidMemoryIndexed1SImm6:
6774 case Match_InvalidMemoryIndexed16SImm4:
6775 case Match_InvalidMemoryIndexed32SImm4:
6776 case Match_InvalidMemoryIndexed4SImm7:
6777 case Match_InvalidMemoryIndexed8SImm7:
6778 case Match_InvalidMemoryIndexed16SImm7:
6779 case Match_InvalidMemoryIndexed8UImm5:
6780 case Match_InvalidMemoryIndexed8UImm3:
6781 case Match_InvalidMemoryIndexed4UImm5:
6782 case Match_InvalidMemoryIndexed2UImm5:
6783 case Match_InvalidMemoryIndexed1UImm6:
6784 case Match_InvalidMemoryIndexed2UImm6:
6785 case Match_InvalidMemoryIndexed4UImm6:
6786 case Match_InvalidMemoryIndexed8UImm6:
6787 case Match_InvalidMemoryIndexed16UImm6:
6788 case Match_InvalidMemoryIndexedSImm6:
6789 case Match_InvalidMemoryIndexedSImm5:
6790 case Match_InvalidMemoryIndexedSImm8:
6791 case Match_InvalidMemoryIndexedSImm9:
6792 case Match_InvalidMemoryIndexed16SImm9:
6793 case Match_InvalidMemoryIndexed8SImm10:
6794 case Match_InvalidImm0_0:
6795 case Match_InvalidImm0_1:
6796 case Match_InvalidImm0_3:
6797 case Match_InvalidImm0_7:
6798 case Match_InvalidImm0_15:
6799 case Match_InvalidImm0_31:
6800 case Match_InvalidImm0_63:
6801 case Match_InvalidImm0_127:
6802 case Match_InvalidImm0_255:
6803 case Match_InvalidImm0_65535:
6804 case Match_InvalidImm1_8:
6805 case Match_InvalidImm1_16:
6806 case Match_InvalidImm1_32:
6807 case Match_InvalidImm1_64:
6808 case Match_InvalidImmM1_62:
6809 case Match_InvalidMemoryIndexedRange2UImm0:
6810 case Match_InvalidMemoryIndexedRange2UImm1:
6811 case Match_InvalidMemoryIndexedRange2UImm2:
6812 case Match_InvalidMemoryIndexedRange2UImm3:
6813 case Match_InvalidMemoryIndexedRange4UImm0:
6814 case Match_InvalidMemoryIndexedRange4UImm1:
6815 case Match_InvalidMemoryIndexedRange4UImm2:
6816 case Match_InvalidSVEAddSubImm8:
6817 case Match_InvalidSVEAddSubImm16:
6818 case Match_InvalidSVEAddSubImm32:
6819 case Match_InvalidSVEAddSubImm64:
6820 case Match_InvalidSVECpyImm8:
6821 case Match_InvalidSVECpyImm16:
6822 case Match_InvalidSVECpyImm32:
6823 case Match_InvalidSVECpyImm64:
6824 case Match_InvalidIndexRange0_0:
6825 case Match_InvalidIndexRange1_1:
6826 case Match_InvalidIndexRange0_15:
6827 case Match_InvalidIndexRange0_7:
6828 case Match_InvalidIndexRange0_3:
6829 case Match_InvalidIndexRange0_1:
6830 case Match_InvalidSVEIndexRange0_63:
6831 case Match_InvalidSVEIndexRange0_31:
6832 case Match_InvalidSVEIndexRange0_15:
6833 case Match_InvalidSVEIndexRange0_7:
6834 case Match_InvalidSVEIndexRange0_3:
6835 case Match_InvalidLabel:
6836 case Match_InvalidComplexRotationEven:
6837 case Match_InvalidComplexRotationOdd:
6838 case Match_InvalidGPR64shifted8:
6839 case Match_InvalidGPR64shifted16:
6840 case Match_InvalidGPR64shifted32:
6841 case Match_InvalidGPR64shifted64:
6842 case Match_InvalidGPR64shifted128:
6843 case Match_InvalidGPR64NoXZRshifted8:
6844 case Match_InvalidGPR64NoXZRshifted16:
6845 case Match_InvalidGPR64NoXZRshifted32:
6846 case Match_InvalidGPR64NoXZRshifted64:
6847 case Match_InvalidGPR64NoXZRshifted128:
6848 case Match_InvalidZPR32UXTW8:
6849 case Match_InvalidZPR32UXTW16:
6850 case Match_InvalidZPR32UXTW32:
6851 case Match_InvalidZPR32UXTW64:
6852 case Match_InvalidZPR32SXTW8:
6853 case Match_InvalidZPR32SXTW16:
6854 case Match_InvalidZPR32SXTW32:
6855 case Match_InvalidZPR32SXTW64:
6856 case Match_InvalidZPR64UXTW8:
6857 case Match_InvalidZPR64SXTW8:
6858 case Match_InvalidZPR64UXTW16:
6859 case Match_InvalidZPR64SXTW16:
6860 case Match_InvalidZPR64UXTW32:
6861 case Match_InvalidZPR64SXTW32:
6862 case Match_InvalidZPR64UXTW64:
6863 case Match_InvalidZPR64SXTW64:
6864 case Match_InvalidZPR32LSL8:
6865 case Match_InvalidZPR32LSL16:
6866 case Match_InvalidZPR32LSL32:
6867 case Match_InvalidZPR32LSL64:
6868 case Match_InvalidZPR64LSL8:
6869 case Match_InvalidZPR64LSL16:
6870 case Match_InvalidZPR64LSL32:
6871 case Match_InvalidZPR64LSL64:
6872 case Match_InvalidZPR0:
6873 case Match_InvalidZPR8:
6874 case Match_InvalidZPR16:
6875 case Match_InvalidZPR32:
6876 case Match_InvalidZPR64:
6877 case Match_InvalidZPR128:
6878 case Match_InvalidZPR_3b8:
6879 case Match_InvalidZPR_3b16:
6880 case Match_InvalidZPR_3b32:
6881 case Match_InvalidZPR_4b8:
6882 case Match_InvalidZPR_4b16:
6883 case Match_InvalidZPR_4b32:
6884 case Match_InvalidZPR_4b64:
6885 case Match_InvalidSVEPPRorPNRAnyReg:
6886 case Match_InvalidSVEPPRorPNRBReg:
6887 case Match_InvalidSVEPredicateAnyReg:
6888 case Match_InvalidSVEPattern:
6889 case Match_InvalidSVEVecLenSpecifier:
6890 case Match_InvalidSVEPredicateBReg:
6891 case Match_InvalidSVEPredicateHReg:
6892 case Match_InvalidSVEPredicateSReg:
6893 case Match_InvalidSVEPredicateDReg:
6894 case Match_InvalidSVEPredicate3bAnyReg:
6895 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6896 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6897 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6898 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6899 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6900 case Match_InvalidSVEPNPredicateBReg:
6901 case Match_InvalidSVEPNPredicateHReg:
6902 case Match_InvalidSVEPNPredicateSReg:
6903 case Match_InvalidSVEPNPredicateDReg:
6904 case Match_InvalidSVEPredicateListMul2x8:
6905 case Match_InvalidSVEPredicateListMul2x16:
6906 case Match_InvalidSVEPredicateListMul2x32:
6907 case Match_InvalidSVEPredicateListMul2x64:
6908 case Match_InvalidSVEExactFPImmOperandHalfOne:
6909 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6910 case Match_InvalidSVEExactFPImmOperandZeroOne:
6911 case Match_InvalidMatrixTile16:
6912 case Match_InvalidMatrixTile32:
6913 case Match_InvalidMatrixTile64:
6914 case Match_InvalidMatrix:
6915 case Match_InvalidMatrix8:
6916 case Match_InvalidMatrix16:
6917 case Match_InvalidMatrix32:
6918 case Match_InvalidMatrix64:
6919 case Match_InvalidMatrixTileVectorH8:
6920 case Match_InvalidMatrixTileVectorH16:
6921 case Match_InvalidMatrixTileVectorH32:
6922 case Match_InvalidMatrixTileVectorH64:
6923 case Match_InvalidMatrixTileVectorH128:
6924 case Match_InvalidMatrixTileVectorV8:
6925 case Match_InvalidMatrixTileVectorV16:
6926 case Match_InvalidMatrixTileVectorV32:
6927 case Match_InvalidMatrixTileVectorV64:
6928 case Match_InvalidMatrixTileVectorV128:
6929 case Match_InvalidSVCR:
6930 case Match_InvalidMatrixIndexGPR32_12_15:
6931 case Match_InvalidMatrixIndexGPR32_8_11:
6932 case Match_InvalidLookupTable:
6933 case Match_InvalidZPRMul2_Lo8:
6934 case Match_InvalidZPRMul2_Hi8:
6935 case Match_InvalidZPRMul2_Lo16:
6936 case Match_InvalidZPRMul2_Hi16:
6937 case Match_InvalidZPRMul2_Lo32:
6938 case Match_InvalidZPRMul2_Hi32:
6939 case Match_InvalidZPRMul2_Lo64:
6940 case Match_InvalidZPRMul2_Hi64:
6941 case Match_InvalidZPR_K0:
6942 case Match_InvalidSVEVectorList2x8Mul2:
6943 case Match_InvalidSVEVectorList2x16Mul2:
6944 case Match_InvalidSVEVectorList2x32Mul2:
6945 case Match_InvalidSVEVectorList2x64Mul2:
6946 case Match_InvalidSVEVectorList2x128Mul2:
6947 case Match_InvalidSVEVectorList4x8Mul4:
6948 case Match_InvalidSVEVectorList4x16Mul4:
6949 case Match_InvalidSVEVectorList4x32Mul4:
6950 case Match_InvalidSVEVectorList4x64Mul4:
6951 case Match_InvalidSVEVectorList4x128Mul4:
6952 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6953 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6954 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6955 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6956 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6957 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6958 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6959 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6960 case Match_InvalidSVEVectorListStrided2x8:
6961 case Match_InvalidSVEVectorListStrided2x16:
6962 case Match_InvalidSVEVectorListStrided2x32:
6963 case Match_InvalidSVEVectorListStrided2x64:
6964 case Match_InvalidSVEVectorListStrided4x8:
6965 case Match_InvalidSVEVectorListStrided4x16:
6966 case Match_InvalidSVEVectorListStrided4x32:
6967 case Match_InvalidSVEVectorListStrided4x64:
6968 case Match_MSR:
6969 case Match_MRS: {
6970 if (ErrorInfo >= Operands.size())
6971 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6972 // Any time we get here, there's nothing fancy to do. Just get the
6973 // operand SMLoc and display the diagnostic.
6974 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6975 if (ErrorLoc == SMLoc())
6976 ErrorLoc = IDLoc;
6977 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6978 }
6979 }
6980
6981 llvm_unreachable("Implement any new match types added!");
6982}
6983
 6984 /// ParseDirective parses the AArch64-specific directives
6985bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6986 const MCContext::Environment Format = getContext().getObjectFileType();
6987 bool IsMachO = Format == MCContext::IsMachO;
6988 bool IsCOFF = Format == MCContext::IsCOFF;
6989
6990 auto IDVal = DirectiveID.getIdentifier().lower();
6991 SMLoc Loc = DirectiveID.getLoc();
6992 if (IDVal == ".arch")
6993 parseDirectiveArch(Loc);
6994 else if (IDVal == ".cpu")
6995 parseDirectiveCPU(Loc);
6996 else if (IDVal == ".tlsdesccall")
6997 parseDirectiveTLSDescCall(Loc);
6998 else if (IDVal == ".ltorg" || IDVal == ".pool")
6999 parseDirectiveLtorg(Loc);
7000 else if (IDVal == ".unreq")
7001 parseDirectiveUnreq(Loc);
7002 else if (IDVal == ".inst")
7003 parseDirectiveInst(Loc);
7004 else if (IDVal == ".cfi_negate_ra_state")
7005 parseDirectiveCFINegateRAState();
7006 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7007 parseDirectiveCFINegateRAStateWithPC();
7008 else if (IDVal == ".cfi_b_key_frame")
7009 parseDirectiveCFIBKeyFrame();
7010 else if (IDVal == ".cfi_mte_tagged_frame")
7011 parseDirectiveCFIMTETaggedFrame();
7012 else if (IDVal == ".arch_extension")
7013 parseDirectiveArchExtension(Loc);
7014 else if (IDVal == ".variant_pcs")
7015 parseDirectiveVariantPCS(Loc);
7016 else if (IsMachO) {
7017 if (IDVal == MCLOHDirectiveName())
7018 parseDirectiveLOH(IDVal, Loc);
7019 else
7020 return true;
7021 } else if (IsCOFF) {
7022 if (IDVal == ".seh_stackalloc")
7023 parseDirectiveSEHAllocStack(Loc);
7024 else if (IDVal == ".seh_endprologue")
7025 parseDirectiveSEHPrologEnd(Loc);
7026 else if (IDVal == ".seh_save_r19r20_x")
7027 parseDirectiveSEHSaveR19R20X(Loc);
7028 else if (IDVal == ".seh_save_fplr")
7029 parseDirectiveSEHSaveFPLR(Loc);
7030 else if (IDVal == ".seh_save_fplr_x")
7031 parseDirectiveSEHSaveFPLRX(Loc);
7032 else if (IDVal == ".seh_save_reg")
7033 parseDirectiveSEHSaveReg(Loc);
7034 else if (IDVal == ".seh_save_reg_x")
7035 parseDirectiveSEHSaveRegX(Loc);
7036 else if (IDVal == ".seh_save_regp")
7037 parseDirectiveSEHSaveRegP(Loc);
7038 else if (IDVal == ".seh_save_regp_x")
7039 parseDirectiveSEHSaveRegPX(Loc);
7040 else if (IDVal == ".seh_save_lrpair")
7041 parseDirectiveSEHSaveLRPair(Loc);
7042 else if (IDVal == ".seh_save_freg")
7043 parseDirectiveSEHSaveFReg(Loc);
7044 else if (IDVal == ".seh_save_freg_x")
7045 parseDirectiveSEHSaveFRegX(Loc);
7046 else if (IDVal == ".seh_save_fregp")
7047 parseDirectiveSEHSaveFRegP(Loc);
7048 else if (IDVal == ".seh_save_fregp_x")
7049 parseDirectiveSEHSaveFRegPX(Loc);
7050 else if (IDVal == ".seh_set_fp")
7051 parseDirectiveSEHSetFP(Loc);
7052 else if (IDVal == ".seh_add_fp")
7053 parseDirectiveSEHAddFP(Loc);
7054 else if (IDVal == ".seh_nop")
7055 parseDirectiveSEHNop(Loc);
7056 else if (IDVal == ".seh_save_next")
7057 parseDirectiveSEHSaveNext(Loc);
7058 else if (IDVal == ".seh_startepilogue")
7059 parseDirectiveSEHEpilogStart(Loc);
7060 else if (IDVal == ".seh_endepilogue")
7061 parseDirectiveSEHEpilogEnd(Loc);
7062 else if (IDVal == ".seh_trap_frame")
7063 parseDirectiveSEHTrapFrame(Loc);
7064 else if (IDVal == ".seh_pushframe")
7065 parseDirectiveSEHMachineFrame(Loc);
7066 else if (IDVal == ".seh_context")
7067 parseDirectiveSEHContext(Loc);
7068 else if (IDVal == ".seh_ec_context")
7069 parseDirectiveSEHECContext(Loc);
7070 else if (IDVal == ".seh_clear_unwound_to_call")
7071 parseDirectiveSEHClearUnwoundToCall(Loc);
7072 else if (IDVal == ".seh_pac_sign_lr")
7073 parseDirectiveSEHPACSignLR(Loc);
7074 else if (IDVal == ".seh_save_any_reg")
7075 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7076 else if (IDVal == ".seh_save_any_reg_p")
7077 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7078 else if (IDVal == ".seh_save_any_reg_x")
7079 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7080 else if (IDVal == ".seh_save_any_reg_px")
7081 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7082 else
7083 return true;
7084 } else
7085 return true;
7086 return false;
7087}
7088
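// Expand the legacy "crypto"/"nocrypto" meta-extension into the concrete
// features it implies for the given architecture: sha2 and aes for v8.1-a
// through v8.3-a, and additionally sm4 and sha3 for v8.4-a and later
// (including v9 and, for the enabling case, v8-r).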
7089static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7090 SmallVector<StringRef, 4> &RequestedExtensions) {
7091 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7092 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7093
7094 if (!NoCrypto && Crypto) {
7095 // Map 'generic' (and others) to sha2 and aes, because
7096 // that was the traditional meaning of crypto.
7097 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7098 ArchInfo == AArch64::ARMV8_3A) {
7099 RequestedExtensions.push_back("sha2");
7100 RequestedExtensions.push_back("aes");
7101 }
7102 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7103 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7104 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7105 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7106 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7107 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7108 RequestedExtensions.push_back("sm4");
7109 RequestedExtensions.push_back("sha3");
7110 RequestedExtensions.push_back("sha2");
7111 RequestedExtensions.push_back("aes");
7112 }
7113 } else if (NoCrypto) {
7114 // Map 'generic' (and others) to sha2 and aes, because
7115 // that was the traditional meaning of crypto.
7116 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7117 ArchInfo == AArch64::ARMV8_3A) {
7118 RequestedExtensions.push_back("nosha2");
7119 RequestedExtensions.push_back("noaes");
7120 }
7121 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7122 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7123 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7124 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7125 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7126 ArchInfo == AArch64::ARMV9_4A) {
7127 RequestedExtensions.push_back("nosm4");
7128 RequestedExtensions.push_back("nosha3");
7129 RequestedExtensions.push_back("nosha2");
7130 RequestedExtensions.push_back("noaes");
7131 }
7132 }
7133}
7134
 7135 static SMLoc incrementLoc(SMLoc L, size_t Offset) {
 7136 return SMLoc::getFromPointer(L.getPointer() + Offset);
 7137}
7138
7139/// parseDirectiveArch
7140/// ::= .arch token
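/// For example: ".arch armv8.2-a+crypto+nofp".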
7141bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7142 SMLoc CurLoc = getLoc();
7143
7144 StringRef Arch, ExtensionString;
7145 std::tie(Arch, ExtensionString) =
7146 getParser().parseStringToEndOfStatement().trim().split('+');
7147
7148 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7149 if (!ArchInfo)
7150 return Error(CurLoc, "unknown arch name");
7151
7152 if (parseToken(AsmToken::EndOfStatement))
7153 return true;
7154
7155 // Get the architecture and extension features.
7156 std::vector<StringRef> AArch64Features;
7157 AArch64Features.push_back(ArchInfo->ArchFeature);
7158 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7159
7160 MCSubtargetInfo &STI = copySTI();
7161 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7162 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7163 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7164
7165 SmallVector<StringRef, 4> RequestedExtensions;
7166 if (!ExtensionString.empty())
7167 ExtensionString.split(RequestedExtensions, '+');
7168
7169 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7170 CurLoc = incrementLoc(CurLoc, Arch.size());
7171
7172 for (auto Name : RequestedExtensions) {
7173 // Advance source location past '+'.
7174 CurLoc = incrementLoc(CurLoc, 1);
7175
7176 bool EnableFeature = !Name.consume_front_insensitive("no");
7177
7178 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7179 return Extension.Name == Name;
7180 });
7181
7182 if (It == std::end(ExtensionMap))
7183 return Error(CurLoc, "unsupported architectural extension: " + Name);
7184
7185 if (EnableFeature)
7186 STI.SetFeatureBitsTransitively(It->Features);
7187 else
7188 STI.ClearFeatureBitsTransitively(It->Features);
7189 CurLoc = incrementLoc(CurLoc, Name.size());
7190 }
7191 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7192 setAvailableFeatures(Features);
7193 return false;
7194}
7195
7196/// parseDirectiveArchExtension
7197/// ::= .arch_extension [no]feature
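/// For example: ".arch_extension crc" enables CRC, and ".arch_extension nocrc"
/// disables it again.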
7198bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7199 SMLoc ExtLoc = getLoc();
7200
7201 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7202
7203 if (parseEOL())
7204 return true;
7205
7206 bool EnableFeature = true;
7207 if (Name.starts_with_insensitive("no")) {
7208 EnableFeature = false;
7209 Name = Name.substr(2);
7210 }
7211
7212 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7213 return Extension.Name == Name;
7214 });
7215
7216 if (It == std::end(ExtensionMap))
7217 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7218
7219 MCSubtargetInfo &STI = copySTI();
7220 if (EnableFeature)
7221 STI.SetFeatureBitsTransitively(It->Features);
7222 else
7223 STI.ClearFeatureBitsTransitively(It->Features);
7224 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7225 setAvailableFeatures(Features);
7226 return false;
7227}
7228
7229/// parseDirectiveCPU
7230/// ::= .cpu id
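/// For example: ".cpu cortex-a75+crypto+nofp" selects the CPU's default
/// features and then toggles the listed extensions.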
7231bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7232 SMLoc CurLoc = getLoc();
7233
7234 StringRef CPU, ExtensionString;
7235 std::tie(CPU, ExtensionString) =
7236 getParser().parseStringToEndOfStatement().trim().split('+');
7237
7238 if (parseToken(AsmToken::EndOfStatement))
7239 return true;
7240
7241 SmallVector<StringRef, 4> RequestedExtensions;
7242 if (!ExtensionString.empty())
7243 ExtensionString.split(RequestedExtensions, '+');
7244
 7245 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
 7246 if (!CpuArch) {
7247 Error(CurLoc, "unknown CPU name");
7248 return false;
7249 }
7250 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7251
7252 MCSubtargetInfo &STI = copySTI();
7253 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7254 CurLoc = incrementLoc(CurLoc, CPU.size());
7255
7256 for (auto Name : RequestedExtensions) {
7257 // Advance source location past '+'.
7258 CurLoc = incrementLoc(CurLoc, 1);
7259
7260 bool EnableFeature = !Name.consume_front_insensitive("no");
7261
7262 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7263 return Extension.Name == Name;
7264 });
7265
7266 if (It == std::end(ExtensionMap))
7267 return Error(CurLoc, "unsupported architectural extension: " + Name);
7268
7269 if (EnableFeature)
7270 STI.SetFeatureBitsTransitively(It->Features);
7271 else
7272 STI.ClearFeatureBitsTransitively(It->Features);
7273 CurLoc = incrementLoc(CurLoc, Name.size());
7274 }
7275 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7276 setAvailableFeatures(Features);
7277 return false;
7278}
7279
7280/// parseDirectiveInst
7281/// ::= .inst opcode [, ...]
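/// For example: ".inst 0xd503201f" emits the 32-bit NOP encoding, and
/// ".inst 0xd503201f, 0xd65f03c0" emits a NOP followed by a RET.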
7282bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7283 if (getLexer().is(AsmToken::EndOfStatement))
7284 return Error(Loc, "expected expression following '.inst' directive");
7285
7286 auto parseOp = [&]() -> bool {
7287 SMLoc L = getLoc();
7288 const MCExpr *Expr = nullptr;
7289 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7290 return true;
7291 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7292 if (check(!Value, L, "expected constant expression"))
7293 return true;
7294 getTargetStreamer().emitInst(Value->getValue());
7295 return false;
7296 };
7297
7298 return parseMany(parseOp);
7299}
7300
7301// parseDirectiveTLSDescCall:
7302// ::= .tlsdesccall symbol
7303bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
 7304 StringRef Name;
 7305 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7306 parseToken(AsmToken::EndOfStatement))
7307 return true;
7308
7309 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7310 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7311 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7312
7313 MCInst Inst;
 7314 Inst.setOpcode(AArch64::TLSDESCCALL);
 7315 Inst.addOperand(MCOperand::createExpr(Expr));
7316
7317 getParser().getStreamer().emitInstruction(Inst, getSTI());
7318 return false;
7319}
7320
7321/// ::= .loh <lohName | lohId> label1, ..., labelN
7322/// The number of arguments depends on the loh identifier.
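/// For example: ".loh AdrpAdd Lpage, Ladd" takes two labels and hints that the
/// labelled ADRP/ADD pair can be optimized by the Mach-O linker.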
7323bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
 7324 MCLOHType Kind;
 7325 if (getTok().isNot(AsmToken::Identifier)) {
7326 if (getTok().isNot(AsmToken::Integer))
7327 return TokError("expected an identifier or a number in directive");
7328 // We successfully get a numeric value for the identifier.
7329 // Check if it is valid.
7330 int64_t Id = getTok().getIntVal();
7331 if (Id <= -1U && !isValidMCLOHType(Id))
7332 return TokError("invalid numeric identifier in directive");
7333 Kind = (MCLOHType)Id;
7334 } else {
7335 StringRef Name = getTok().getIdentifier();
7336 // We successfully parse an identifier.
7337 // Check if it is a recognized one.
7338 int Id = MCLOHNameToId(Name);
7339
7340 if (Id == -1)
7341 return TokError("invalid identifier in directive");
7342 Kind = (MCLOHType)Id;
7343 }
7344 // Consume the identifier.
7345 Lex();
7346 // Get the number of arguments of this LOH.
7347 int NbArgs = MCLOHIdToNbArgs(Kind);
7348
7349 assert(NbArgs != -1 && "Invalid number of arguments");
7350
 7351 SmallVector<MCSymbol *, 3> Args;
 7352 for (int Idx = 0; Idx < NbArgs; ++Idx) {
 7353 StringRef Name;
 7354 if (getParser().parseIdentifier(Name))
7355 return TokError("expected identifier in directive");
7356 Args.push_back(getContext().getOrCreateSymbol(Name));
7357
7358 if (Idx + 1 == NbArgs)
7359 break;
7360 if (parseComma())
7361 return true;
7362 }
7363 if (parseEOL())
7364 return true;
7365
7366 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7367 return false;
7368}
7369
7370/// parseDirectiveLtorg
7371/// ::= .ltorg | .pool
7372bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7373 if (parseEOL())
7374 return true;
7375 getTargetStreamer().emitCurrentConstantPool();
7376 return false;
7377}
7378
7379/// parseDirectiveReq
7380/// ::= name .req registername
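/// For example: "fpreg .req d9" lets "fpreg" be used wherever d9 is valid,
/// until the alias is removed with ".unreq fpreg".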
7381bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7382 Lex(); // Eat the '.req' token.
7383 SMLoc SRegLoc = getLoc();
7384 RegKind RegisterKind = RegKind::Scalar;
7385 MCRegister RegNum;
7386 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7387
7388 if (!ParseRes.isSuccess()) {
 7389 StringRef Kind;
 7390 RegisterKind = RegKind::NeonVector;
7391 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7392
7393 if (ParseRes.isFailure())
7394 return true;
7395
7396 if (ParseRes.isSuccess() && !Kind.empty())
7397 return Error(SRegLoc, "vector register without type specifier expected");
7398 }
7399
7400 if (!ParseRes.isSuccess()) {
 7401 StringRef Kind;
 7402 RegisterKind = RegKind::SVEDataVector;
7403 ParseRes =
7404 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7405
7406 if (ParseRes.isFailure())
7407 return true;
7408
7409 if (ParseRes.isSuccess() && !Kind.empty())
7410 return Error(SRegLoc,
7411 "sve vector register without type specifier expected");
7412 }
7413
7414 if (!ParseRes.isSuccess()) {
 7415 StringRef Kind;
 7416 RegisterKind = RegKind::SVEPredicateVector;
7417 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7418
7419 if (ParseRes.isFailure())
7420 return true;
7421
7422 if (ParseRes.isSuccess() && !Kind.empty())
7423 return Error(SRegLoc,
7424 "sve predicate register without type specifier expected");
7425 }
7426
7427 if (!ParseRes.isSuccess())
7428 return Error(SRegLoc, "register name or alias expected");
7429
7430 // Shouldn't be anything else.
7431 if (parseEOL())
7432 return true;
7433
7434 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7435 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7436 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7437
7438 return false;
7439}
7440
 7441 /// parseDirectiveUnreq
7442/// ::= .unreq registername
7443bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7444 if (getTok().isNot(AsmToken::Identifier))
7445 return TokError("unexpected input in .unreq directive.");
7446 RegisterReqs.erase(getTok().getIdentifier().lower());
7447 Lex(); // Eat the identifier.
7448 return parseToken(AsmToken::EndOfStatement);
7449}
7450
7451bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7452 if (parseEOL())
7453 return true;
7454 getStreamer().emitCFINegateRAState();
7455 return false;
7456}
7457
7458bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7459 if (parseEOL())
7460 return true;
7461 getStreamer().emitCFINegateRAStateWithPC();
7462 return false;
7463}
7464
7465/// parseDirectiveCFIBKeyFrame
 7466 /// ::= .cfi_b_key_frame
7467bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7468 if (parseEOL())
7469 return true;
7470 getStreamer().emitCFIBKeyFrame();
7471 return false;
7472}
7473
7474/// parseDirectiveCFIMTETaggedFrame
7475/// ::= .cfi_mte_tagged_frame
7476bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7477 if (parseEOL())
7478 return true;
7479 getStreamer().emitCFIMTETaggedFrame();
7480 return false;
7481}
7482
7483/// parseDirectiveVariantPCS
7484/// ::= .variant_pcs symbolname
7485bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
 7486 StringRef Name;
 7487 if (getParser().parseIdentifier(Name))
7488 return TokError("expected symbol name");
7489 if (parseEOL())
7490 return true;
7491 getTargetStreamer().emitDirectiveVariantPCS(
7492 getContext().getOrCreateSymbol(Name));
7493 return false;
7494}
7495
7496/// parseDirectiveSEHAllocStack
7497/// ::= .seh_stackalloc
7498bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7499 int64_t Size;
7500 if (parseImmExpr(Size))
7501 return true;
7502 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7503 return false;
7504}
7505
7506/// parseDirectiveSEHPrologEnd
7507/// ::= .seh_endprologue
7508bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7509 getTargetStreamer().emitARM64WinCFIPrologEnd();
7510 return false;
7511}
7512
7513/// parseDirectiveSEHSaveR19R20X
7514/// ::= .seh_save_r19r20_x
7515bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7516 int64_t Offset;
7517 if (parseImmExpr(Offset))
7518 return true;
7519 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7520 return false;
7521}
7522
7523/// parseDirectiveSEHSaveFPLR
7524/// ::= .seh_save_fplr
7525bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7526 int64_t Offset;
7527 if (parseImmExpr(Offset))
7528 return true;
7529 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7530 return false;
7531}
7532
7533/// parseDirectiveSEHSaveFPLRX
7534/// ::= .seh_save_fplr_x
7535bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7536 int64_t Offset;
7537 if (parseImmExpr(Offset))
7538 return true;
7539 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7540 return false;
7541}
7542
7543/// parseDirectiveSEHSaveReg
7544/// ::= .seh_save_reg
7545bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7546 unsigned Reg;
7547 int64_t Offset;
7548 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7549 parseComma() || parseImmExpr(Offset))
7550 return true;
7551 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7552 return false;
7553}
7554
7555/// parseDirectiveSEHSaveRegX
7556/// ::= .seh_save_reg_x
7557bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7558 unsigned Reg;
7559 int64_t Offset;
7560 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7561 parseComma() || parseImmExpr(Offset))
7562 return true;
7563 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7564 return false;
7565}
7566
7567/// parseDirectiveSEHSaveRegP
7568/// ::= .seh_save_regp
7569bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7570 unsigned Reg;
7571 int64_t Offset;
7572 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7573 parseComma() || parseImmExpr(Offset))
7574 return true;
7575 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7576 return false;
7577}
7578
7579/// parseDirectiveSEHSaveRegPX
7580/// ::= .seh_save_regp_x
7581bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7582 unsigned Reg;
7583 int64_t Offset;
7584 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7585 parseComma() || parseImmExpr(Offset))
7586 return true;
7587 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7588 return false;
7589}
7590
7591/// parseDirectiveSEHSaveLRPair
7592/// ::= .seh_save_lrpair
7593bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7594 unsigned Reg;
7595 int64_t Offset;
7596 L = getLoc();
7597 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7598 parseComma() || parseImmExpr(Offset))
7599 return true;
7600 if (check(((Reg - 19) % 2 != 0), L,
7601 "expected register with even offset from x19"))
7602 return true;
7603 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7604 return false;
7605}
7606
7607/// parseDirectiveSEHSaveFReg
7608/// ::= .seh_save_freg
7609bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7610 unsigned Reg;
7611 int64_t Offset;
7612 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7613 parseComma() || parseImmExpr(Offset))
7614 return true;
7615 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7616 return false;
7617}
7618
7619/// parseDirectiveSEHSaveFRegX
7620/// ::= .seh_save_freg_x
7621bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7622 unsigned Reg;
7623 int64_t Offset;
7624 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7625 parseComma() || parseImmExpr(Offset))
7626 return true;
7627 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7628 return false;
7629}
7630
7631/// parseDirectiveSEHSaveFRegP
7632/// ::= .seh_save_fregp
7633bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7634 unsigned Reg;
7635 int64_t Offset;
7636 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7637 parseComma() || parseImmExpr(Offset))
7638 return true;
7639 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7640 return false;
7641}
7642
7643/// parseDirectiveSEHSaveFRegPX
7644/// ::= .seh_save_fregp_x
7645bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7646 unsigned Reg;
7647 int64_t Offset;
7648 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7649 parseComma() || parseImmExpr(Offset))
7650 return true;
7651 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7652 return false;
7653}
7654
7655/// parseDirectiveSEHSetFP
7656/// ::= .seh_set_fp
7657bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7658 getTargetStreamer().emitARM64WinCFISetFP();
7659 return false;
7660}
7661
7662/// parseDirectiveSEHAddFP
7663/// ::= .seh_add_fp
7664bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7665 int64_t Size;
7666 if (parseImmExpr(Size))
7667 return true;
7668 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7669 return false;
7670}
7671
7672/// parseDirectiveSEHNop
7673/// ::= .seh_nop
7674bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7675 getTargetStreamer().emitARM64WinCFINop();
7676 return false;
7677}
7678
7679/// parseDirectiveSEHSaveNext
7680/// ::= .seh_save_next
7681bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7682 getTargetStreamer().emitARM64WinCFISaveNext();
7683 return false;
7684}
7685
7686/// parseDirectiveSEHEpilogStart
7687/// ::= .seh_startepilogue
7688bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7689 getTargetStreamer().emitARM64WinCFIEpilogStart();
7690 return false;
7691}
7692
7693/// parseDirectiveSEHEpilogEnd
7694/// ::= .seh_endepilogue
7695bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7696 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7697 return false;
7698}
7699
7700/// parseDirectiveSEHTrapFrame
7701/// ::= .seh_trap_frame
7702bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7703 getTargetStreamer().emitARM64WinCFITrapFrame();
7704 return false;
7705}
7706
7707/// parseDirectiveSEHMachineFrame
7708/// ::= .seh_pushframe
7709bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7710 getTargetStreamer().emitARM64WinCFIMachineFrame();
7711 return false;
7712}
7713
7714/// parseDirectiveSEHContext
7715/// ::= .seh_context
7716bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7717 getTargetStreamer().emitARM64WinCFIContext();
7718 return false;
7719}
7720
7721/// parseDirectiveSEHECContext
7722/// ::= .seh_ec_context
7723bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7724 getTargetStreamer().emitARM64WinCFIECContext();
7725 return false;
7726}
7727
7728/// parseDirectiveSEHClearUnwoundToCall
7729/// ::= .seh_clear_unwound_to_call
7730bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7731 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7732 return false;
7733}
7734
7735/// parseDirectiveSEHPACSignLR
7736/// ::= .seh_pac_sign_lr
7737bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7738 getTargetStreamer().emitARM64WinCFIPACSignLR();
7739 return false;
7740}
7741
7742/// parseDirectiveSEHSaveAnyReg
7743/// ::= .seh_save_any_reg
7744/// ::= .seh_save_any_reg_p
7745/// ::= .seh_save_any_reg_x
7746/// ::= .seh_save_any_reg_px
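/// For example: ".seh_save_any_reg_p q6, 32" records a paired save of q6 and
/// the following register at offset 32 in the current unwind prologue.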
7747bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7748 bool Writeback) {
 7749 MCRegister Reg;
 7750 SMLoc Start, End;
7751 int64_t Offset;
7752 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7753 parseComma() || parseImmExpr(Offset))
7754 return true;
7755
7756 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7757 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7758 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7759 return Error(L, "invalid save_any_reg offset");
7760 unsigned EncodedReg;
7761 if (Reg == AArch64::FP)
7762 EncodedReg = 29;
7763 else if (Reg == AArch64::LR)
7764 EncodedReg = 30;
7765 else
7766 EncodedReg = Reg - AArch64::X0;
7767 if (Paired) {
7768 if (Reg == AArch64::LR)
7769 return Error(Start, "lr cannot be paired with another register");
7770 if (Writeback)
7771 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7772 else
7773 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7774 } else {
7775 if (Writeback)
7776 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7777 else
7778 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7779 }
7780 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7781 unsigned EncodedReg = Reg - AArch64::D0;
7782 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7783 return Error(L, "invalid save_any_reg offset");
7784 if (Paired) {
7785 if (Reg == AArch64::D31)
7786 return Error(Start, "d31 cannot be paired with another register");
7787 if (Writeback)
7788 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7789 else
7790 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7791 } else {
7792 if (Writeback)
7793 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7794 else
7795 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7796 }
7797 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7798 unsigned EncodedReg = Reg - AArch64::Q0;
7799 if (Offset < 0 || Offset % 16)
7800 return Error(L, "invalid save_any_reg offset");
7801 if (Paired) {
7802 if (Reg == AArch64::Q31)
7803 return Error(Start, "q31 cannot be paired with another register");
7804 if (Writeback)
7805 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7806 else
7807 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7808 } else {
7809 if (Writeback)
7810 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7811 else
7812 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7813 }
7814 } else {
7815 return Error(Start, "save_any_reg register must be x, q or d register");
7816 }
7817 return false;
7818}
7819
7820bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7821 // Try @AUTH expressions: they're more complex than the usual symbol variants.
7822 if (!parseAuthExpr(Res, EndLoc))
7823 return false;
7824 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
7825}
7826
7827/// parseAuthExpr
7828/// ::= _sym@AUTH(ib,123[,addr])
7829/// ::= (_sym + 5)@AUTH(ib,123[,addr])
7830/// ::= (_sym - 5)@AUTH(ib,123[,addr])
7831bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
7832 MCAsmParser &Parser = getParser();
7833 MCContext &Ctx = getContext();
7834
7835 AsmToken Tok = Parser.getTok();
7836
7837 // Look for '_sym@AUTH' ...
7838 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
7839 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
7840 if (SymName.contains('@'))
7841 return TokError(
7842 "combination of @AUTH with other modifiers not supported");
7843 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7844
7845 Parser.Lex(); // Eat the identifier.
7846 } else {
7847 // ... or look for a more complex symbol reference, such as ...
 7848 SmallVector<AsmToken, 6> Tokens;
 7849
7850 // ... '"_long sym"@AUTH' ...
7851 if (Tok.is(AsmToken::String))
7852 Tokens.resize(2);
7853 // ... or '(_sym + 5)@AUTH'.
7854 else if (Tok.is(AsmToken::LParen))
7855 Tokens.resize(6);
7856 else
7857 return true;
7858
7859 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
7860 return true;
7861
7862 // In either case, the expression ends with '@' 'AUTH'.
7863 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
7864 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
7865 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
7866 return true;
7867
7868 if (Tok.is(AsmToken::String)) {
7869 StringRef SymName;
7870 if (Parser.parseIdentifier(SymName))
7871 return true;
7872 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
7873 } else {
7874 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
7875 return true;
7876 }
7877
7878 Parser.Lex(); // '@'
7879 Parser.Lex(); // 'AUTH'
7880 }
7881
7882 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
7883 if (parseToken(AsmToken::LParen, "expected '('"))
7884 return true;
7885
7886 if (Parser.getTok().isNot(AsmToken::Identifier))
7887 return TokError("expected key name");
7888
7889 StringRef KeyStr = Parser.getTok().getIdentifier();
7890 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
7891 if (!KeyIDOrNone)
7892 return TokError("invalid key '" + KeyStr + "'");
7893 Parser.Lex();
7894
7895 if (parseToken(AsmToken::Comma, "expected ','"))
7896 return true;
7897
7898 if (Parser.getTok().isNot(AsmToken::Integer))
7899 return TokError("expected integer discriminator");
7900 int64_t Discriminator = Parser.getTok().getIntVal();
7901
7902 if (!isUInt<16>(Discriminator))
7903 return TokError("integer discriminator " + Twine(Discriminator) +
7904 " out of range [0, 0xFFFF]");
7905 Parser.Lex();
7906
7907 bool UseAddressDiversity = false;
7908 if (Parser.getTok().is(AsmToken::Comma)) {
7909 Parser.Lex();
7910 if (Parser.getTok().isNot(AsmToken::Identifier) ||
7911 Parser.getTok().getIdentifier() != "addr")
7912 return TokError("expected 'addr'");
7913 UseAddressDiversity = true;
7914 Parser.Lex();
7915 }
7916
7917 EndLoc = Parser.getTok().getEndLoc();
7918 if (parseToken(AsmToken::RParen, "expected ')'"))
7919 return true;
7920
7921 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
7922 UseAddressDiversity, Ctx);
7923 return false;
7924}
7925
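// Classify a symbol-reference expression into its ELF and/or Darwin reference
// kind plus a constant addend. For example, ":lo12:sym + 4" is reported with
// ELFRefKind == AArch64MCExpr::VK_LO12 and Addend == 4, while a plain Darwin
// "sym@PAGEOFF" is reported through DarwinRefKind.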
7926bool
7927AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
7928 AArch64MCExpr::VariantKind &ELFRefKind,
7929 MCSymbolRefExpr::VariantKind &DarwinRefKind,
7930 int64_t &Addend) {
7931 ELFRefKind = AArch64MCExpr::VK_INVALID;
7932 DarwinRefKind = MCSymbolRefExpr::VK_None;
7933 Addend = 0;
7934
7935 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
7936 ELFRefKind = AE->getKind();
7937 Expr = AE->getSubExpr();
7938 }
7939
7940 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
7941 if (SE) {
7942 // It's a simple symbol reference with no addend.
7943 DarwinRefKind = SE->getKind();
7944 return true;
7945 }
7946
7947 // Check that it looks like a symbol + an addend
7948 MCValue Res;
7949 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
7950 if (!Relocatable || Res.getSymB())
7951 return false;
7952
7953 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
7954 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
7955 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
7956 return false;
7957
7958 if (Res.getSymA())
7959 DarwinRefKind = Res.getSymA()->getKind();
7960 Addend = Res.getConstant();
7961
7962 // It's some symbol reference + a constant addend, but really
7963 // shouldn't use both Darwin and ELF syntax.
7964 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
7965 DarwinRefKind == MCSymbolRefExpr::VK_None;
7966}
7967
7968/// Force static initialization.
7975}
7976
7977#define GET_REGISTER_MATCHER
7978#define GET_SUBTARGET_FEATURE_NAME
7979#define GET_MATCHER_IMPLEMENTATION
7980#define GET_MNEMONIC_SPELL_CHECKER
7981#include "AArch64GenAsmMatcher.inc"
7982
7983// Define this matcher function after the auto-generated include so we
7984// have the match class enum definitions.
7985unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
7986 unsigned Kind) {
7987 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
7988
7989 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
7990 if (!Op.isImm())
7991 return Match_InvalidOperand;
7992 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7993 if (!CE)
7994 return Match_InvalidOperand;
7995 if (CE->getValue() == ExpectedVal)
7996 return Match_Success;
7997 return Match_InvalidOperand;
7998 };
7999
8000 switch (Kind) {
8001 default:
8002 return Match_InvalidOperand;
8003 case MCK_MPR:
8004 // If the Kind is a token for the MPR register class which has the "za"
8005 // register (SME accumulator array), check if the asm is a literal "za"
8006 // token. This is for the "smstart za" alias that defines the register
8007 // as a literal token.
8008 if (Op.isTokenEqual("za"))
8009 return Match_Success;
8010 return Match_InvalidOperand;
8011
8012 // If the kind is a token for a literal immediate, check if our asm operand
8013 // matches. This is for InstAliases which have a fixed-value immediate in
8014 // the asm string, such as hints which are parsed into a specific
8015 // instruction definition.
8016#define MATCH_HASH(N) \
8017 case MCK__HASH_##N: \
8018 return MatchesOpImmediate(N);
8019 MATCH_HASH(0)
8020 MATCH_HASH(1)
8021 MATCH_HASH(2)
8022 MATCH_HASH(3)
8023 MATCH_HASH(4)
8024 MATCH_HASH(6)
8025 MATCH_HASH(7)
8026 MATCH_HASH(8)
8027 MATCH_HASH(10)
8028 MATCH_HASH(12)
8029 MATCH_HASH(14)
8030 MATCH_HASH(16)
8031 MATCH_HASH(24)
8032 MATCH_HASH(25)
8033 MATCH_HASH(26)
8034 MATCH_HASH(27)
8035 MATCH_HASH(28)
8036 MATCH_HASH(29)
8037 MATCH_HASH(30)
8038 MATCH_HASH(31)
8039 MATCH_HASH(32)
8040 MATCH_HASH(40)
8041 MATCH_HASH(48)
8042 MATCH_HASH(64)
8043#undef MATCH_HASH
8044#define MATCH_HASH_MINUS(N) \
8045 case MCK__HASH__MINUS_##N: \
8046 return MatchesOpImmediate(-N);
 8047 MATCH_HASH_MINUS(4)
 8048 MATCH_HASH_MINUS(8)
 8049 MATCH_HASH_MINUS(12)
 8050#undef MATCH_HASH_MINUS
8051 }
8052}
8053
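// Parse a consecutive even/odd scalar register pair such as "x0, x1" or
// "w2, w3" (as used by CASP) and fold it into a single sequence-pair operand.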
8054ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8055
8056 SMLoc S = getLoc();
8057
8058 if (getTok().isNot(AsmToken::Identifier))
8059 return Error(S, "expected register");
8060
8061 MCRegister FirstReg;
8062 ParseStatus Res = tryParseScalarRegister(FirstReg);
8063 if (!Res.isSuccess())
8064 return Error(S, "expected first even register of a consecutive same-size "
8065 "even/odd register pair");
8066
8067 const MCRegisterClass &WRegClass =
8068 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8069 const MCRegisterClass &XRegClass =
8070 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8071
8072 bool isXReg = XRegClass.contains(FirstReg),
8073 isWReg = WRegClass.contains(FirstReg);
8074 if (!isXReg && !isWReg)
8075 return Error(S, "expected first even register of a consecutive same-size "
8076 "even/odd register pair");
8077
8078 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8079 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8080
8081 if (FirstEncoding & 0x1)
8082 return Error(S, "expected first even register of a consecutive same-size "
8083 "even/odd register pair");
8084
8085 if (getTok().isNot(AsmToken::Comma))
8086 return Error(getLoc(), "expected comma");
8087 // Eat the comma
8088 Lex();
8089
8090 SMLoc E = getLoc();
8091 MCRegister SecondReg;
8092 Res = tryParseScalarRegister(SecondReg);
8093 if (!Res.isSuccess())
8094 return Error(E, "expected second odd register of a consecutive same-size "
8095 "even/odd register pair");
8096
8097 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8098 (isXReg && !XRegClass.contains(SecondReg)) ||
8099 (isWReg && !WRegClass.contains(SecondReg)))
8100 return Error(E, "expected second odd register of a consecutive same-size "
8101 "even/odd register pair");
8102
8103 MCRegister Pair;
8104 if (isXReg) {
8105 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8106 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8107 } else {
8108 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8109 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8110 }
8111
8112 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8113 getLoc(), getContext()));
8114
8115 return ParseStatus::Success;
8116}
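// Illustrative operands (hand-written): "x0, x1" or "w2, w3" form valid
// consecutive even/odd pairs of the kind used by CASP-family instructions,
// whereas "x1, x2" (odd first register) or "x0, w1" (mismatched sizes) hit
// the diagnostics above.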
8117
8118template <bool ParseShiftExtend, bool ParseSuffix>
8119ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8120 const SMLoc S = getLoc();
8121 // Check for a SVE vector register specifier first.
8122 MCRegister RegNum;
8123 StringRef Kind;
8124
8125 ParseStatus Res =
8126 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8127
8128 if (!Res.isSuccess())
8129 return Res;
8130
8131 if (ParseSuffix && Kind.empty())
8132 return ParseStatus::NoMatch;
8133
8134 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8135 if (!KindRes)
8136 return ParseStatus::NoMatch;
8137
8138 unsigned ElementWidth = KindRes->second;
8139
8140 // No shift/extend is the default.
8141 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8142 Operands.push_back(AArch64Operand::CreateVectorReg(
8143 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8144
8145 ParseStatus Res = tryParseVectorIndex(Operands);
8146 if (Res.isFailure())
8147 return ParseStatus::Failure;
8148 return ParseStatus::Success;
8149 }
8150
8151 // Eat the comma
8152 Lex();
8153
8154 // Match the shift
8155 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8156 Res = tryParseOptionalShiftExtend(ExtOpnd);
8157 if (!Res.isSuccess())
8158 return Res;
8159
8160 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8161 Operands.push_back(AArch64Operand::CreateVectorReg(
8162 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8163 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8164 Ext->hasShiftExtendAmount()));
8165
8166 return ParseStatus::Success;
8167}
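// Illustrative operands (hand-written): "z3.s" parses as an SVE data vector
// with a 32-bit element suffix; with ParseShiftExtend enabled, a gather-style
// operand such as "z1.d, lsl #3" additionally records the trailing
// shift/extend on the vector register.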
8168
8169ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8170 MCAsmParser &Parser = getParser();
8171
8172 SMLoc SS = getLoc();
8173 const AsmToken &TokE = getTok();
8174 bool IsHash = TokE.is(AsmToken::Hash);
8175
8176 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8177 return ParseStatus::NoMatch;
8178
8179 int64_t Pattern;
8180 if (IsHash) {
8181 Lex(); // Eat hash
8182
8183 // Parse the immediate operand.
8184 const MCExpr *ImmVal;
8185 SS = getLoc();
8186 if (Parser.parseExpression(ImmVal))
8187 return ParseStatus::Failure;
8188
8189 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8190 if (!MCE)
8191 return TokError("invalid operand for instruction");
8192
8193 Pattern = MCE->getValue();
8194 } else {
8195 // Parse the pattern
8196 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8197 if (!Pat)
8198 return ParseStatus::NoMatch;
8199
8200 Lex();
8201 Pattern = Pat->Encoding;
8202 assert(Pattern >= 0 && Pattern < 32);
8203 }
8204
8205 Operands.push_back(
8206 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8207 SS, getLoc(), getContext()));
8208
8209 return ParseStatus::Success;
8210}
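// Illustrative operands (hand-written): a named pattern such as "vl64" (as in
// "ptrue p0.b, vl64") or an explicit immediate such as "#31" (the encoding of
// "all") both produce a pattern immediate operand.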
8211
8212ParseStatus
8213AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8214 int64_t Pattern;
8215 SMLoc SS = getLoc();
8216 const AsmToken &TokE = getTok();
8217 // Parse the pattern
8218 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8219 TokE.getString());
8220 if (!Pat)
8221 return ParseStatus::NoMatch;
8222
8223 Lex();
8224 Pattern = Pat->Encoding;
8225 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8226
8227 Operands.push_back(
8228 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8229 SS, getLoc(), getContext()));
8230
8231 return ParseStatus::Success;
8232}
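// Illustrative operand (hand-written, assuming SME2 syntax): the trailing
// "vlx2"/"vlx4" specifier on predicate-as-counter WHILE* forms, e.g.
// "whilelo pn8.b, x0, x1, vlx2", is what this routine parses.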
8233
8234ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8235 SMLoc SS = getLoc();
8236
8237 MCRegister XReg;
8238 if (!tryParseScalarRegister(XReg).isSuccess())
8239 return ParseStatus::NoMatch;
8240
8241 MCContext &ctx = getContext();
8242 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8243 MCRegister X8Reg = RI->getMatchingSuperReg(
8244 XReg, AArch64::x8sub_0,
8245 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8246 if (!X8Reg)
8247 return Error(SS,
8248 "expected an even-numbered x-register in the range [x0,x22]");
8249
8250 Operands.push_back(
8251 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8252 return ParseStatus::Success;
8253}
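// Illustrative operand (hand-written): writing "x0" here denotes the
// eight-register tuple x0-x7 used by LD64B/ST64B-style instructions; an odd
// or out-of-range register such as "x3" or "x24" produces the error above.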
8254
8255ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8256 SMLoc S = getLoc();
8257
8258 if (getTok().isNot(AsmToken::Integer))
8259 return ParseStatus::NoMatch;
8260
8261 if (getLexer().peekTok().isNot(AsmToken::Colon))
8262 return ParseStatus::NoMatch;
8263
8264 const MCExpr *ImmF;
8265 if (getParser().parseExpression(ImmF))
8266 return ParseStatus::NoMatch;
8267
8268 if (getTok().isNot(AsmToken::Colon))
8269 return ParseStatus::NoMatch;
8270
8271 Lex(); // Eat ':'
8272 if (getTok().isNot(AsmToken::Integer))
8273 return ParseStatus::NoMatch;
8274
8275 SMLoc E = getTok().getLoc();
8276 const MCExpr *ImmL;
8277 if (getParser().parseExpression(ImmL))
8278 return ParseStatus::NoMatch;
8279
8280 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8281 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8282
8283 Operands.push_back(
8284 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8285 return ParseStatus::Success;
8286}
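// Illustrative operand (hand-written, assuming SME2 syntax): the "0:1" in a
// slice selector such as "za.d[w8, 0:1]" is parsed here into a single
// first:last immediate-range operand.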
8287
8288template <int Adj>
8289ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8290 SMLoc S = getLoc();
8291
8292 parseOptionalToken(AsmToken::Hash);
8293 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8294
8295 if (getTok().isNot(AsmToken::Integer))
8296 return ParseStatus::NoMatch;
8297
8298 const MCExpr *Ex;
8299 if (getParser().parseExpression(Ex))
8300 return ParseStatus::NoMatch;
8301
8302 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8303 if (IsNegative)
8304 Imm = -Imm;
8305
8306 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8307 // return a value that is certain to trigger an error message about invalid
8308 // immediate range instead of a non-descriptive invalid operand error.
8309 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8310 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8311 Imm = -2;
8312 else
8313 Imm += Adj;
8314
8315 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8316 Operands.push_back(AArch64Operand::CreateImm(
8317 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8318
8319 return ParseStatus::Success;
8320}
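// Behaviour sketch (derived from the code above, examples hand-written): with
// Adj == -1 a written "#1" is stored as encoded value 0, and with Adj == +1 a
// written "#0" is stored as 1; anything that would fall outside [0, 63] after
// adjustment is pinned to -2 so the matcher emits an immediate-range
// diagnostic rather than a generic invalid-operand error.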