AArch64AsmParser.cpp
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
39#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSymbol.h"
43#include "llvm/MC/MCValue.h"
49#include "llvm/Support/SMLoc.h"
53#include <cassert>
54#include <cctype>
55#include <cstdint>
56#include <cstdio>
57#include <optional>
58#include <string>
59#include <tuple>
60#include <utility>
61#include <vector>
62
63using namespace llvm;
64
65namespace {
66
67enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateAsCounter,
72 SVEPredicateVector,
73 Matrix,
74 LookupTable
75};
76
77enum class MatrixKind { Array, Tile, Row, Col };
78
79enum RegConstraintEqualityTy {
80 EqualsReg,
81 EqualsSuperReg,
82 EqualsSubReg
83};
84
85class AArch64AsmParser : public MCTargetAsmParser {
86private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registered via the .req directive.
90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
91
92 class PrefixInfo {
93 public:
94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
133 bool isActive() const { return Active; }
134 bool isPredicated() const { return Predicated; }
135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
139 MCRegister getDstReg() const { return Dst; }
140 MCRegister getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 MCRegister Dst;
150 MCRegister Pg;
151 } NextPrefix;
152
153 AArch64TargetStreamer &getTargetStreamer() {
154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
181 OperandVector &Operands);
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFINegateRAStateWithPC();
199 bool parseDirectiveCFIBKeyFrame();
200 bool parseDirectiveCFIMTETaggedFrame();
201
202 bool parseDirectiveVariantPCS(SMLoc L);
203
204 bool parseDirectiveSEHAllocStack(SMLoc L);
205 bool parseDirectiveSEHPrologEnd(SMLoc L);
206 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
207 bool parseDirectiveSEHSaveFPLR(SMLoc L);
208 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
209 bool parseDirectiveSEHSaveReg(SMLoc L);
210 bool parseDirectiveSEHSaveRegX(SMLoc L);
211 bool parseDirectiveSEHSaveRegP(SMLoc L);
212 bool parseDirectiveSEHSaveRegPX(SMLoc L);
213 bool parseDirectiveSEHSaveLRPair(SMLoc L);
214 bool parseDirectiveSEHSaveFReg(SMLoc L);
215 bool parseDirectiveSEHSaveFRegX(SMLoc L);
216 bool parseDirectiveSEHSaveFRegP(SMLoc L);
217 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
218 bool parseDirectiveSEHSetFP(SMLoc L);
219 bool parseDirectiveSEHAddFP(SMLoc L);
220 bool parseDirectiveSEHNop(SMLoc L);
221 bool parseDirectiveSEHSaveNext(SMLoc L);
222 bool parseDirectiveSEHEpilogStart(SMLoc L);
223 bool parseDirectiveSEHEpilogEnd(SMLoc L);
224 bool parseDirectiveSEHTrapFrame(SMLoc L);
225 bool parseDirectiveSEHMachineFrame(SMLoc L);
226 bool parseDirectiveSEHContext(SMLoc L);
227 bool parseDirectiveSEHECContext(SMLoc L);
228 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
229 bool parseDirectiveSEHPACSignLR(SMLoc L);
230 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
231 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
232 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
233
234 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
235 SmallVectorImpl<SMLoc> &Loc);
236 unsigned getNumRegsForRegKind(RegKind K);
237 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
238 OperandVector &Operands, MCStreamer &Out,
239 uint64_t &ErrorInfo,
240 bool MatchingInlineAsm) override;
241 /// @name Auto-generated Match Functions
242 /// {
243
244#define GET_ASSEMBLER_HEADER
245#include "AArch64GenAsmMatcher.inc"
246
247 /// }
248
249 ParseStatus tryParseScalarRegister(MCRegister &Reg);
250 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
251 RegKind MatchKind);
252 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
253 ParseStatus tryParseSVCR(OperandVector &Operands);
254 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
255 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
256 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
257 ParseStatus tryParseSysReg(OperandVector &Operands);
258 ParseStatus tryParseSysCROperand(OperandVector &Operands);
259 template <bool IsSVEPrefetch = false>
260 ParseStatus tryParsePrefetch(OperandVector &Operands);
261 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
262 ParseStatus tryParsePSBHint(OperandVector &Operands);
263 ParseStatus tryParseBTIHint(OperandVector &Operands);
264 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
265 ParseStatus tryParseAdrLabel(OperandVector &Operands);
266 template <bool AddFPZeroAsLiteral>
267 ParseStatus tryParseFPImm(OperandVector &Operands);
268 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
269 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
270 bool tryParseNeonVectorRegister(OperandVector &Operands);
271 ParseStatus tryParseVectorIndex(OperandVector &Operands);
272 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
273 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
274 template <bool ParseShiftExtend,
275 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
276 ParseStatus tryParseGPROperand(OperandVector &Operands);
277 ParseStatus tryParseZTOperand(OperandVector &Operands);
278 template <bool ParseShiftExtend, bool ParseSuffix>
279 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
280 template <RegKind RK>
281 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
282 ParseStatus
283 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
284 template <RegKind VectorKind>
285 ParseStatus tryParseVectorList(OperandVector &Operands,
286 bool ExpectMatch = false);
287 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
288 ParseStatus tryParseSVEPattern(OperandVector &Operands);
289 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
290 ParseStatus tryParseGPR64x8(OperandVector &Operands);
291 ParseStatus tryParseImmRange(OperandVector &Operands);
292 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
293 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
294
295public:
296 enum AArch64MatchResultTy {
297 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
298#define GET_OPERAND_DIAGNOSTIC_TYPES
299#include "AArch64GenAsmMatcher.inc"
300 };
301 bool IsILP32;
302 bool IsWindowsArm64EC;
303
304 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
305 const MCInstrInfo &MII, const MCTargetOptions &Options)
306 : MCTargetAsmParser(Options, STI, MII) {
307 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
308 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
309 MCAsmParserExtension::Initialize(Parser);
310 MCStreamer &S = getParser().getStreamer();
311 if (S.getTargetStreamer() == nullptr)
312 new AArch64TargetStreamer(S);
313
314 // Alias .hword/.word/.[dx]word to the target-independent
315 // .2byte/.4byte/.8byte directives as they have the same form and
316 // semantics:
317 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
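// Example (editorial note, not in the upstream source): '.xword 0x1122334455667788'
// is handled exactly like '.8byte 0x1122334455667788'.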
318 Parser.addAliasForDirective(".hword", ".2byte");
319 Parser.addAliasForDirective(".word", ".4byte");
320 Parser.addAliasForDirective(".dword", ".8byte");
321 Parser.addAliasForDirective(".xword", ".8byte");
322
323 // Initialize the set of available features.
324 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
325 }
326
327 bool areEqualRegs(const MCParsedAsmOperand &Op1,
328 const MCParsedAsmOperand &Op2) const override;
329 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
330 SMLoc NameLoc, OperandVector &Operands) override;
331 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
332 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
333 SMLoc &EndLoc) override;
334 bool ParseDirective(AsmToken DirectiveID) override;
335 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
336 unsigned Kind) override;
337
338 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
339
340 static bool classifySymbolRef(const MCExpr *Expr,
341 AArch64MCExpr::VariantKind &ELFRefKind,
342 MCSymbolRefExpr::VariantKind &DarwinRefKind,
343 int64_t &Addend);
344};
345
346/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
347/// instruction.
348class AArch64Operand : public MCParsedAsmOperand {
349private:
350 enum KindTy {
351 k_Immediate,
352 k_ShiftedImm,
353 k_ImmRange,
354 k_CondCode,
355 k_Register,
356 k_MatrixRegister,
357 k_MatrixTileList,
358 k_SVCR,
359 k_VectorList,
360 k_VectorIndex,
361 k_Token,
362 k_SysReg,
363 k_SysCR,
364 k_Prefetch,
365 k_ShiftExtend,
366 k_FPImm,
367 k_Barrier,
368 k_PSBHint,
369 k_PHint,
370 k_BTIHint,
371 } Kind;
372
373 SMLoc StartLoc, EndLoc;
374
375 struct TokOp {
376 const char *Data;
377 unsigned Length;
378 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
379 };
380
381 // Separate shift/extend operand.
382 struct ShiftExtendOp {
383 AArch64_AM::ShiftExtendType Type;
384 unsigned Amount;
385 bool HasExplicitAmount;
386 };
387
388 struct RegOp {
389 unsigned RegNum;
390 RegKind Kind;
391 int ElementWidth;
392
393 // The register may be allowed as a different register class,
394 // e.g. for GPR64as32 or GPR32as64.
395 RegConstraintEqualityTy EqualityTy;
396
397 // In some cases the shift/extend needs to be explicitly parsed together
398 // with the register, rather than as a separate operand. This is needed
399 // for addressing modes where the instruction as a whole dictates the
400 // scaling/extend, rather than specific bits in the instruction.
401 // By parsing them as a single operand, we avoid the need to pass an
402 // extra operand in all CodeGen patterns (because all operands need to
403 // have an associated value), and we avoid the need to update TableGen to
404 // accept operands that have no associated bits in the instruction.
405 //
406 // An added benefit of parsing them together is that the assembler
407 // can give a sensible diagnostic if the scaling is not correct.
408 //
409 // The default is 'lsl #0' (HasExplicitAmount = false) if no
410 // ShiftExtend is specified.
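// Added illustration (editorial, not in the upstream source): in
// 'ldr w0, [x2, w3, uxtw #2]' the 'w3, uxtw #2' part is parsed as a single
// register operand that carries the extend, rather than as two operands.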
411 ShiftExtendOp ShiftExtend;
412 };
413
414 struct MatrixRegOp {
415 unsigned RegNum;
416 unsigned ElementWidth;
417 MatrixKind Kind;
418 };
419
420 struct MatrixTileListOp {
421 unsigned RegMask = 0;
422 };
423
424 struct VectorListOp {
425 unsigned RegNum;
426 unsigned Count;
427 unsigned Stride;
428 unsigned NumElements;
429 unsigned ElementWidth;
430 RegKind RegisterKind;
431 };
432
433 struct VectorIndexOp {
434 int Val;
435 };
436
437 struct ImmOp {
438 const MCExpr *Val;
439 };
440
441 struct ShiftedImmOp {
442 const MCExpr *Val;
443 unsigned ShiftAmount;
444 };
445
446 struct ImmRangeOp {
447 unsigned First;
448 unsigned Last;
449 };
450
451 struct CondCodeOp {
452 AArch64CC::CondCode Code;
453 };
454
455 struct FPImmOp {
456 uint64_t Val; // APFloat value bitcasted to uint64_t.
457 bool IsExact; // describes whether parsed value was exact.
458 };
459
460 struct BarrierOp {
461 const char *Data;
462 unsigned Length;
463 unsigned Val; // Not the enum since not all values have names.
464 bool HasnXSModifier;
465 };
466
467 struct SysRegOp {
468 const char *Data;
469 unsigned Length;
470 uint32_t MRSReg;
471 uint32_t MSRReg;
472 uint32_t PStateField;
473 };
474
475 struct SysCRImmOp {
476 unsigned Val;
477 };
478
479 struct PrefetchOp {
480 const char *Data;
481 unsigned Length;
482 unsigned Val;
483 };
484
485 struct PSBHintOp {
486 const char *Data;
487 unsigned Length;
488 unsigned Val;
489 };
490 struct PHintOp {
491 const char *Data;
492 unsigned Length;
493 unsigned Val;
494 };
495 struct BTIHintOp {
496 const char *Data;
497 unsigned Length;
498 unsigned Val;
499 };
500
501 struct SVCROp {
502 const char *Data;
503 unsigned Length;
504 unsigned PStateField;
505 };
506
507 union {
508 struct TokOp Tok;
509 struct RegOp Reg;
510 struct MatrixRegOp MatrixReg;
511 struct MatrixTileListOp MatrixTileList;
512 struct VectorListOp VectorList;
513 struct VectorIndexOp VectorIndex;
514 struct ImmOp Imm;
515 struct ShiftedImmOp ShiftedImm;
516 struct ImmRangeOp ImmRange;
517 struct CondCodeOp CondCode;
518 struct FPImmOp FPImm;
519 struct BarrierOp Barrier;
520 struct SysRegOp SysReg;
521 struct SysCRImmOp SysCRImm;
522 struct PrefetchOp Prefetch;
523 struct PSBHintOp PSBHint;
524 struct PHintOp PHint;
525 struct BTIHintOp BTIHint;
526 struct ShiftExtendOp ShiftExtend;
527 struct SVCROp SVCR;
528 };
529
530 // Keep the MCContext around, as the MCExprs may need to be manipulated
531 // during the add<>Operands() calls.
532 MCContext &Ctx;
533
534public:
535 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
536
537 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
538 Kind = o.Kind;
539 StartLoc = o.StartLoc;
540 EndLoc = o.EndLoc;
541 switch (Kind) {
542 case k_Token:
543 Tok = o.Tok;
544 break;
545 case k_Immediate:
546 Imm = o.Imm;
547 break;
548 case k_ShiftedImm:
549 ShiftedImm = o.ShiftedImm;
550 break;
551 case k_ImmRange:
552 ImmRange = o.ImmRange;
553 break;
554 case k_CondCode:
555 CondCode = o.CondCode;
556 break;
557 case k_FPImm:
558 FPImm = o.FPImm;
559 break;
560 case k_Barrier:
561 Barrier = o.Barrier;
562 break;
563 case k_Register:
564 Reg = o.Reg;
565 break;
566 case k_MatrixRegister:
567 MatrixReg = o.MatrixReg;
568 break;
569 case k_MatrixTileList:
570 MatrixTileList = o.MatrixTileList;
571 break;
572 case k_VectorList:
573 VectorList = o.VectorList;
574 break;
575 case k_VectorIndex:
576 VectorIndex = o.VectorIndex;
577 break;
578 case k_SysReg:
579 SysReg = o.SysReg;
580 break;
581 case k_SysCR:
582 SysCRImm = o.SysCRImm;
583 break;
584 case k_Prefetch:
585 Prefetch = o.Prefetch;
586 break;
587 case k_PSBHint:
588 PSBHint = o.PSBHint;
589 break;
590 case k_PHint:
591 PHint = o.PHint;
592 break;
593 case k_BTIHint:
594 BTIHint = o.BTIHint;
595 break;
596 case k_ShiftExtend:
597 ShiftExtend = o.ShiftExtend;
598 break;
599 case k_SVCR:
600 SVCR = o.SVCR;
601 break;
602 }
603 }
604
605 /// getStartLoc - Get the location of the first token of this operand.
606 SMLoc getStartLoc() const override { return StartLoc; }
607 /// getEndLoc - Get the location of the last token of this operand.
608 SMLoc getEndLoc() const override { return EndLoc; }
609
610 StringRef getToken() const {
611 assert(Kind == k_Token && "Invalid access!");
612 return StringRef(Tok.Data, Tok.Length);
613 }
614
615 bool isTokenSuffix() const {
616 assert(Kind == k_Token && "Invalid access!");
617 return Tok.IsSuffix;
618 }
619
620 const MCExpr *getImm() const {
621 assert(Kind == k_Immediate && "Invalid access!");
622 return Imm.Val;
623 }
624
625 const MCExpr *getShiftedImmVal() const {
626 assert(Kind == k_ShiftedImm && "Invalid access!");
627 return ShiftedImm.Val;
628 }
629
630 unsigned getShiftedImmShift() const {
631 assert(Kind == k_ShiftedImm && "Invalid access!");
632 return ShiftedImm.ShiftAmount;
633 }
634
635 unsigned getFirstImmVal() const {
636 assert(Kind == k_ImmRange && "Invalid access!");
637 return ImmRange.First;
638 }
639
640 unsigned getLastImmVal() const {
641 assert(Kind == k_ImmRange && "Invalid access!");
642 return ImmRange.Last;
643 }
644
645 AArch64CC::CondCode getCondCode() const {
646 assert(Kind == k_CondCode && "Invalid access!");
647 return CondCode.Code;
648 }
649
650 APFloat getFPImm() const {
651 assert (Kind == k_FPImm && "Invalid access!");
652 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
653 }
654
655 bool getFPImmIsExact() const {
656 assert (Kind == k_FPImm && "Invalid access!");
657 return FPImm.IsExact;
658 }
659
660 unsigned getBarrier() const {
661 assert(Kind == k_Barrier && "Invalid access!");
662 return Barrier.Val;
663 }
664
665 StringRef getBarrierName() const {
666 assert(Kind == k_Barrier && "Invalid access!");
667 return StringRef(Barrier.Data, Barrier.Length);
668 }
669
670 bool getBarriernXSModifier() const {
671 assert(Kind == k_Barrier && "Invalid access!");
672 return Barrier.HasnXSModifier;
673 }
674
675 MCRegister getReg() const override {
676 assert(Kind == k_Register && "Invalid access!");
677 return Reg.RegNum;
678 }
679
680 unsigned getMatrixReg() const {
681 assert(Kind == k_MatrixRegister && "Invalid access!");
682 return MatrixReg.RegNum;
683 }
684
685 unsigned getMatrixElementWidth() const {
686 assert(Kind == k_MatrixRegister && "Invalid access!");
687 return MatrixReg.ElementWidth;
688 }
689
690 MatrixKind getMatrixKind() const {
691 assert(Kind == k_MatrixRegister && "Invalid access!");
692 return MatrixReg.Kind;
693 }
694
695 unsigned getMatrixTileListRegMask() const {
696 assert(isMatrixTileList() && "Invalid access!");
697 return MatrixTileList.RegMask;
698 }
699
700 RegConstraintEqualityTy getRegEqualityTy() const {
701 assert(Kind == k_Register && "Invalid access!");
702 return Reg.EqualityTy;
703 }
704
705 unsigned getVectorListStart() const {
706 assert(Kind == k_VectorList && "Invalid access!");
707 return VectorList.RegNum;
708 }
709
710 unsigned getVectorListCount() const {
711 assert(Kind == k_VectorList && "Invalid access!");
712 return VectorList.Count;
713 }
714
715 unsigned getVectorListStride() const {
716 assert(Kind == k_VectorList && "Invalid access!");
717 return VectorList.Stride;
718 }
719
720 int getVectorIndex() const {
721 assert(Kind == k_VectorIndex && "Invalid access!");
722 return VectorIndex.Val;
723 }
724
725 StringRef getSysReg() const {
726 assert(Kind == k_SysReg && "Invalid access!");
727 return StringRef(SysReg.Data, SysReg.Length);
728 }
729
730 unsigned getSysCR() const {
731 assert(Kind == k_SysCR && "Invalid access!");
732 return SysCRImm.Val;
733 }
734
735 unsigned getPrefetch() const {
736 assert(Kind == k_Prefetch && "Invalid access!");
737 return Prefetch.Val;
738 }
739
740 unsigned getPSBHint() const {
741 assert(Kind == k_PSBHint && "Invalid access!");
742 return PSBHint.Val;
743 }
744
745 unsigned getPHint() const {
746 assert(Kind == k_PHint && "Invalid access!");
747 return PHint.Val;
748 }
749
750 StringRef getPSBHintName() const {
751 assert(Kind == k_PSBHint && "Invalid access!");
752 return StringRef(PSBHint.Data, PSBHint.Length);
753 }
754
755 StringRef getPHintName() const {
756 assert(Kind == k_PHint && "Invalid access!");
757 return StringRef(PHint.Data, PHint.Length);
758 }
759
760 unsigned getBTIHint() const {
761 assert(Kind == k_BTIHint && "Invalid access!");
762 return BTIHint.Val;
763 }
764
765 StringRef getBTIHintName() const {
766 assert(Kind == k_BTIHint && "Invalid access!");
767 return StringRef(BTIHint.Data, BTIHint.Length);
768 }
769
770 StringRef getSVCR() const {
771 assert(Kind == k_SVCR && "Invalid access!");
772 return StringRef(SVCR.Data, SVCR.Length);
773 }
774
775 StringRef getPrefetchName() const {
776 assert(Kind == k_Prefetch && "Invalid access!");
777 return StringRef(Prefetch.Data, Prefetch.Length);
778 }
779
780 AArch64_AM::ShiftExtendType getShiftExtendType() const {
781 if (Kind == k_ShiftExtend)
782 return ShiftExtend.Type;
783 if (Kind == k_Register)
784 return Reg.ShiftExtend.Type;
785 llvm_unreachable("Invalid access!");
786 }
787
788 unsigned getShiftExtendAmount() const {
789 if (Kind == k_ShiftExtend)
790 return ShiftExtend.Amount;
791 if (Kind == k_Register)
792 return Reg.ShiftExtend.Amount;
793 llvm_unreachable("Invalid access!");
794 }
795
796 bool hasShiftExtendAmount() const {
797 if (Kind == k_ShiftExtend)
798 return ShiftExtend.HasExplicitAmount;
799 if (Kind == k_Register)
800 return Reg.ShiftExtend.HasExplicitAmount;
801 llvm_unreachable("Invalid access!");
802 }
803
804 bool isImm() const override { return Kind == k_Immediate; }
805 bool isMem() const override { return false; }
806
807 bool isUImm6() const {
808 if (!isImm())
809 return false;
810 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
811 if (!MCE)
812 return false;
813 int64_t Val = MCE->getValue();
814 return (Val >= 0 && Val < 64);
815 }
816
817 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
818
819 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
820 return isImmScaled<Bits, Scale>(true);
821 }
822
823 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
824 DiagnosticPredicate isUImmScaled() const {
825 if (IsRange && isImmRange() &&
826 (getLastImmVal() != getFirstImmVal() + Offset))
827 return DiagnosticPredicateTy::NoMatch;
828
829 return isImmScaled<Bits, Scale, IsRange>(false);
830 }
831
832 template <int Bits, int Scale, bool IsRange = false>
833 DiagnosticPredicate isImmScaled(bool Signed) const {
834 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
835 (isImmRange() && !IsRange))
836 return DiagnosticPredicateTy::NoMatch;
837
838 int64_t Val;
839 if (isImmRange())
840 Val = getFirstImmVal();
841 else {
842 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
843 if (!MCE)
844 return DiagnosticPredicateTy::NoMatch;
845 Val = MCE->getValue();
846 }
847
848 int64_t MinVal, MaxVal;
849 if (Signed) {
850 int64_t Shift = Bits - 1;
851 MinVal = (int64_t(1) << Shift) * -Scale;
852 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
853 } else {
854 MinVal = 0;
855 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
856 }
857
858 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
859 return DiagnosticPredicateTy::Match;
860
861 return DiagnosticPredicateTy::NearMatch;
862 }
863
864 DiagnosticPredicate isSVEPattern() const {
865 if (!isImm())
866 return DiagnosticPredicateTy::NoMatch;
867 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
868 if (!MCE)
869 return DiagnosticPredicateTy::NoMatch;
870 int64_t Val = MCE->getValue();
871 if (Val >= 0 && Val < 32)
872 return DiagnosticPredicateTy::Match;
873 return DiagnosticPredicateTy::NearMatch;
874 }
875
876 DiagnosticPredicate isSVEVecLenSpecifier() const {
877 if (!isImm())
878 return DiagnosticPredicateTy::NoMatch;
879 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
880 if (!MCE)
881 return DiagnosticPredicateTy::NoMatch;
882 int64_t Val = MCE->getValue();
883 if (Val >= 0 && Val <= 1)
884 return DiagnosticPredicateTy::Match;
885 return DiagnosticPredicateTy::NearMatch;
886 }
887
888 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
889 AArch64MCExpr::VariantKind ELFRefKind;
890 MCSymbolRefExpr::VariantKind DarwinRefKind;
891 int64_t Addend;
892 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
893 Addend)) {
894 // If we don't understand the expression, assume the best and
895 // let the fixup and relocation code deal with it.
896 return true;
897 }
898
899 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
900 ELFRefKind == AArch64MCExpr::VK_LO12 ||
901 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
902 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
903 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
904 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
905 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
906 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
908 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
910 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
911 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
912 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
913 // Note that we don't range-check the addend. It's adjusted modulo page
914 // size when converted, so there is no "out of range" condition when using
915 // @pageoff.
916 return true;
917 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
918 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
919 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
920 return Addend == 0;
921 }
922
923 return false;
924 }
925
926 template <int Scale> bool isUImm12Offset() const {
927 if (!isImm())
928 return false;
929
930 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
931 if (!MCE)
932 return isSymbolicUImm12Offset(getImm());
933
934 int64_t Val = MCE->getValue();
935 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
936 }
937
938 template <int N, int M>
939 bool isImmInRange() const {
940 if (!isImm())
941 return false;
942 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
943 if (!MCE)
944 return false;
945 int64_t Val = MCE->getValue();
946 return (Val >= N && Val <= M);
947 }
948
949 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
950 // a logical immediate can always be represented when inverted.
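// Added illustration (editorial): 0x00ff00ff00ff00ff is a valid 64-bit logical
// immediate (0x00ff replicated in 16-bit elements), and so is its bitwise NOT,
// 0xff00ff00ff00ff00.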
951 template <typename T>
952 bool isLogicalImm() const {
953 if (!isImm())
954 return false;
955 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
956 if (!MCE)
957 return false;
958
959 int64_t Val = MCE->getValue();
960 // Avoid left shift by 64 directly.
961 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
962 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
963 if ((Val & Upper) && (Val & Upper) != Upper)
964 return false;
965
966 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
967 }
968
969 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
970
971 bool isImmRange() const { return Kind == k_ImmRange; }
972
973 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
974 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
975 /// immediate that can be shifted by 'Shift'.
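// Added illustration (editorial): getShiftedVal<12>() yields (1, 12) for the
// plain immediate 0x1000 and (5, 0) for the plain immediate 5.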
976 template <unsigned Width>
977 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
978 if (isShiftedImm() && Width == getShiftedImmShift())
979 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
980 return std::make_pair(CE->getValue(), Width);
981
982 if (isImm())
983 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
984 int64_t Val = CE->getValue();
985 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
986 return std::make_pair(Val >> Width, Width);
987 else
988 return std::make_pair(Val, 0u);
989 }
990
991 return {};
992 }
993
994 bool isAddSubImm() const {
995 if (!isShiftedImm() && !isImm())
996 return false;
997
998 const MCExpr *Expr;
999
1000 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
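// Added illustration (editorial): 'add x0, x1, #1, lsl #12' adds 4096; any
// other shift amount, e.g. 'lsl #8', is rejected here.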
1001 if (isShiftedImm()) {
1002 unsigned Shift = ShiftedImm.ShiftAmount;
1003 Expr = ShiftedImm.Val;
1004 if (Shift != 0 && Shift != 12)
1005 return false;
1006 } else {
1007 Expr = getImm();
1008 }
1009
1010 AArch64MCExpr::VariantKind ELFRefKind;
1011 MCSymbolRefExpr::VariantKind DarwinRefKind;
1012 int64_t Addend;
1013 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
1014 DarwinRefKind, Addend)) {
1015 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
1016 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
1017 (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) ||
1018 ELFRefKind == AArch64MCExpr::VK_LO12 ||
1019 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
1020 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
1021 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
1022 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
1023 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
1024 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
1025 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
1026 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
1028 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
1029 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
1030 }
1031
1032 // If it's a constant, it should be a real immediate in range.
1033 if (auto ShiftedVal = getShiftedVal<12>())
1034 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1035
1036 // If it's an expression, we hope for the best and let the fixup/relocation
1037 // code deal with it.
1038 return true;
1039 }
1040
1041 bool isAddSubImmNeg() const {
1042 if (!isShiftedImm() && !isImm())
1043 return false;
1044
1045 // Otherwise it should be a real negative immediate in range.
1046 if (auto ShiftedVal = getShiftedVal<12>())
1047 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1048
1049 return false;
1050 }
1051
1052 // Signed value in the range -128 to +127. For element widths of
1053 // 16 bits or higher it may also be a signed multiple of 256 in the
1054 // range -32768 to +32512.
1055 // For element-width of 8 bits a range of -128 to 255 is accepted,
1056 // since a copy of a byte can be either signed/unsigned.
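// Added illustration (editorial): for 16-bit elements the extremes are
// #-32768 (#-128, lsl #8) and #32512 (#127, lsl #8).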
1057 template <typename T>
1058 DiagnosticPredicate isSVECpyImm() const {
1059 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1060 return DiagnosticPredicateTy::NoMatch;
1061
1062 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1063 std::is_same<int8_t, T>::value;
1064 if (auto ShiftedImm = getShiftedVal<8>())
1065 if (!(IsByte && ShiftedImm->second) &&
1066 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1067 << ShiftedImm->second))
1068 return DiagnosticPredicateTy::Match;
1069
1070 return DiagnosticPredicateTy::NearMatch;
1071 }
1072
1073 // Unsigned value in the range 0 to 255. For element widths of
1074 // 16 bits or higher it may also be a signed multiple of 256 in the
1075 // range 0 to 65280.
1076 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1077 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1078 return DiagnosticPredicateTy::NoMatch;
1079
1080 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1081 std::is_same<int8_t, T>::value;
1082 if (auto ShiftedImm = getShiftedVal<8>())
1083 if (!(IsByte && ShiftedImm->second) &&
1084 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1085 << ShiftedImm->second))
1086 return DiagnosticPredicateTy::Match;
1087
1088 return DiagnosticPredicateTy::NearMatch;
1089 }
1090
1091 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1092 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1093 return DiagnosticPredicateTy::Match;
1094 return DiagnosticPredicateTy::NoMatch;
1095 }
1096
1097 bool isCondCode() const { return Kind == k_CondCode; }
1098
1099 bool isSIMDImmType10() const {
1100 if (!isImm())
1101 return false;
1102 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1103 if (!MCE)
1104 return false;
1105 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
1106 }
1107
1108 template<int N>
1109 bool isBranchTarget() const {
1110 if (!isImm())
1111 return false;
1112 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1113 if (!MCE)
1114 return true;
1115 int64_t Val = MCE->getValue();
1116 if (Val & 0x3)
1117 return false;
1118 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1119 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1120 }
1121
1122 bool
1123 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1124 if (!isImm())
1125 return false;
1126
1127 AArch64MCExpr::VariantKind ELFRefKind;
1128 MCSymbolRefExpr::VariantKind DarwinRefKind;
1129 int64_t Addend;
1130 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1131 DarwinRefKind, Addend)) {
1132 return false;
1133 }
1134 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1135 return false;
1136
1137 return llvm::is_contained(AllowedModifiers, ELFRefKind);
1138 }
1139
1140 bool isMovWSymbolG3() const {
1141 return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
1142 }
1143
1144 bool isMovWSymbolG2() const {
1145 return isMovWSymbol(
1146 {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
1147 AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
1148 AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
1149 AArch64MCExpr::VK_DTPREL_G2});
1150 }
1151
1152 bool isMovWSymbolG1() const {
1153 return isMovWSymbol(
1154 {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
1155 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
1156 AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
1157 AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
1158 AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
1159 }
1160
1161 bool isMovWSymbolG0() const {
1162 return isMovWSymbol(
1163 {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
1164 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
1165 AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
1166 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
1167 AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
1168 }
1169
1170 template<int RegWidth, int Shift>
1171 bool isMOVZMovAlias() const {
1172 if (!isImm()) return false;
1173
1174 const MCExpr *E = getImm();
1175 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1176 uint64_t Value = CE->getValue();
1177
1178 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1179 }
1180 // Only supports the case of Shift being 0 if an expression is used as an
1181 // operand
1182 return !Shift && E;
1183 }
1184
1185 template<int RegWidth, int Shift>
1186 bool isMOVNMovAlias() const {
1187 if (!isImm()) return false;
1188
1189 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1190 if (!CE) return false;
1191 uint64_t Value = CE->getValue();
1192
1193 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1194 }
1195
1196 bool isFPImm() const {
1197 return Kind == k_FPImm &&
1198 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1199 }
1200
1201 bool isBarrier() const {
1202 return Kind == k_Barrier && !getBarriernXSModifier();
1203 }
1204 bool isBarriernXS() const {
1205 return Kind == k_Barrier && getBarriernXSModifier();
1206 }
1207 bool isSysReg() const { return Kind == k_SysReg; }
1208
1209 bool isMRSSystemRegister() const {
1210 if (!isSysReg()) return false;
1211
1212 return SysReg.MRSReg != -1U;
1213 }
1214
1215 bool isMSRSystemRegister() const {
1216 if (!isSysReg()) return false;
1217 return SysReg.MSRReg != -1U;
1218 }
1219
1220 bool isSystemPStateFieldWithImm0_1() const {
1221 if (!isSysReg()) return false;
1222 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1223 }
1224
1225 bool isSystemPStateFieldWithImm0_15() const {
1226 if (!isSysReg())
1227 return false;
1228 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1229 }
1230
1231 bool isSVCR() const {
1232 if (Kind != k_SVCR)
1233 return false;
1234 return SVCR.PStateField != -1U;
1235 }
1236
1237 bool isReg() const override {
1238 return Kind == k_Register;
1239 }
1240
1241 bool isVectorList() const { return Kind == k_VectorList; }
1242
1243 bool isScalarReg() const {
1244 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1245 }
1246
1247 bool isNeonVectorReg() const {
1248 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1249 }
1250
1251 bool isNeonVectorRegLo() const {
1252 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1253 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1254 Reg.RegNum) ||
1255 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1256 Reg.RegNum));
1257 }
1258
1259 bool isNeonVectorReg0to7() const {
1260 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1261 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1262 Reg.RegNum));
1263 }
1264
1265 bool isMatrix() const { return Kind == k_MatrixRegister; }
1266 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1267
1268 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1269 RegKind RK;
1270 switch (Class) {
1271 case AArch64::PPRRegClassID:
1272 case AArch64::PPR_3bRegClassID:
1273 case AArch64::PPR_p8to15RegClassID:
1274 case AArch64::PNRRegClassID:
1275 case AArch64::PNR_p8to15RegClassID:
1276 case AArch64::PPRorPNRRegClassID:
1277 RK = RegKind::SVEPredicateAsCounter;
1278 break;
1279 default:
1280 llvm_unreachable("Unsupported register class");
1281 }
1282
1283 return (Kind == k_Register && Reg.Kind == RK) &&
1284 AArch64MCRegisterClasses[Class].contains(getReg());
1285 }
1286
1287 template <unsigned Class> bool isSVEVectorReg() const {
1288 RegKind RK;
1289 switch (Class) {
1290 case AArch64::ZPRRegClassID:
1291 case AArch64::ZPR_3bRegClassID:
1292 case AArch64::ZPR_4bRegClassID:
1293 case AArch64::ZPRMul2_LoRegClassID:
1294 case AArch64::ZPRMul2_HiRegClassID:
1295 case AArch64::ZPR_KRegClassID:
1296 RK = RegKind::SVEDataVector;
1297 break;
1298 case AArch64::PPRRegClassID:
1299 case AArch64::PPR_3bRegClassID:
1300 case AArch64::PPR_p8to15RegClassID:
1301 case AArch64::PNRRegClassID:
1302 case AArch64::PNR_p8to15RegClassID:
1303 case AArch64::PPRorPNRRegClassID:
1304 RK = RegKind::SVEPredicateVector;
1305 break;
1306 default:
1307 llvm_unreachable("Unsupported register class");
1308 }
1309
1310 return (Kind == k_Register && Reg.Kind == RK) &&
1311 AArch64MCRegisterClasses[Class].contains(getReg());
1312 }
1313
1314 template <unsigned Class> bool isFPRasZPR() const {
1315 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1316 AArch64MCRegisterClasses[Class].contains(getReg());
1317 }
1318
1319 template <int ElementWidth, unsigned Class>
1320 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1321 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1322 return DiagnosticPredicateTy::NoMatch;
1323
1324 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1325 return DiagnosticPredicateTy::Match;
1326
1327 return DiagnosticPredicateTy::NearMatch;
1328 }
1329
1330 template <int ElementWidth, unsigned Class>
1331 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1332 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1333 Reg.Kind != RegKind::SVEPredicateVector))
1334 return DiagnosticPredicateTy::NoMatch;
1335
1336 if ((isSVEPredicateAsCounterReg<Class>() ||
1337 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1338 Reg.ElementWidth == ElementWidth)
1339 return DiagnosticPredicateTy::Match;
1340
1341 return DiagnosticPredicateTy::NearMatch;
1342 }
1343
1344 template <int ElementWidth, unsigned Class>
1345 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1346 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1347 return DiagnosticPredicateTy::NoMatch;
1348
1349 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1350 return DiagnosticPredicateTy::Match;
1351
1352 return DiagnosticPredicateTy::NearMatch;
1353 }
1354
1355 template <int ElementWidth, unsigned Class>
1356 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1357 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1358 return DiagnosticPredicateTy::NoMatch;
1359
1360 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1361 return DiagnosticPredicateTy::Match;
1362
1363 return DiagnosticPredicateTy::NearMatch;
1364 }
1365
1366 template <int ElementWidth, unsigned Class,
1367 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1368 bool ShiftWidthAlwaysSame>
1369 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1370 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1371 if (!VectorMatch.isMatch())
1372 return DiagnosticPredicateTy::NoMatch;
1373
1374 // Give a more specific diagnostic when the user has explicitly typed in
1375 // a shift-amount that does not match what is expected, but for which
1376 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1377 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1378 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1379 ShiftExtendTy == AArch64_AM::SXTW) &&
1380 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1381 return DiagnosticPredicateTy::NoMatch;
1382
1383 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1384 return DiagnosticPredicateTy::Match;
1385
1386 return DiagnosticPredicateTy::NearMatch;
1387 }
1388
1389 bool isGPR32as64() const {
1390 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1391 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1392 }
1393
1394 bool isGPR64as32() const {
1395 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1396 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1397 }
1398
1399 bool isGPR64x8() const {
1400 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1401 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1402 Reg.RegNum);
1403 }
1404
1405 bool isWSeqPair() const {
1406 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1407 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1408 Reg.RegNum);
1409 }
1410
1411 bool isXSeqPair() const {
1412 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1413 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1414 Reg.RegNum);
1415 }
1416
1417 bool isSyspXzrPair() const {
1418 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1419 }
1420
1421 template<int64_t Angle, int64_t Remainder>
1422 DiagnosticPredicate isComplexRotation() const {
1423 if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1424
1425 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1426 if (!CE) return DiagnosticPredicateTy::NoMatch;
1427 uint64_t Value = CE->getValue();
1428
1429 if (Value % Angle == Remainder && Value <= 270)
1430 return DiagnosticPredicateTy::Match;
1431 return DiagnosticPredicateTy::NearMatch;
1432 }
1433
1434 template <unsigned RegClassID> bool isGPR64() const {
1435 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1436 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1437 }
1438
1439 template <unsigned RegClassID, int ExtWidth>
1440 DiagnosticPredicate isGPR64WithShiftExtend() const {
1441 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1442 return DiagnosticPredicateTy::NoMatch;
1443
1444 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1445 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1446 return DiagnosticPredicateTy::Match;
1447 return DiagnosticPredicateTy::NearMatch;
1448 }
1449
1450 /// Is this a vector list with the type implicit (presumably attached to the
1451 /// instruction itself)?
1452 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1453 bool isImplicitlyTypedVectorList() const {
1454 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1455 VectorList.NumElements == 0 &&
1456 VectorList.RegisterKind == VectorKind &&
1457 (!IsConsecutive || (VectorList.Stride == 1));
1458 }
1459
1460 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1461 unsigned ElementWidth, unsigned Stride = 1>
1462 bool isTypedVectorList() const {
1463 if (Kind != k_VectorList)
1464 return false;
1465 if (VectorList.Count != NumRegs)
1466 return false;
1467 if (VectorList.RegisterKind != VectorKind)
1468 return false;
1469 if (VectorList.ElementWidth != ElementWidth)
1470 return false;
1471 if (VectorList.Stride != Stride)
1472 return false;
1473 return VectorList.NumElements == NumElements;
1474 }
1475
1476 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1477 unsigned ElementWidth, unsigned RegClass>
1478 DiagnosticPredicate isTypedVectorListMultiple() const {
1479 bool Res =
1480 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1481 if (!Res)
1482 return DiagnosticPredicateTy::NoMatch;
1483 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1484 return DiagnosticPredicateTy::NearMatch;
1485 return DiagnosticPredicateTy::Match;
1486 }
1487
1488 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1489 unsigned ElementWidth>
1490 DiagnosticPredicate isTypedVectorListStrided() const {
1491 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1492 ElementWidth, Stride>();
1493 if (!Res)
1494 return DiagnosticPredicateTy::NoMatch;
1495 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1496 ((VectorList.RegNum >= AArch64::Z16) &&
1497 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1498 return DiagnosticPredicateTy::Match;
1499 return DiagnosticPredicateTy::NoMatch;
1500 }
1501
1502 template <int Min, int Max>
1503 DiagnosticPredicate isVectorIndex() const {
1504 if (Kind != k_VectorIndex)
1505 return DiagnosticPredicateTy::NoMatch;
1506 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1507 return DiagnosticPredicateTy::Match;
1508 return DiagnosticPredicateTy::NearMatch;
1509 }
1510
1511 bool isToken() const override { return Kind == k_Token; }
1512
1513 bool isTokenEqual(StringRef Str) const {
1514 return Kind == k_Token && getToken() == Str;
1515 }
1516 bool isSysCR() const { return Kind == k_SysCR; }
1517 bool isPrefetch() const { return Kind == k_Prefetch; }
1518 bool isPSBHint() const { return Kind == k_PSBHint; }
1519 bool isPHint() const { return Kind == k_PHint; }
1520 bool isBTIHint() const { return Kind == k_BTIHint; }
1521 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1522 bool isShifter() const {
1523 if (!isShiftExtend())
1524 return false;
1525
1526 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1527 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1528 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1529 ST == AArch64_AM::MSL);
1530 }
1531
1532 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1533 if (Kind != k_FPImm)
1534 return DiagnosticPredicateTy::NoMatch;
1535
1536 if (getFPImmIsExact()) {
1537 // Lookup the immediate from table of supported immediates.
1538 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1539 assert(Desc && "Unknown enum value");
1540
1541 // Calculate its FP value.
1542 APFloat RealVal(APFloat::IEEEdouble());
1543 auto StatusOrErr =
1544 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1545 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1546 llvm_unreachable("FP immediate is not exact");
1547
1548 if (getFPImm().bitwiseIsEqual(RealVal))
1549 return DiagnosticPredicateTy::Match;
1550 }
1551
1552 return DiagnosticPredicateTy::NearMatch;
1553 }
1554
1555 template <unsigned ImmA, unsigned ImmB>
1556 DiagnosticPredicate isExactFPImm() const {
1557 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1558 if ((Res = isExactFPImm<ImmA>()))
1559 return DiagnosticPredicateTy::Match;
1560 if ((Res = isExactFPImm<ImmB>()))
1561 return DiagnosticPredicateTy::Match;
1562 return Res;
1563 }
1564
1565 bool isExtend() const {
1566 if (!isShiftExtend())
1567 return false;
1568
1569 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1570 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1571 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1572 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1573 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1574 ET == AArch64_AM::LSL) &&
1575 getShiftExtendAmount() <= 4;
1576 }
1577
1578 bool isExtend64() const {
1579 if (!isExtend())
1580 return false;
1581 // Make sure the extend expects a 32-bit source register.
1582 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1583 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1584 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1585 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1586 }
1587
1588 bool isExtendLSL64() const {
1589 if (!isExtend())
1590 return false;
1591 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1592 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1593 ET == AArch64_AM::LSL) &&
1594 getShiftExtendAmount() <= 4;
1595 }
1596
1597 bool isLSLImm3Shift() const {
1598 if (!isShiftExtend())
1599 return false;
1600 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1601 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1602 }
1603
1604 template<int Width> bool isMemXExtend() const {
1605 if (!isExtend())
1606 return false;
1607 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1608 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1609 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1610 getShiftExtendAmount() == 0);
1611 }
1612
1613 template<int Width> bool isMemWExtend() const {
1614 if (!isExtend())
1615 return false;
1616 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1617 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1618 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1619 getShiftExtendAmount() == 0);
1620 }
1621
1622 template <unsigned width>
1623 bool isArithmeticShifter() const {
1624 if (!isShifter())
1625 return false;
1626
1627 // An arithmetic shifter is LSL, LSR, or ASR.
1628 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1629 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1630 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1631 }
1632
1633 template <unsigned width>
1634 bool isLogicalShifter() const {
1635 if (!isShifter())
1636 return false;
1637
1638 // A logical shifter is LSL, LSR, ASR or ROR.
1639 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1640 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1641 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1642 getShiftExtendAmount() < width;
1643 }
1644
1645 bool isMovImm32Shifter() const {
1646 if (!isShifter())
1647 return false;
1648
1649 // A 32-bit MOVi shifter is LSL of 0 or 16.
1650 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1651 if (ST != AArch64_AM::LSL)
1652 return false;
1653 uint64_t Val = getShiftExtendAmount();
1654 return (Val == 0 || Val == 16);
1655 }
1656
1657 bool isMovImm64Shifter() const {
1658 if (!isShifter())
1659 return false;
1660
1661 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1662 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1663 if (ST != AArch64_AM::LSL)
1664 return false;
1665 uint64_t Val = getShiftExtendAmount();
1666 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1667 }
1668
1669 bool isLogicalVecShifter() const {
1670 if (!isShifter())
1671 return false;
1672
1673 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1674 unsigned Shift = getShiftExtendAmount();
1675 return getShiftExtendType() == AArch64_AM::LSL &&
1676 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1677 }
1678
1679 bool isLogicalVecHalfWordShifter() const {
1680 if (!isLogicalVecShifter())
1681 return false;
1682
1683 // A logical vector half-word shifter is a left shift by 0 or 8.
1684 unsigned Shift = getShiftExtendAmount();
1685 return getShiftExtendType() == AArch64_AM::LSL &&
1686 (Shift == 0 || Shift == 8);
1687 }
1688
1689 bool isMoveVecShifter() const {
1690 if (!isShiftExtend())
1691 return false;
1692
1693 // A move vector shifter is an MSL shift of 8 or 16.
1694 unsigned Shift = getShiftExtendAmount();
1695 return getShiftExtendType() == AArch64_AM::MSL &&
1696 (Shift == 8 || Shift == 16);
1697 }
1698
1699 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1700 // to LDUR/STUR when the offset is not legal for the former but is for
1701 // the latter. As such, in addition to checking for being a legal unscaled
1702 // address, also check that it is not a legal scaled address. This avoids
1703 // ambiguity in the matcher.
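// Added illustration (editorial): 'ldr x0, [x1, #-8]' has no legal scaled
// (unsigned-offset) encoding, so it matches the unscaled LDUR fallback;
// 'ldr x0, [x1, #8]' is a legal scaled address and is rejected by this predicate.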
1704 template<int Width>
1705 bool isSImm9OffsetFB() const {
1706 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1707 }
1708
1709 bool isAdrpLabel() const {
1710 // Validation was handled during parsing, so we just verify that
1711 // something didn't go haywire.
1712 if (!isImm())
1713 return false;
1714
1715 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1716 int64_t Val = CE->getValue();
1717 int64_t Min = - (4096 * (1LL << (21 - 1)));
1718 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1719 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1720 }
1721
1722 return true;
1723 }
1724
1725 bool isAdrLabel() const {
1726 // Validation was handled during parsing, so we just verify that
1727 // something didn't go haywire.
1728 if (!isImm())
1729 return false;
1730
1731 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1732 int64_t Val = CE->getValue();
1733 int64_t Min = - (1LL << (21 - 1));
1734 int64_t Max = ((1LL << (21 - 1)) - 1);
1735 return Val >= Min && Val <= Max;
1736 }
1737
1738 return true;
1739 }
1740
1741 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1742 DiagnosticPredicate isMatrixRegOperand() const {
1743 if (!isMatrix())
1744 return DiagnosticPredicateTy::NoMatch;
1745 if (getMatrixKind() != Kind ||
1746 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1747 EltSize != getMatrixElementWidth())
1748 return DiagnosticPredicateTy::NearMatch;
1749 return DiagnosticPredicateTy::Match;
1750 }
1751
1752 bool isPAuthPCRelLabel16Operand() const {
1753 // PAuth PCRel16 operands are similar to regular branch targets, but only
1754 // negative values are allowed for concrete immediates, as the signing
1755 // instruction should be at a lower address.
1756 if (!isImm())
1757 return false;
1758 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1759 if (!MCE)
1760 return true;
1761 int64_t Val = MCE->getValue();
1762 if (Val & 0b11)
1763 return false;
1764 return (Val <= 0) && (Val > -(1 << 18));
1765 }
1766
1767 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1768 // Add as immediates when possible. Null MCExpr = 0.
1769 if (!Expr)
1770 Inst.addOperand(MCOperand::createImm(0));
1771 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1772 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1773 else
1774 Inst.addOperand(MCOperand::createExpr(Expr));
1775 }
1776
1777 void addRegOperands(MCInst &Inst, unsigned N) const {
1778 assert(N == 1 && "Invalid number of operands!");
1779 Inst.addOperand(MCOperand::createReg(getReg()));
1780 }
1781
1782 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1783 assert(N == 1 && "Invalid number of operands!");
1784 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1785 }
1786
1787 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1788 assert(N == 1 && "Invalid number of operands!");
1789 assert(
1790 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1791
1792 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1793 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1794 RI->getEncodingValue(getReg()));
1795
1795
1796 Inst.addOperand(MCOperand::createReg(Reg));
1797 }
1798
1799 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1800 assert(N == 1 && "Invalid number of operands!");
1801 assert(
1802 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1803
1804 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1805 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1806 RI->getEncodingValue(getReg()));
1807
1808 Inst.addOperand(MCOperand::createReg(Reg));
1809 }
1810
1811 template <int Width>
1812 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1813 unsigned Base;
1814 switch (Width) {
1815 case 8: Base = AArch64::B0; break;
1816 case 16: Base = AArch64::H0; break;
1817 case 32: Base = AArch64::S0; break;
1818 case 64: Base = AArch64::D0; break;
1819 case 128: Base = AArch64::Q0; break;
1820 default:
1821 llvm_unreachable("Unsupported width");
1822 }
1823 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1824 }
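  // Example (illustrative): the FPR-to-ZPR mapping is by register index only,
  // e.g. an operand parsed as D3 (Width == 64) is emitted as Z3, and Q7 as Z7.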
1825
1826 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1827 assert(N == 1 && "Invalid number of operands!");
1828 unsigned Reg = getReg();
1829 // Normalise to PPR
1830 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1831 Reg = Reg - AArch64::PN0 + AArch64::P0;
1832 Inst.addOperand(MCOperand::createReg(Reg));
1833 }
1834
1835 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1836 assert(N == 1 && "Invalid number of operands!");
1837 Inst.addOperand(
1838 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1839 }
1840
1841 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!");
1843 assert(
1844 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1845 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1846 }
1847
1848 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1849 assert(N == 1 && "Invalid number of operands!");
1850 assert(
1851 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1852 Inst.addOperand(MCOperand::createReg(getReg()));
1853 }
1854
1855 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1856 assert(N == 1 && "Invalid number of operands!");
1857 Inst.addOperand(MCOperand::createReg(getReg()));
1858 }
1859
1860 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1861 assert(N == 1 && "Invalid number of operands!");
1862 Inst.addOperand(MCOperand::createReg(getReg()));
1863 }
1864
1865 enum VecListIndexType {
1866 VecListIdx_DReg = 0,
1867 VecListIdx_QReg = 1,
1868 VecListIdx_ZReg = 2,
1869 VecListIdx_PReg = 3,
1870 };
1871
1872 template <VecListIndexType RegTy, unsigned NumRegs,
1873 bool IsConsecutive = false>
1874 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1875 assert(N == 1 && "Invalid number of operands!");
1876 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1877 "Expected consecutive registers");
1878 static const unsigned FirstRegs[][5] = {
1879 /* DReg */ { AArch64::Q0,
1880 AArch64::D0, AArch64::D0_D1,
1881 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1882 /* QReg */ { AArch64::Q0,
1883 AArch64::Q0, AArch64::Q0_Q1,
1884 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1885 /* ZReg */ { AArch64::Z0,
1886 AArch64::Z0, AArch64::Z0_Z1,
1887 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1888 /* PReg */ { AArch64::P0,
1889 AArch64::P0, AArch64::P0_P1 }
1890 };
1891
1892 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1893 " NumRegs must be <= 4 for ZRegs");
1894
1895 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1896 " NumRegs must be <= 2 for PRegs");
1897
1898 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1899 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1900 FirstRegs[(unsigned)RegTy][0]));
1901 }
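  // Example (illustrative): for a two-register D-form list starting at v5
  // (tracked internally as Q5), FirstReg is D0_D1 and the emitted operand is
  // D0_D1 + (Q5 - Q0), i.e. the D5_D6 tuple register.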
1902
1903 template <unsigned NumRegs>
1904 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1905 assert(N == 1 && "Invalid number of operands!");
1906 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1907
1908 switch (NumRegs) {
1909 case 2:
1910 if (getVectorListStart() < AArch64::Z16) {
1911 assert((getVectorListStart() < AArch64::Z8) &&
1912 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1913 Inst.addOperand(MCOperand::createReg(
1914 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1915 } else {
1916 assert((getVectorListStart() < AArch64::Z24) &&
1917 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1918 Inst.addOperand(MCOperand::createReg(
1919 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1920 }
1921 break;
1922 case 4:
1923 if (getVectorListStart() < AArch64::Z16) {
1924 assert((getVectorListStart() < AArch64::Z4) &&
1925 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1926 Inst.addOperand(MCOperand::createReg(
1927 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1928 } else {
1929 assert((getVectorListStart() < AArch64::Z20) &&
1930 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1931 Inst.addOperand(MCOperand::createReg(
1932 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1933 }
1934 break;
1935 default:
1936 llvm_unreachable("Unsupported number of registers for strided vec list");
1937 }
1938 }
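  // Example (illustrative): the strided list { z3.s, z11.s } starts at Z3, so
  // the emitted tuple is Z0_Z8 + (Z3 - Z0) == Z3_Z11; { z19.s, z27.s } maps to
  // Z16_Z24 + (Z19 - Z16) == Z19_Z27.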
1939
1940 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1941 assert(N == 1 && "Invalid number of operands!");
1942 unsigned RegMask = getMatrixTileListRegMask();
1943 assert(RegMask <= 0xFF && "Invalid mask!");
1944 Inst.addOperand(MCOperand::createImm(RegMask));
1945 }
1946
1947 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1948 assert(N == 1 && "Invalid number of operands!");
1949 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1950 }
1951
1952 template <unsigned ImmIs0, unsigned ImmIs1>
1953 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1954 assert(N == 1 && "Invalid number of operands!");
1955 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1956 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1957 }
1958
1959 void addImmOperands(MCInst &Inst, unsigned N) const {
1960 assert(N == 1 && "Invalid number of operands!");
1961 // If this is a pageoff symrefexpr with an addend, adjust the addend
1962 // to be only the page-offset portion. Otherwise, just add the expr
1963 // as-is.
1964 addExpr(Inst, getImm());
1965 }
1966
1967 template <int Shift>
1968 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1969 assert(N == 2 && "Invalid number of operands!");
1970 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1971 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1972 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1973 } else if (isShiftedImm()) {
1974 addExpr(Inst, getShiftedImmVal());
1975 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1976 } else {
1977 addExpr(Inst, getImm());
1978 Inst.addOperand(MCOperand::createImm(0));
1979 }
1980 }
1981
1982 template <int Shift>
1983 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1984 assert(N == 2 && "Invalid number of operands!");
1985 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1986 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1987 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1988 } else
1989 llvm_unreachable("Not a shifted negative immediate");
1990 }
1991
1992 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1993 assert(N == 1 && "Invalid number of operands!");
1994 Inst.addOperand(MCOperand::createImm(getCondCode()));
1995 }
1996
1997 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1998 assert(N == 1 && "Invalid number of operands!");
1999 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2000 if (!MCE)
2001 addExpr(Inst, getImm());
2002 else
2003 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
2004 }
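  // Example (illustrative): a constant ADRP operand is converted from a byte
  // offset to a page count, e.g. #0x3000 is emitted as immediate 3.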
2005
2006 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2007 addImmOperands(Inst, N);
2008 }
2009
2010 template<int Scale>
2011 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2012 assert(N == 1 && "Invalid number of operands!");
2013 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2014
2015 if (!MCE) {
2016 Inst.addOperand(MCOperand::createExpr(getImm()));
2017 return;
2018 }
2019 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2020 }
2021
2022 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2023 assert(N == 1 && "Invalid number of operands!");
2024 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2025 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2026 }
2027
2028 template <int Scale>
2029 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2030 assert(N == 1 && "Invalid number of operands!");
2031 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2032 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2033 }
2034
2035 template <int Scale>
2036 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2037 assert(N == 1 && "Invalid number of operands!");
2038 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2039 }
2040
2041 template <typename T>
2042 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2043 assert(N == 1 && "Invalid number of operands!");
2044 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2045 std::make_unsigned_t<T> Val = MCE->getValue();
2046 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2047 Inst.addOperand(MCOperand::createImm(encoding));
2048 }
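  // Example (illustrative): for a 64-bit logical instruction, a bitmask value
  // such as 0x00FF00FF00FF00FF is packed by encodeLogicalImmediate() into the
  // 13-bit N:immr:imms form the encoder expects; values that are not valid
  // bitmask immediates were already rejected when the operand was classified.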
2049
2050 template <typename T>
2051 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2052 assert(N == 1 && "Invalid number of operands!");
2053 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2054 std::make_unsigned_t<T> Val = ~MCE->getValue();
2055 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2056 Inst.addOperand(MCOperand::createImm(encoding));
2057 }
2058
2059 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2060 assert(N == 1 && "Invalid number of operands!");
2061 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2062 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2063 Inst.addOperand(MCOperand::createImm(encoding));
2064 }
2065
2066 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2067 // Branch operands don't encode the low bits, so shift them off
2068 // here. If it's a label, however, just put it on directly as there's
2069 // not enough information now to do anything.
2070 assert(N == 1 && "Invalid number of operands!");
2071 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2072 if (!MCE) {
2073 addExpr(Inst, getImm());
2074 return;
2075 }
2076 assert(MCE && "Invalid constant immediate operand!");
2077 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2078 }
2079
2080 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2081 // PC-relative operands don't encode the low bits, so shift them off
2082 // here. If it's a label, however, just put it on directly as there's
2083 // not enough information now to do anything.
2084 assert(N == 1 && "Invalid number of operands!");
2085 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2086 if (!MCE) {
2087 addExpr(Inst, getImm());
2088 return;
2089 }
2090 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2091 }
2092
2093 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2094 // Branch operands don't encode the low bits, so shift them off
2095 // here. If it's a label, however, just put it on directly as there's
2096 // not enough information now to do anything.
2097 assert(N == 1 && "Invalid number of operands!");
2098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2099 if (!MCE) {
2100 addExpr(Inst, getImm());
2101 return;
2102 }
2103 assert(MCE && "Invalid constant immediate operand!");
2104 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2105 }
2106
2107 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2108 // Branch operands don't encode the low bits, so shift them off
2109 // here. If it's a label, however, just put it on directly as there's
2110 // not enough information now to do anything.
2111 assert(N == 1 && "Invalid number of operands!");
2112 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2113 if (!MCE) {
2114 addExpr(Inst, getImm());
2115 return;
2116 }
2117 assert(MCE && "Invalid constant immediate operand!");
2118 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2119 }
2120
2121 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2122 // Branch operands don't encode the low bits, so shift them off
2123 // here. If it's a label, however, just put it on directly as there's
2124 // not enough information now to do anything.
2125 assert(N == 1 && "Invalid number of operands!");
2126 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2127 if (!MCE) {
2128 addExpr(Inst, getImm());
2129 return;
2130 }
2131 assert(MCE && "Invalid constant immediate operand!");
2132 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2133 }
2134
2135 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2136 assert(N == 1 && "Invalid number of operands!");
2137 Inst.addOperand(MCOperand::createImm(
2138 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2139 }
2140
2141 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2142 assert(N == 1 && "Invalid number of operands!");
2143 Inst.addOperand(MCOperand::createImm(getBarrier()));
2144 }
2145
2146 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2147 assert(N == 1 && "Invalid number of operands!");
2148 Inst.addOperand(MCOperand::createImm(getBarrier()));
2149 }
2150
2151 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2152 assert(N == 1 && "Invalid number of operands!");
2153
2154 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2155 }
2156
2157 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2158 assert(N == 1 && "Invalid number of operands!");
2159
2160 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2161 }
2162
2163 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2164 assert(N == 1 && "Invalid number of operands!");
2165
2166 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2167 }
2168
2169 void addSVCROperands(MCInst &Inst, unsigned N) const {
2170 assert(N == 1 && "Invalid number of operands!");
2171
2172 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2173 }
2174
2175 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2176 assert(N == 1 && "Invalid number of operands!");
2177
2178 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2179 }
2180
2181 void addSysCROperands(MCInst &Inst, unsigned N) const {
2182 assert(N == 1 && "Invalid number of operands!");
2183 Inst.addOperand(MCOperand::createImm(getSysCR()));
2184 }
2185
2186 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2187 assert(N == 1 && "Invalid number of operands!");
2188 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2189 }
2190
2191 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2192 assert(N == 1 && "Invalid number of operands!");
2193 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2194 }
2195
2196 void addPHintOperands(MCInst &Inst, unsigned N) const {
2197 assert(N == 1 && "Invalid number of operands!");
2198 Inst.addOperand(MCOperand::createImm(getPHint()));
2199 }
2200
2201 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2202 assert(N == 1 && "Invalid number of operands!");
2203 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2204 }
2205
2206 void addShifterOperands(MCInst &Inst, unsigned N) const {
2207 assert(N == 1 && "Invalid number of operands!");
2208 unsigned Imm =
2209 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2210 Inst.addOperand(MCOperand::createImm(Imm));
2211 }
2212
2213 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2214 assert(N == 1 && "Invalid number of operands!");
2215 unsigned Imm = getShiftExtendAmount();
2216 Inst.addOperand(MCOperand::createImm(Imm));
2217 }
2218
2219 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2220 assert(N == 1 && "Invalid number of operands!");
2221
2222 if (!isScalarReg())
2223 return;
2224
2225 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2226 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2227 .getRegister(RI->getEncodingValue(getReg()));
2228 if (Reg != AArch64::XZR)
2229 llvm_unreachable("wrong register");
2230
2231 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2232 }
2233
2234 void addExtendOperands(MCInst &Inst, unsigned N) const {
2235 assert(N == 1 && "Invalid number of operands!");
2236 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2237 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2238 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2239 Inst.addOperand(MCOperand::createImm(Imm));
2240 }
2241
2242 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2243 assert(N == 1 && "Invalid number of operands!");
2244 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2245 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2246 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2247 Inst.addOperand(MCOperand::createImm(Imm));
2248 }
2249
2250 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2251 assert(N == 2 && "Invalid number of operands!");
2252 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2253 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2254 Inst.addOperand(MCOperand::createImm(IsSigned));
2255 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2256 }
2257
2258 // For 8-bit load/store instructions with a register offset, both the
2259 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2260 // they're disambiguated by whether the shift was explicit or implicit rather
2261 // than its size.
2262 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2263 assert(N == 2 && "Invalid number of operands!");
2264 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2265 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2266 Inst.addOperand(MCOperand::createImm(IsSigned));
2267 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2268 }
2269
2270 template<int Shift>
2271 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2272 assert(N == 1 && "Invalid number of operands!");
2273
2274 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2275 if (CE) {
2276 uint64_t Value = CE->getValue();
2277 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2278 } else {
2279 addExpr(Inst, getImm());
2280 }
2281 }
2282
2283 template<int Shift>
2284 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2285 assert(N == 1 && "Invalid number of operands!");
2286
2287 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2288 uint64_t Value = CE->getValue();
2289 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2290 }
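  // Example (illustrative): the alias "mov x0, #-2" is matched as MOVN; with
  // Value == -2 and Shift == 0 the emitted 16-bit chunk is (~Value >> 0) &
  // 0xffff == 1, so the instruction encodes as MOVN x0, #1.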
2291
2292 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2293 assert(N == 1 && "Invalid number of operands!");
2294 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2295 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2296 }
2297
2298 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2299 assert(N == 1 && "Invalid number of operands!");
2300 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2301 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2302 }
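  // Example (illustrative): FCMLA-style rotations use the "even" form, so
  // #270 is emitted as 270 / 90 == 3; FCADD-style rotations use the "odd"
  // form, so #90 becomes (90 - 90) / 180 == 0 and #270 becomes 1.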
2303
2304 void print(raw_ostream &OS) const override;
2305
2306 static std::unique_ptr<AArch64Operand>
2307 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2308 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2309 Op->Tok.Data = Str.data();
2310 Op->Tok.Length = Str.size();
2311 Op->Tok.IsSuffix = IsSuffix;
2312 Op->StartLoc = S;
2313 Op->EndLoc = S;
2314 return Op;
2315 }
2316
2317 static std::unique_ptr<AArch64Operand>
2318 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2319 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2320 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2321 unsigned ShiftAmount = 0,
2322 unsigned HasExplicitAmount = false) {
2323 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2324 Op->Reg.RegNum = RegNum;
2325 Op->Reg.Kind = Kind;
2326 Op->Reg.ElementWidth = 0;
2327 Op->Reg.EqualityTy = EqTy;
2328 Op->Reg.ShiftExtend.Type = ExtTy;
2329 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2330 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2331 Op->StartLoc = S;
2332 Op->EndLoc = E;
2333 return Op;
2334 }
2335
2336 static std::unique_ptr<AArch64Operand>
2337 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2338 SMLoc S, SMLoc E, MCContext &Ctx,
2339 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2340 unsigned ShiftAmount = 0,
2341 unsigned HasExplicitAmount = false) {
2342 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2343 Kind == RegKind::SVEPredicateVector ||
2344 Kind == RegKind::SVEPredicateAsCounter) &&
2345 "Invalid vector kind");
2346 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2347 HasExplicitAmount);
2348 Op->Reg.ElementWidth = ElementWidth;
2349 return Op;
2350 }
2351
2352 static std::unique_ptr<AArch64Operand>
2353 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2354 unsigned NumElements, unsigned ElementWidth,
2355 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2356 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2357 Op->VectorList.RegNum = RegNum;
2358 Op->VectorList.Count = Count;
2359 Op->VectorList.Stride = Stride;
2360 Op->VectorList.NumElements = NumElements;
2361 Op->VectorList.ElementWidth = ElementWidth;
2362 Op->VectorList.RegisterKind = RegisterKind;
2363 Op->StartLoc = S;
2364 Op->EndLoc = E;
2365 return Op;
2366 }
2367
2368 static std::unique_ptr<AArch64Operand>
2369 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2370 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2371 Op->VectorIndex.Val = Idx;
2372 Op->StartLoc = S;
2373 Op->EndLoc = E;
2374 return Op;
2375 }
2376
2377 static std::unique_ptr<AArch64Operand>
2378 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2379 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2380 Op->MatrixTileList.RegMask = RegMask;
2381 Op->StartLoc = S;
2382 Op->EndLoc = E;
2383 return Op;
2384 }
2385
2386 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2387 const unsigned ElementWidth) {
2388 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2389 RegMap = {
2390 {{0, AArch64::ZAB0},
2391 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2392 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2393 {{8, AArch64::ZAB0},
2394 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2395 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2396 {{16, AArch64::ZAH0},
2397 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2398 {{16, AArch64::ZAH1},
2399 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2400 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2401 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2402 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2403 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2404 };
2405
2406 if (ElementWidth == 64)
2407 OutRegs.insert(Reg);
2408 else {
2409 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2410 assert(!Regs.empty() && "Invalid tile or element width!");
2411 for (auto OutReg : Regs)
2412 OutRegs.insert(OutReg);
2413 }
2414 }
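  // Example (illustrative): a 32-bit tile name such as ZAS1 expands to the
  // 64-bit tiles it overlaps, ZAD1 and ZAD5, per the RegMap table above; a
  // 64-bit tile is inserted unchanged.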
2415
2416 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2417 SMLoc E, MCContext &Ctx) {
2418 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2419 Op->Imm.Val = Val;
2420 Op->StartLoc = S;
2421 Op->EndLoc = E;
2422 return Op;
2423 }
2424
2425 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2426 unsigned ShiftAmount,
2427 SMLoc S, SMLoc E,
2428 MCContext &Ctx) {
2429 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2430 Op->ShiftedImm.Val = Val;
2431 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2432 Op->StartLoc = S;
2433 Op->EndLoc = E;
2434 return Op;
2435 }
2436
2437 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2438 unsigned Last, SMLoc S,
2439 SMLoc E,
2440 MCContext &Ctx) {
2441 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2442 Op->ImmRange.First = First;
2443 Op->ImmRange.Last = Last;
2444 Op->EndLoc = E;
2445 return Op;
2446 }
2447
2448 static std::unique_ptr<AArch64Operand>
2449 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2450 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2451 Op->CondCode.Code = Code;
2452 Op->StartLoc = S;
2453 Op->EndLoc = E;
2454 return Op;
2455 }
2456
2457 static std::unique_ptr<AArch64Operand>
2458 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2459 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2460 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2461 Op->FPImm.IsExact = IsExact;
2462 Op->StartLoc = S;
2463 Op->EndLoc = S;
2464 return Op;
2465 }
2466
2467 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2468 StringRef Str,
2469 SMLoc S,
2470 MCContext &Ctx,
2471 bool HasnXSModifier) {
2472 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2473 Op->Barrier.Val = Val;
2474 Op->Barrier.Data = Str.data();
2475 Op->Barrier.Length = Str.size();
2476 Op->Barrier.HasnXSModifier = HasnXSModifier;
2477 Op->StartLoc = S;
2478 Op->EndLoc = S;
2479 return Op;
2480 }
2481
2482 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2483 uint32_t MRSReg,
2484 uint32_t MSRReg,
2485 uint32_t PStateField,
2486 MCContext &Ctx) {
2487 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2488 Op->SysReg.Data = Str.data();
2489 Op->SysReg.Length = Str.size();
2490 Op->SysReg.MRSReg = MRSReg;
2491 Op->SysReg.MSRReg = MSRReg;
2492 Op->SysReg.PStateField = PStateField;
2493 Op->StartLoc = S;
2494 Op->EndLoc = S;
2495 return Op;
2496 }
2497
2498 static std::unique_ptr<AArch64Operand>
2499 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2500 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2501 Op->PHint.Val = Val;
2502 Op->PHint.Data = Str.data();
2503 Op->PHint.Length = Str.size();
2504 Op->StartLoc = S;
2505 Op->EndLoc = S;
2506 return Op;
2507 }
2508
2509 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2510 SMLoc E, MCContext &Ctx) {
2511 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2512 Op->SysCRImm.Val = Val;
2513 Op->StartLoc = S;
2514 Op->EndLoc = E;
2515 return Op;
2516 }
2517
2518 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2519 StringRef Str,
2520 SMLoc S,
2521 MCContext &Ctx) {
2522 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2523 Op->Prefetch.Val = Val;
2524 Op->Barrier.Data = Str.data();
2525 Op->Barrier.Length = Str.size();
2526 Op->StartLoc = S;
2527 Op->EndLoc = S;
2528 return Op;
2529 }
2530
2531 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2532 StringRef Str,
2533 SMLoc S,
2534 MCContext &Ctx) {
2535 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2536 Op->PSBHint.Val = Val;
2537 Op->PSBHint.Data = Str.data();
2538 Op->PSBHint.Length = Str.size();
2539 Op->StartLoc = S;
2540 Op->EndLoc = S;
2541 return Op;
2542 }
2543
2544 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2545 StringRef Str,
2546 SMLoc S,
2547 MCContext &Ctx) {
2548 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2549 Op->BTIHint.Val = Val | 32;
2550 Op->BTIHint.Data = Str.data();
2551 Op->BTIHint.Length = Str.size();
2552 Op->StartLoc = S;
2553 Op->EndLoc = S;
2554 return Op;
2555 }
2556
2557 static std::unique_ptr<AArch64Operand>
2558 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2559 SMLoc S, SMLoc E, MCContext &Ctx) {
2560 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2561 Op->MatrixReg.RegNum = RegNum;
2562 Op->MatrixReg.ElementWidth = ElementWidth;
2563 Op->MatrixReg.Kind = Kind;
2564 Op->StartLoc = S;
2565 Op->EndLoc = E;
2566 return Op;
2567 }
2568
2569 static std::unique_ptr<AArch64Operand>
2570 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2571 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2572 Op->SVCR.PStateField = PStateField;
2573 Op->SVCR.Data = Str.data();
2574 Op->SVCR.Length = Str.size();
2575 Op->StartLoc = S;
2576 Op->EndLoc = S;
2577 return Op;
2578 }
2579
2580 static std::unique_ptr<AArch64Operand>
2581 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2582 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2583 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2584 Op->ShiftExtend.Type = ShOp;
2585 Op->ShiftExtend.Amount = Val;
2586 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2587 Op->StartLoc = S;
2588 Op->EndLoc = E;
2589 return Op;
2590 }
2591};
2592
2593} // end anonymous namespace.
2594
2595void AArch64Operand::print(raw_ostream &OS) const {
2596 switch (Kind) {
2597 case k_FPImm:
2598 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2599 if (!getFPImmIsExact())
2600 OS << " (inexact)";
2601 OS << ">";
2602 break;
2603 case k_Barrier: {
2604 StringRef Name = getBarrierName();
2605 if (!Name.empty())
2606 OS << "<barrier " << Name << ">";
2607 else
2608 OS << "<barrier invalid #" << getBarrier() << ">";
2609 break;
2610 }
2611 case k_Immediate:
2612 OS << *getImm();
2613 break;
2614 case k_ShiftedImm: {
2615 unsigned Shift = getShiftedImmShift();
2616 OS << "<shiftedimm ";
2617 OS << *getShiftedImmVal();
2618 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2619 break;
2620 }
2621 case k_ImmRange: {
2622 OS << "<immrange ";
2623 OS << getFirstImmVal();
2624 OS << ":" << getLastImmVal() << ">";
2625 break;
2626 }
2627 case k_CondCode:
2628 OS << "<condcode " << getCondCode() << ">";
2629 break;
2630 case k_VectorList: {
2631 OS << "<vectorlist ";
2632 unsigned Reg = getVectorListStart();
2633 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2634 OS << Reg + i * getVectorListStride() << " ";
2635 OS << ">";
2636 break;
2637 }
2638 case k_VectorIndex:
2639 OS << "<vectorindex " << getVectorIndex() << ">";
2640 break;
2641 case k_SysReg:
2642 OS << "<sysreg: " << getSysReg() << '>';
2643 break;
2644 case k_Token:
2645 OS << "'" << getToken() << "'";
2646 break;
2647 case k_SysCR:
2648 OS << "c" << getSysCR();
2649 break;
2650 case k_Prefetch: {
2651 StringRef Name = getPrefetchName();
2652 if (!Name.empty())
2653 OS << "<prfop " << Name << ">";
2654 else
2655 OS << "<prfop invalid #" << getPrefetch() << ">";
2656 break;
2657 }
2658 case k_PSBHint:
2659 OS << getPSBHintName();
2660 break;
2661 case k_PHint:
2662 OS << getPHintName();
2663 break;
2664 case k_BTIHint:
2665 OS << getBTIHintName();
2666 break;
2667 case k_MatrixRegister:
2668 OS << "<matrix " << getMatrixReg() << ">";
2669 break;
2670 case k_MatrixTileList: {
2671 OS << "<matrixlist ";
2672 unsigned RegMask = getMatrixTileListRegMask();
2673 unsigned MaxBits = 8;
2674 for (unsigned I = MaxBits; I > 0; --I)
2675 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2676 OS << '>';
2677 break;
2678 }
2679 case k_SVCR: {
2680 OS << getSVCR();
2681 break;
2682 }
2683 case k_Register:
2684 OS << "<register " << getReg() << ">";
2685 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2686 break;
2687 [[fallthrough]];
2688 case k_ShiftExtend:
2689 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2690 << getShiftExtendAmount();
2691 if (!hasShiftExtendAmount())
2692 OS << "<imp>";
2693 OS << '>';
2694 break;
2695 }
2696}
2697
2698/// @name Auto-generated Match Functions
2699/// {
2700
2702
2703/// }
2704
2705static unsigned MatchNeonVectorRegName(StringRef Name) {
2706 return StringSwitch<unsigned>(Name.lower())
2707 .Case("v0", AArch64::Q0)
2708 .Case("v1", AArch64::Q1)
2709 .Case("v2", AArch64::Q2)
2710 .Case("v3", AArch64::Q3)
2711 .Case("v4", AArch64::Q4)
2712 .Case("v5", AArch64::Q5)
2713 .Case("v6", AArch64::Q6)
2714 .Case("v7", AArch64::Q7)
2715 .Case("v8", AArch64::Q8)
2716 .Case("v9", AArch64::Q9)
2717 .Case("v10", AArch64::Q10)
2718 .Case("v11", AArch64::Q11)
2719 .Case("v12", AArch64::Q12)
2720 .Case("v13", AArch64::Q13)
2721 .Case("v14", AArch64::Q14)
2722 .Case("v15", AArch64::Q15)
2723 .Case("v16", AArch64::Q16)
2724 .Case("v17", AArch64::Q17)
2725 .Case("v18", AArch64::Q18)
2726 .Case("v19", AArch64::Q19)
2727 .Case("v20", AArch64::Q20)
2728 .Case("v21", AArch64::Q21)
2729 .Case("v22", AArch64::Q22)
2730 .Case("v23", AArch64::Q23)
2731 .Case("v24", AArch64::Q24)
2732 .Case("v25", AArch64::Q25)
2733 .Case("v26", AArch64::Q26)
2734 .Case("v27", AArch64::Q27)
2735 .Case("v28", AArch64::Q28)
2736 .Case("v29", AArch64::Q29)
2737 .Case("v30", AArch64::Q30)
2738 .Case("v31", AArch64::Q31)
2739 .Default(0);
2740}
2741
2742/// Returns an optional pair of (#elements, element-width) if Suffix
2743/// is a valid vector kind. Where the number of elements in a vector
2744/// or the vector width is implicit or explicitly unknown (but still a
2745/// valid suffix kind), 0 is used.
2746static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2747 RegKind VectorKind) {
2748 std::pair<int, int> Res = {-1, -1};
2749
2750 switch (VectorKind) {
2751 case RegKind::NeonVector:
2752 Res = StringSwitch<std::pair<int, int>>(Suffix)
2753 .Case("", {0, 0})
2754 .Case(".1d", {1, 64})
2755 .Case(".1q", {1, 128})
2756 // '.2h' needed for fp16 scalar pairwise reductions
2757 .Case(".2h", {2, 16})
2758 .Case(".2b", {2, 8})
2759 .Case(".2s", {2, 32})
2760 .Case(".2d", {2, 64})
2761 // '.4b' is another special case for the ARMv8.2a dot product
2762 // operand
2763 .Case(".4b", {4, 8})
2764 .Case(".4h", {4, 16})
2765 .Case(".4s", {4, 32})
2766 .Case(".8b", {8, 8})
2767 .Case(".8h", {8, 16})
2768 .Case(".16b", {16, 8})
2769 // Accept the width neutral ones, too, for verbose syntax. If
2770 // those aren't used in the right places, the token operand won't
2771 // match so all will work out.
2772 .Case(".b", {0, 8})
2773 .Case(".h", {0, 16})
2774 .Case(".s", {0, 32})
2775 .Case(".d", {0, 64})
2776 .Default({-1, -1});
2777 break;
2778 case RegKind::SVEPredicateAsCounter:
2779 case RegKind::SVEPredicateVector:
2780 case RegKind::SVEDataVector:
2781 case RegKind::Matrix:
2782 Res = StringSwitch<std::pair<int, int>>(Suffix)
2783 .Case("", {0, 0})
2784 .Case(".b", {0, 8})
2785 .Case(".h", {0, 16})
2786 .Case(".s", {0, 32})
2787 .Case(".d", {0, 64})
2788 .Case(".q", {0, 128})
2789 .Default({-1, -1});
2790 break;
2791 default:
2792 llvm_unreachable("Unsupported RegKind");
2793 }
2794
2795 if (Res == std::make_pair(-1, -1))
2796 return std::nullopt;
2797
2798 return std::optional<std::pair<int, int>>(Res);
2799}
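// Example (illustrative): parseVectorKind(".4s", RegKind::NeonVector) yields
// {4, 32}, while the width-neutral ".s" yields {0, 32}; an unknown suffix such
// as ".3s" yields std::nullopt and is not treated as a valid vector suffix.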
2800
2801static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2802 return parseVectorKind(Suffix, VectorKind).has_value();
2803}
2804
2805static unsigned matchSVEDataVectorRegName(StringRef Name) {
2806 return StringSwitch<unsigned>(Name.lower())
2807 .Case("z0", AArch64::Z0)
2808 .Case("z1", AArch64::Z1)
2809 .Case("z2", AArch64::Z2)
2810 .Case("z3", AArch64::Z3)
2811 .Case("z4", AArch64::Z4)
2812 .Case("z5", AArch64::Z5)
2813 .Case("z6", AArch64::Z6)
2814 .Case("z7", AArch64::Z7)
2815 .Case("z8", AArch64::Z8)
2816 .Case("z9", AArch64::Z9)
2817 .Case("z10", AArch64::Z10)
2818 .Case("z11", AArch64::Z11)
2819 .Case("z12", AArch64::Z12)
2820 .Case("z13", AArch64::Z13)
2821 .Case("z14", AArch64::Z14)
2822 .Case("z15", AArch64::Z15)
2823 .Case("z16", AArch64::Z16)
2824 .Case("z17", AArch64::Z17)
2825 .Case("z18", AArch64::Z18)
2826 .Case("z19", AArch64::Z19)
2827 .Case("z20", AArch64::Z20)
2828 .Case("z21", AArch64::Z21)
2829 .Case("z22", AArch64::Z22)
2830 .Case("z23", AArch64::Z23)
2831 .Case("z24", AArch64::Z24)
2832 .Case("z25", AArch64::Z25)
2833 .Case("z26", AArch64::Z26)
2834 .Case("z27", AArch64::Z27)
2835 .Case("z28", AArch64::Z28)
2836 .Case("z29", AArch64::Z29)
2837 .Case("z30", AArch64::Z30)
2838 .Case("z31", AArch64::Z31)
2839 .Default(0);
2840}
2841
2842static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2843 return StringSwitch<unsigned>(Name.lower())
2844 .Case("p0", AArch64::P0)
2845 .Case("p1", AArch64::P1)
2846 .Case("p2", AArch64::P2)
2847 .Case("p3", AArch64::P3)
2848 .Case("p4", AArch64::P4)
2849 .Case("p5", AArch64::P5)
2850 .Case("p6", AArch64::P6)
2851 .Case("p7", AArch64::P7)
2852 .Case("p8", AArch64::P8)
2853 .Case("p9", AArch64::P9)
2854 .Case("p10", AArch64::P10)
2855 .Case("p11", AArch64::P11)
2856 .Case("p12", AArch64::P12)
2857 .Case("p13", AArch64::P13)
2858 .Case("p14", AArch64::P14)
2859 .Case("p15", AArch64::P15)
2860 .Default(0);
2861}
2862
2863static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2864 return StringSwitch<unsigned>(Name.lower())
2865 .Case("pn0", AArch64::PN0)
2866 .Case("pn1", AArch64::PN1)
2867 .Case("pn2", AArch64::PN2)
2868 .Case("pn3", AArch64::PN3)
2869 .Case("pn4", AArch64::PN4)
2870 .Case("pn5", AArch64::PN5)
2871 .Case("pn6", AArch64::PN6)
2872 .Case("pn7", AArch64::PN7)
2873 .Case("pn8", AArch64::PN8)
2874 .Case("pn9", AArch64::PN9)
2875 .Case("pn10", AArch64::PN10)
2876 .Case("pn11", AArch64::PN11)
2877 .Case("pn12", AArch64::PN12)
2878 .Case("pn13", AArch64::PN13)
2879 .Case("pn14", AArch64::PN14)
2880 .Case("pn15", AArch64::PN15)
2881 .Default(0);
2882}
2883
2884static unsigned matchMatrixTileListRegName(StringRef Name) {
2885 return StringSwitch<unsigned>(Name.lower())
2886 .Case("za0.d", AArch64::ZAD0)
2887 .Case("za1.d", AArch64::ZAD1)
2888 .Case("za2.d", AArch64::ZAD2)
2889 .Case("za3.d", AArch64::ZAD3)
2890 .Case("za4.d", AArch64::ZAD4)
2891 .Case("za5.d", AArch64::ZAD5)
2892 .Case("za6.d", AArch64::ZAD6)
2893 .Case("za7.d", AArch64::ZAD7)
2894 .Case("za0.s", AArch64::ZAS0)
2895 .Case("za1.s", AArch64::ZAS1)
2896 .Case("za2.s", AArch64::ZAS2)
2897 .Case("za3.s", AArch64::ZAS3)
2898 .Case("za0.h", AArch64::ZAH0)
2899 .Case("za1.h", AArch64::ZAH1)
2900 .Case("za0.b", AArch64::ZAB0)
2901 .Default(0);
2902}
2903
2904static unsigned matchMatrixRegName(StringRef Name) {
2905 return StringSwitch<unsigned>(Name.lower())
2906 .Case("za", AArch64::ZA)
2907 .Case("za0.q", AArch64::ZAQ0)
2908 .Case("za1.q", AArch64::ZAQ1)
2909 .Case("za2.q", AArch64::ZAQ2)
2910 .Case("za3.q", AArch64::ZAQ3)
2911 .Case("za4.q", AArch64::ZAQ4)
2912 .Case("za5.q", AArch64::ZAQ5)
2913 .Case("za6.q", AArch64::ZAQ6)
2914 .Case("za7.q", AArch64::ZAQ7)
2915 .Case("za8.q", AArch64::ZAQ8)
2916 .Case("za9.q", AArch64::ZAQ9)
2917 .Case("za10.q", AArch64::ZAQ10)
2918 .Case("za11.q", AArch64::ZAQ11)
2919 .Case("za12.q", AArch64::ZAQ12)
2920 .Case("za13.q", AArch64::ZAQ13)
2921 .Case("za14.q", AArch64::ZAQ14)
2922 .Case("za15.q", AArch64::ZAQ15)
2923 .Case("za0.d", AArch64::ZAD0)
2924 .Case("za1.d", AArch64::ZAD1)
2925 .Case("za2.d", AArch64::ZAD2)
2926 .Case("za3.d", AArch64::ZAD3)
2927 .Case("za4.d", AArch64::ZAD4)
2928 .Case("za5.d", AArch64::ZAD5)
2929 .Case("za6.d", AArch64::ZAD6)
2930 .Case("za7.d", AArch64::ZAD7)
2931 .Case("za0.s", AArch64::ZAS0)
2932 .Case("za1.s", AArch64::ZAS1)
2933 .Case("za2.s", AArch64::ZAS2)
2934 .Case("za3.s", AArch64::ZAS3)
2935 .Case("za0.h", AArch64::ZAH0)
2936 .Case("za1.h", AArch64::ZAH1)
2937 .Case("za0.b", AArch64::ZAB0)
2938 .Case("za0h.q", AArch64::ZAQ0)
2939 .Case("za1h.q", AArch64::ZAQ1)
2940 .Case("za2h.q", AArch64::ZAQ2)
2941 .Case("za3h.q", AArch64::ZAQ3)
2942 .Case("za4h.q", AArch64::ZAQ4)
2943 .Case("za5h.q", AArch64::ZAQ5)
2944 .Case("za6h.q", AArch64::ZAQ6)
2945 .Case("za7h.q", AArch64::ZAQ7)
2946 .Case("za8h.q", AArch64::ZAQ8)
2947 .Case("za9h.q", AArch64::ZAQ9)
2948 .Case("za10h.q", AArch64::ZAQ10)
2949 .Case("za11h.q", AArch64::ZAQ11)
2950 .Case("za12h.q", AArch64::ZAQ12)
2951 .Case("za13h.q", AArch64::ZAQ13)
2952 .Case("za14h.q", AArch64::ZAQ14)
2953 .Case("za15h.q", AArch64::ZAQ15)
2954 .Case("za0h.d", AArch64::ZAD0)
2955 .Case("za1h.d", AArch64::ZAD1)
2956 .Case("za2h.d", AArch64::ZAD2)
2957 .Case("za3h.d", AArch64::ZAD3)
2958 .Case("za4h.d", AArch64::ZAD4)
2959 .Case("za5h.d", AArch64::ZAD5)
2960 .Case("za6h.d", AArch64::ZAD6)
2961 .Case("za7h.d", AArch64::ZAD7)
2962 .Case("za0h.s", AArch64::ZAS0)
2963 .Case("za1h.s", AArch64::ZAS1)
2964 .Case("za2h.s", AArch64::ZAS2)
2965 .Case("za3h.s", AArch64::ZAS3)
2966 .Case("za0h.h", AArch64::ZAH0)
2967 .Case("za1h.h", AArch64::ZAH1)
2968 .Case("za0h.b", AArch64::ZAB0)
2969 .Case("za0v.q", AArch64::ZAQ0)
2970 .Case("za1v.q", AArch64::ZAQ1)
2971 .Case("za2v.q", AArch64::ZAQ2)
2972 .Case("za3v.q", AArch64::ZAQ3)
2973 .Case("za4v.q", AArch64::ZAQ4)
2974 .Case("za5v.q", AArch64::ZAQ5)
2975 .Case("za6v.q", AArch64::ZAQ6)
2976 .Case("za7v.q", AArch64::ZAQ7)
2977 .Case("za8v.q", AArch64::ZAQ8)
2978 .Case("za9v.q", AArch64::ZAQ9)
2979 .Case("za10v.q", AArch64::ZAQ10)
2980 .Case("za11v.q", AArch64::ZAQ11)
2981 .Case("za12v.q", AArch64::ZAQ12)
2982 .Case("za13v.q", AArch64::ZAQ13)
2983 .Case("za14v.q", AArch64::ZAQ14)
2984 .Case("za15v.q", AArch64::ZAQ15)
2985 .Case("za0v.d", AArch64::ZAD0)
2986 .Case("za1v.d", AArch64::ZAD1)
2987 .Case("za2v.d", AArch64::ZAD2)
2988 .Case("za3v.d", AArch64::ZAD3)
2989 .Case("za4v.d", AArch64::ZAD4)
2990 .Case("za5v.d", AArch64::ZAD5)
2991 .Case("za6v.d", AArch64::ZAD6)
2992 .Case("za7v.d", AArch64::ZAD7)
2993 .Case("za0v.s", AArch64::ZAS0)
2994 .Case("za1v.s", AArch64::ZAS1)
2995 .Case("za2v.s", AArch64::ZAS2)
2996 .Case("za3v.s", AArch64::ZAS3)
2997 .Case("za0v.h", AArch64::ZAH0)
2998 .Case("za1v.h", AArch64::ZAH1)
2999 .Case("za0v.b", AArch64::ZAB0)
3000 .Default(0);
3001}
3002
3003bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
3004 SMLoc &EndLoc) {
3005 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
3006}
3007
3008ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3009 SMLoc &EndLoc) {
3010 StartLoc = getLoc();
3011 ParseStatus Res = tryParseScalarRegister(Reg);
3012 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3013 return Res;
3014}
3015
3016// Matches a register name or register alias previously defined by '.req'
3017unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3018 RegKind Kind) {
3019 unsigned RegNum = 0;
3020 if ((RegNum = matchSVEDataVectorRegName(Name)))
3021 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3022
3023 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3024 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3025
3026 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3027 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3028
3029 if ((RegNum = MatchNeonVectorRegName(Name)))
3030 return Kind == RegKind::NeonVector ? RegNum : 0;
3031
3032 if ((RegNum = matchMatrixRegName(Name)))
3033 return Kind == RegKind::Matrix ? RegNum : 0;
3034
3035 if (Name.equals_insensitive("zt0"))
3036 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3037
3038 // The parsed register must be of RegKind Scalar
3039 if ((RegNum = MatchRegisterName(Name)))
3040 return (Kind == RegKind::Scalar) ? RegNum : 0;
3041
3042 if (!RegNum) {
3043 // Handle a few common aliases of registers.
3044 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3045 .Case("fp", AArch64::FP)
3046 .Case("lr", AArch64::LR)
3047 .Case("x31", AArch64::XZR)
3048 .Case("w31", AArch64::WZR)
3049 .Default(0))
3050 return Kind == RegKind::Scalar ? RegNum : 0;
3051
3052 // Check for aliases registered via .req. Canonicalize to lower case.
3053 // That's more consistent since register names are case insensitive, and
3054 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3055 auto Entry = RegisterReqs.find(Name.lower());
3056 if (Entry == RegisterReqs.end())
3057 return 0;
3058
3059 // set RegNum if the match is the right kind of register
3060 if (Kind == Entry->getValue().first)
3061 RegNum = Entry->getValue().second;
3062 }
3063 return RegNum;
3064}
3065
3066unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3067 switch (K) {
3068 case RegKind::Scalar:
3069 case RegKind::NeonVector:
3070 case RegKind::SVEDataVector:
3071 return 32;
3072 case RegKind::Matrix:
3073 case RegKind::SVEPredicateVector:
3074 case RegKind::SVEPredicateAsCounter:
3075 return 16;
3076 case RegKind::LookupTable:
3077 return 1;
3078 }
3079 llvm_unreachable("Unsupported RegKind");
3080}
3081
3082/// tryParseScalarRegister - Try to parse a register name. The token must be an
3083/// Identifier when called, and if it is a register name the token is eaten and
3084/// the register is added to the operand list.
3085ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3086 const AsmToken &Tok = getTok();
3087 if (Tok.isNot(AsmToken::Identifier))
3088 return ParseStatus::NoMatch;
3089
3090 std::string lowerCase = Tok.getString().lower();
3091 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3092 if (Reg == 0)
3093 return ParseStatus::NoMatch;
3094
3095 RegNum = Reg;
3096 Lex(); // Eat identifier token.
3097 return ParseStatus::Success;
3098}
3099
3100/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3101ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3102 SMLoc S = getLoc();
3103
3104 if (getTok().isNot(AsmToken::Identifier))
3105 return Error(S, "Expected cN operand where 0 <= N <= 15");
3106
3107 StringRef Tok = getTok().getIdentifier();
3108 if (Tok[0] != 'c' && Tok[0] != 'C')
3109 return Error(S, "Expected cN operand where 0 <= N <= 15");
3110
3111 uint32_t CRNum;
3112 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3113 if (BadNum || CRNum > 15)
3114 return Error(S, "Expected cN operand where 0 <= N <= 15");
3115
3116 Lex(); // Eat identifier token.
3117 Operands.push_back(
3118 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3119 return ParseStatus::Success;
3120}
3121
3122// Either an identifier for named values or a 6-bit immediate.
3123ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3124 SMLoc S = getLoc();
3125 const AsmToken &Tok = getTok();
3126
3127 unsigned MaxVal = 63;
3128
3129 // Immediate case, with optional leading hash:
3130 if (parseOptionalToken(AsmToken::Hash) ||
3131 Tok.is(AsmToken::Integer)) {
3132 const MCExpr *ImmVal;
3133 if (getParser().parseExpression(ImmVal))
3134 return ParseStatus::Failure;
3135
3136 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3137 if (!MCE)
3138 return TokError("immediate value expected for prefetch operand");
3139 unsigned prfop = MCE->getValue();
3140 if (prfop > MaxVal)
3141 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3142 "] expected");
3143
3144 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3145 Operands.push_back(AArch64Operand::CreatePrefetch(
3146 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3147 return ParseStatus::Success;
3148 }
3149
3150 if (Tok.isNot(AsmToken::Identifier))
3151 return TokError("prefetch hint expected");
3152
3153 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3154 if (!RPRFM)
3155 return TokError("prefetch hint expected");
3156
3157 Operands.push_back(AArch64Operand::CreatePrefetch(
3158 RPRFM->Encoding, Tok.getString(), S, getContext()));
3159 Lex(); // Eat identifier token.
3160 return ParseStatus::Success;
3161}
3162
3163/// tryParsePrefetch - Try to parse a prefetch operand.
3164template <bool IsSVEPrefetch>
3165ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3166 SMLoc S = getLoc();
3167 const AsmToken &Tok = getTok();
3168
3169 auto LookupByName = [](StringRef N) {
3170 if (IsSVEPrefetch) {
3171 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3172 return std::optional<unsigned>(Res->Encoding);
3173 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3174 return std::optional<unsigned>(Res->Encoding);
3175 return std::optional<unsigned>();
3176 };
3177
3178 auto LookupByEncoding = [](unsigned E) {
3179 if (IsSVEPrefetch) {
3180 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3181 return std::optional<StringRef>(Res->Name);
3182 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3183 return std::optional<StringRef>(Res->Name);
3184 return std::optional<StringRef>();
3185 };
3186 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3187
3188 // Either an identifier for named values or a 5-bit immediate.
3189 // Eat optional hash.
3190 if (parseOptionalToken(AsmToken::Hash) ||
3191 Tok.is(AsmToken::Integer)) {
3192 const MCExpr *ImmVal;
3193 if (getParser().parseExpression(ImmVal))
3194 return ParseStatus::Failure;
3195
3196 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3197 if (!MCE)
3198 return TokError("immediate value expected for prefetch operand");
3199 unsigned prfop = MCE->getValue();
3200 if (prfop > MaxVal)
3201 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3202 "] expected");
3203
3204 auto PRFM = LookupByEncoding(MCE->getValue());
3205 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3206 S, getContext()));
3207 return ParseStatus::Success;
3208 }
3209
3210 if (Tok.isNot(AsmToken::Identifier))
3211 return TokError("prefetch hint expected");
3212
3213 auto PRFM = LookupByName(Tok.getString());
3214 if (!PRFM)
3215 return TokError("prefetch hint expected");
3216
3217 Operands.push_back(AArch64Operand::CreatePrefetch(
3218 *PRFM, Tok.getString(), S, getContext()));
3219 Lex(); // Eat identifier token.
3220 return ParseStatus::Success;
3221}
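// Example (illustrative): "prfm pldl1keep, [x0]" resolves the named hint via
// lookupPRFMByName, while "prfm #5, [x0]" takes the immediate path and only
// checks the [0, 31] range (or [0, 15] for the SVE prefetch variant).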
3222
3223/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3224ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3225 SMLoc S = getLoc();
3226 const AsmToken &Tok = getTok();
3227 if (Tok.isNot(AsmToken::Identifier))
3228 return TokError("invalid operand for instruction");
3229
3230 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3231 if (!PSB)
3232 return TokError("invalid operand for instruction");
3233
3234 Operands.push_back(AArch64Operand::CreatePSBHint(
3235 PSB->Encoding, Tok.getString(), S, getContext()));
3236 Lex(); // Eat identifier token.
3237 return ParseStatus::Success;
3238}
3239
3240ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3241 SMLoc StartLoc = getLoc();
3242
3243 MCRegister RegNum;
3244
3245 // The case where xzr, xzr is not present is handled by an InstAlias.
3246
3247 auto RegTok = getTok(); // in case we need to backtrack
3248 if (!tryParseScalarRegister(RegNum).isSuccess())
3249 return ParseStatus::NoMatch;
3250
3251 if (RegNum != AArch64::XZR) {
3252 getLexer().UnLex(RegTok);
3253 return ParseStatus::NoMatch;
3254 }
3255
3256 if (parseComma())
3257 return ParseStatus::Failure;
3258
3259 if (!tryParseScalarRegister(RegNum).isSuccess())
3260 return TokError("expected register operand");
3261
3262 if (RegNum != AArch64::XZR)
3263 return TokError("xzr must be followed by xzr");
3264
3265 // We need to push something, since we claim this is an operand in .td.
3266 // See also AArch64AsmParser::parseKeywordOperand.
3267 Operands.push_back(AArch64Operand::CreateReg(
3268 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3269
3270 return ParseStatus::Success;
3271}
3272
3273/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3274ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3275 SMLoc S = getLoc();
3276 const AsmToken &Tok = getTok();
3277 if (Tok.isNot(AsmToken::Identifier))
3278 return TokError("invalid operand for instruction");
3279
3280 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3281 if (!BTI)
3282 return TokError("invalid operand for instruction");
3283
3284 Operands.push_back(AArch64Operand::CreateBTIHint(
3285 BTI->Encoding, Tok.getString(), S, getContext()));
3286 Lex(); // Eat identifier token.
3287 return ParseStatus::Success;
3288}
3289
3290/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3291/// instruction.
3292ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3293 SMLoc S = getLoc();
3294 const MCExpr *Expr = nullptr;
3295
3296 if (getTok().is(AsmToken::Hash)) {
3297 Lex(); // Eat hash token.
3298 }
3299
3300 if (parseSymbolicImmVal(Expr))
3301 return ParseStatus::Failure;
3302
3303 AArch64MCExpr::VariantKind ELFRefKind;
3304 MCSymbolRefExpr::VariantKind DarwinRefKind;
3305 int64_t Addend;
3306 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3307 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3308 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3309 // No modifier was specified at all; this is the syntax for an ELF basic
3310 // ADRP relocation (unfortunately).
3311 Expr =
3312 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
3313 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
3314 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
3315 Addend != 0) {
3316 return Error(S, "gotpage label reference not allowed an addend");
3317 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
3318 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
3319 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
3320 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
3321 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
3322 ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE &&
3323 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
3324 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
3325 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE &&
3326 ELFRefKind != AArch64MCExpr::VK_TLSDESC_AUTH_PAGE) {
3327 // The operand must be an @page or @gotpage qualified symbolref.
3328 return Error(S, "page or gotpage label reference expected");
3329 }
3330 }
3331
3332 // We have either a label reference possibly with addend or an immediate. The
3333 // addend is a raw value here. The linker will adjust it to only reference the
3334 // page.
3335 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3336 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3337
3338 return ParseStatus::Success;
3339}
3340
3341/// tryParseAdrLabel - Parse and validate a source label for the ADR
3342/// instruction.
3343ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3344 SMLoc S = getLoc();
3345 const MCExpr *Expr = nullptr;
3346
3347 // Leave anything with a bracket to the default for SVE
3348 if (getTok().is(AsmToken::LBrac))
3349 return ParseStatus::NoMatch;
3350
3351 if (getTok().is(AsmToken::Hash))
3352 Lex(); // Eat hash token.
3353
3354 if (parseSymbolicImmVal(Expr))
3355 return ParseStatus::Failure;
3356
3357 AArch64MCExpr::VariantKind ELFRefKind;
3358 MCSymbolRefExpr::VariantKind DarwinRefKind;
3359 int64_t Addend;
3360 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3361 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3362 ELFRefKind == AArch64MCExpr::VK_INVALID) {
3363 // No modifier was specified at all; this is the syntax for an ELF basic
3364 // ADR relocation (unfortunately).
3365 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3366 } else if (ELFRefKind != AArch64MCExpr::VK_GOT_AUTH_PAGE) {
3367 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3368 // adr. It's not actually GOT entry page address but the GOT address
3369 // itself - we just share the same variant kind with :got_auth: operator
3370 // applied for adrp.
3371 // TODO: can we somehow get current TargetMachine object to call
3372 // getCodeModel() on it to ensure we are using tiny code model?
3373 return Error(S, "unexpected adr label");
3374 }
3375 }
3376
3377 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3378 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3379 return ParseStatus::Success;
3380}
3381
3382/// tryParseFPImm - A floating point immediate expression operand.
3383template <bool AddFPZeroAsLiteral>
3384ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3385 SMLoc S = getLoc();
3386
3387 bool Hash = parseOptionalToken(AsmToken::Hash);
3388
3389 // Handle negation, as that still comes through as a separate token.
3390 bool isNegative = parseOptionalToken(AsmToken::Minus);
3391
3392 const AsmToken &Tok = getTok();
3393 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3394 if (!Hash)
3395 return ParseStatus::NoMatch;
3396 return TokError("invalid floating point immediate");
3397 }
3398
3399 // Parse hexadecimal representation.
3400 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3401 if (Tok.getIntVal() > 255 || isNegative)
3402 return TokError("encoded floating point value out of range");
3403
3404 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3405 Operands.push_back(
3406 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3407 } else {
3408 // Parse FP representation.
3409 APFloat RealVal(APFloat::IEEEdouble());
3410 auto StatusOrErr =
3411 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3412 if (errorToBool(StatusOrErr.takeError()))
3413 return TokError("invalid floating point representation");
3414
3415 if (isNegative)
3416 RealVal.changeSign();
3417
3418 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3419 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3420 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3421 } else
3422 Operands.push_back(AArch64Operand::CreateFPImm(
3423 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3424 }
3425
3426 Lex(); // Eat the token.
3427
3428 return ParseStatus::Success;
3429}
3430
3431/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3432/// a shift suffix, for example '#1, lsl #12'.
3433ParseStatus
3434AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3435 SMLoc S = getLoc();
3436
3437 if (getTok().is(AsmToken::Hash))
3438 Lex(); // Eat '#'
3439 else if (getTok().isNot(AsmToken::Integer))
3440 // Operand should start from # or should be integer, emit error otherwise.
3441 return ParseStatus::NoMatch;
3442
3443 if (getTok().is(AsmToken::Integer) &&
3444 getLexer().peekTok().is(AsmToken::Colon))
3445 return tryParseImmRange(Operands);
3446
3447 const MCExpr *Imm = nullptr;
3448 if (parseSymbolicImmVal(Imm))
3449 return ParseStatus::Failure;
3450 else if (getTok().isNot(AsmToken::Comma)) {
3451 Operands.push_back(
3452 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3453 return ParseStatus::Success;
3454 }
3455
3456 // Eat ','
3457 Lex();
3458 StringRef VecGroup;
3459 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3460 Operands.push_back(
3461 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3462 Operands.push_back(
3463 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3464 return ParseStatus::Success;
3465 }
3466
3467 // The optional operand must be "lsl #N" where N is non-negative.
3468 if (!getTok().is(AsmToken::Identifier) ||
3469 !getTok().getIdentifier().equals_insensitive("lsl"))
3470 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3471
3472 // Eat 'lsl'
3473 Lex();
3474
3475 parseOptionalToken(AsmToken::Hash);
3476
3477 if (getTok().isNot(AsmToken::Integer))
3478 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3479
3480 int64_t ShiftAmount = getTok().getIntVal();
3481
3482 if (ShiftAmount < 0)
3483 return Error(getLoc(), "positive shift amount required");
3484 Lex(); // Eat the number
3485
3486 // Just in case the optional lsl #0 is used for immediates other than zero.
3487 if (ShiftAmount == 0 && Imm != nullptr) {
3488 Operands.push_back(
3489 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3490 return ParseStatus::Success;
3491 }
3492
3493 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3494 getLoc(), getContext()));
3495 return ParseStatus::Success;
3496}
3497
3498 /// parseCondCodeString - Parse a Condition Code string, optionally returning a
3499 /// suggestion to help fix common typos.
3500 AArch64CC::CondCode
3501 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3502 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3503 .Case("eq", AArch64CC::EQ)
3504 .Case("ne", AArch64CC::NE)
3505 .Case("cs", AArch64CC::HS)
3506 .Case("hs", AArch64CC::HS)
3507 .Case("cc", AArch64CC::LO)
3508 .Case("lo", AArch64CC::LO)
3509 .Case("mi", AArch64CC::MI)
3510 .Case("pl", AArch64CC::PL)
3511 .Case("vs", AArch64CC::VS)
3512 .Case("vc", AArch64CC::VC)
3513 .Case("hi", AArch64CC::HI)
3514 .Case("ls", AArch64CC::LS)
3515 .Case("ge", AArch64CC::GE)
3516 .Case("lt", AArch64CC::LT)
3517 .Case("gt", AArch64CC::GT)
3518 .Case("le", AArch64CC::LE)
3519 .Case("al", AArch64CC::AL)
3520 .Case("nv", AArch64CC::NV)
3521 .Default(AArch64CC::Invalid);
3522
3523 if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3524 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3525 .Case("none", AArch64CC::EQ)
3526 .Case("any", AArch64CC::NE)
3527 .Case("nlast", AArch64CC::HS)
3528 .Case("last", AArch64CC::LO)
3529 .Case("first", AArch64CC::MI)
3530 .Case("nfrst", AArch64CC::PL)
3531 .Case("pmore", AArch64CC::HI)
3532 .Case("plast", AArch64CC::LS)
3533 .Case("tcont", AArch64CC::GE)
3534 .Case("tstop", AArch64CC::LT)
3535 .Default(AArch64CC::Invalid);
3536
3537 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3538 Suggestion = "nfrst";
3539 }
3540 return CC;
3541}
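// For illustration (not part of the original source): the SVE aliases above
// are just alternative spellings of the base NZCV codes, so with +sve a
// branch written "b.none" is parsed to the same condition code as "b.eq",
// and "b.any" to the same code as "b.ne".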
3542
3543/// parseCondCode - Parse a Condition Code operand.
3544bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3545 bool invertCondCode) {
3546 SMLoc S = getLoc();
3547 const AsmToken &Tok = getTok();
3548 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3549
3550 StringRef Cond = Tok.getString();
3551 std::string Suggestion;
3552 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3553 if (CC == AArch64CC::Invalid) {
3554 std::string Msg = "invalid condition code";
3555 if (!Suggestion.empty())
3556 Msg += ", did you mean " + Suggestion + "?";
3557 return TokError(Msg);
3558 }
3559 Lex(); // Eat identifier token.
3560
3561 if (invertCondCode) {
3562 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3563 return TokError("condition codes AL and NV are invalid for this instruction");
3564 CC = AArch64CC::getInvertedCondCode(CC);
3565 }
3566
3567 Operands.push_back(
3568 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3569 return false;
3570}
3571
3572ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3573 const AsmToken &Tok = getTok();
3574 SMLoc S = getLoc();
3575
3576 if (Tok.isNot(AsmToken::Identifier))
3577 return TokError("invalid operand for instruction");
3578
3579 unsigned PStateImm = -1;
3580 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3581 if (!SVCR)
3582 return ParseStatus::NoMatch;
3583 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3584 PStateImm = SVCR->Encoding;
3585
3586 Operands.push_back(
3587 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3588 Lex(); // Eat identifier token.
3589 return ParseStatus::Success;
3590}
3591
3592ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3593 const AsmToken &Tok = getTok();
3594 SMLoc S = getLoc();
3595
3596 StringRef Name = Tok.getString();
3597
3598 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3599 Lex(); // eat "za[.(b|h|s|d)]"
3600 unsigned ElementWidth = 0;
3601 auto DotPosition = Name.find('.');
3602 if (DotPosition != StringRef::npos) {
3603 const auto &KindRes =
3604 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3605 if (!KindRes)
3606 return TokError(
3607 "Expected the register to be followed by element width suffix");
3608 ElementWidth = KindRes->second;
3609 }
3610 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3611 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3612 getContext()));
3613 if (getLexer().is(AsmToken::LBrac)) {
3614 // There's no comma after matrix operand, so we can parse the next operand
3615 // immediately.
3616 if (parseOperand(Operands, false, false))
3617 return ParseStatus::NoMatch;
3618 }
3619 return ParseStatus::Success;
3620 }
3621
3622 // Try to parse matrix register.
3623 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3624 if (!Reg)
3625 return ParseStatus::NoMatch;
3626
3627 size_t DotPosition = Name.find('.');
3628 assert(DotPosition != StringRef::npos && "Unexpected register");
3629
3630 StringRef Head = Name.take_front(DotPosition);
3631 StringRef Tail = Name.drop_front(DotPosition);
3632 StringRef RowOrColumn = Head.take_back();
3633
3634 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3635 .Case("h", MatrixKind::Row)
3636 .Case("v", MatrixKind::Col)
3637 .Default(MatrixKind::Tile);
3638
3639 // Next up, parsing the suffix
3640 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3641 if (!KindRes)
3642 return TokError(
3643 "Expected the register to be followed by element width suffix");
3644 unsigned ElementWidth = KindRes->second;
3645
3646 Lex();
3647
3648 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3649 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3650
3651 if (getLexer().is(AsmToken::LBrac)) {
3652 // There's no comma after matrix operand, so we can parse the next operand
3653 // immediately.
3654 if (parseOperand(Operands, false, false))
3655 return ParseStatus::NoMatch;
3656 }
3657 return ParseStatus::Success;
3658}
3659
3660 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
3661 /// argument. Parse them if present.
3662 ParseStatus
3663 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3664 const AsmToken &Tok = getTok();
3665 std::string LowerID = Tok.getString().lower();
3666 AArch64_AM::ShiftExtendType ShOp =
3667 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3668 .Case("lsl", AArch64_AM::LSL)
3669 .Case("lsr", AArch64_AM::LSR)
3670 .Case("asr", AArch64_AM::ASR)
3671 .Case("ror", AArch64_AM::ROR)
3672 .Case("msl", AArch64_AM::MSL)
3673 .Case("uxtb", AArch64_AM::UXTB)
3674 .Case("uxth", AArch64_AM::UXTH)
3675 .Case("uxtw", AArch64_AM::UXTW)
3676 .Case("uxtx", AArch64_AM::UXTX)
3677 .Case("sxtb", AArch64_AM::SXTB)
3678 .Case("sxth", AArch64_AM::SXTH)
3679 .Case("sxtw", AArch64_AM::SXTW)
3680 .Case("sxtx", AArch64_AM::SXTX)
3681 .Default(AArch64_AM::InvalidShiftExtend);
3682
3683 if (ShOp == AArch64_AM::InvalidShiftExtend)
3684 return ParseStatus::NoMatch;
3685
3686 SMLoc S = Tok.getLoc();
3687 Lex();
3688
3689 bool Hash = parseOptionalToken(AsmToken::Hash);
3690
3691 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3692 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3693 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3694 ShOp == AArch64_AM::MSL) {
3695 // We expect a number here.
3696 return TokError("expected #imm after shift specifier");
3697 }
3698
3699 // "extend" type operations don't need an immediate, #0 is implicit.
3700 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3701 Operands.push_back(
3702 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3703 return ParseStatus::Success;
3704 }
3705
3706 // Make sure we do actually have a number, identifier or a parenthesized
3707 // expression.
3708 SMLoc E = getLoc();
3709 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3710 !getTok().is(AsmToken::Identifier))
3711 return Error(E, "expected integer shift amount");
3712
3713 const MCExpr *ImmVal;
3714 if (getParser().parseExpression(ImmVal))
3715 return ParseStatus::Failure;
3716
3717 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3718 if (!MCE)
3719 return Error(E, "expected constant '#imm' after shift specifier");
3720
3721 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3722 Operands.push_back(AArch64Operand::CreateShiftExtend(
3723 ShOp, MCE->getValue(), true, S, E, getContext()));
3724 return ParseStatus::Success;
3725}
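// Rough usage sketch (added for illustration, not in the original): a plain
// shift such as "lsl #3" in "add x0, x1, x2, lsl #3" must carry an immediate,
// whereas an extend such as "uxtw" in "add x0, x1, w2, uxtw" may omit it, in
// which case an implicit amount of 0 is recorded by the branch above.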
3726
3727static const struct Extension {
3728 const char *Name;
3729 const FeatureBitset Features;
3730} ExtensionMap[] = {
3731 {"crc", {AArch64::FeatureCRC}},
3732 {"sm4", {AArch64::FeatureSM4}},
3733 {"sha3", {AArch64::FeatureSHA3}},
3734 {"sha2", {AArch64::FeatureSHA2}},
3735 {"aes", {AArch64::FeatureAES}},
3736 {"crypto", {AArch64::FeatureCrypto}},
3737 {"fp", {AArch64::FeatureFPARMv8}},
3738 {"simd", {AArch64::FeatureNEON}},
3739 {"ras", {AArch64::FeatureRAS}},
3740 {"rasv2", {AArch64::FeatureRASv2}},
3741 {"lse", {AArch64::FeatureLSE}},
3742 {"predres", {AArch64::FeaturePredRes}},
3743 {"predres2", {AArch64::FeatureSPECRES2}},
3744 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3745 {"mte", {AArch64::FeatureMTE}},
3746 {"memtag", {AArch64::FeatureMTE}},
3747 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3748 {"pan", {AArch64::FeaturePAN}},
3749 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3750 {"ccpp", {AArch64::FeatureCCPP}},
3751 {"rcpc", {AArch64::FeatureRCPC}},
3752 {"rng", {AArch64::FeatureRandGen}},
3753 {"sve", {AArch64::FeatureSVE}},
3754 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3755 {"sve2", {AArch64::FeatureSVE2}},
3756 {"sve-aes", {AArch64::FeatureSVEAES}},
3757 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3758 {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3759 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3760 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3761 {"sve2-bitperm",
3762 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3763 AArch64::FeatureSVE2}},
3764 {"sve2p1", {AArch64::FeatureSVE2p1}},
3765 {"ls64", {AArch64::FeatureLS64}},
3766 {"xs", {AArch64::FeatureXS}},
3767 {"pauth", {AArch64::FeaturePAuth}},
3768 {"flagm", {AArch64::FeatureFlagM}},
3769 {"rme", {AArch64::FeatureRME}},
3770 {"sme", {AArch64::FeatureSME}},
3771 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3772 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3773 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3774 {"sme2", {AArch64::FeatureSME2}},
3775 {"sme2p1", {AArch64::FeatureSME2p1}},
3776 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3777 {"hbc", {AArch64::FeatureHBC}},
3778 {"mops", {AArch64::FeatureMOPS}},
3779 {"mec", {AArch64::FeatureMEC}},
3780 {"the", {AArch64::FeatureTHE}},
3781 {"d128", {AArch64::FeatureD128}},
3782 {"lse128", {AArch64::FeatureLSE128}},
3783 {"ite", {AArch64::FeatureITE}},
3784 {"cssc", {AArch64::FeatureCSSC}},
3785 {"rcpc3", {AArch64::FeatureRCPC3}},
3786 {"gcs", {AArch64::FeatureGCS}},
3787 {"bf16", {AArch64::FeatureBF16}},
3788 {"compnum", {AArch64::FeatureComplxNum}},
3789 {"dotprod", {AArch64::FeatureDotProd}},
3790 {"f32mm", {AArch64::FeatureMatMulFP32}},
3791 {"f64mm", {AArch64::FeatureMatMulFP64}},
3792 {"fp16", {AArch64::FeatureFullFP16}},
3793 {"fp16fml", {AArch64::FeatureFP16FML}},
3794 {"i8mm", {AArch64::FeatureMatMulInt8}},
3795 {"lor", {AArch64::FeatureLOR}},
3796 {"profile", {AArch64::FeatureSPE}},
3797 // "rdma" is the name documented by binutils for the feature, but
3798 // binutils also accepts incomplete prefixes of features, so "rdm"
3799 // works too. Support both spellings here.
3800 {"rdm", {AArch64::FeatureRDM}},
3801 {"rdma", {AArch64::FeatureRDM}},
3802 {"sb", {AArch64::FeatureSB}},
3803 {"ssbs", {AArch64::FeatureSSBS}},
3804 {"tme", {AArch64::FeatureTME}},
3805 {"fp8", {AArch64::FeatureFP8}},
3806 {"faminmax", {AArch64::FeatureFAMINMAX}},
3807 {"fp8fma", {AArch64::FeatureFP8FMA}},
3808 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3809 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3810 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3811 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3812 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3813 {"lut", {AArch64::FeatureLUT}},
3814 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3815 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3816 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3817 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3818 {"cpa", {AArch64::FeatureCPA}},
3819 {"tlbiw", {AArch64::FeatureTLBIW}},
3820 {"pops", {AArch64::FeaturePoPS}},
3821 {"cmpbr", {AArch64::FeatureCMPBR}},
3822 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3823 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3824 {"fprcvt", {AArch64::FeatureFPRCVT}},
3825 {"lsfe", {AArch64::FeatureLSFE}},
3826 {"sme2p2", {AArch64::FeatureSME2p2}},
3827 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3828 {"sve2p2", {AArch64::FeatureSVE2p2}},
3829 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3830 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3831 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3832 {"lsui", {AArch64::FeatureLSUI}},
3833 {"occmo", {AArch64::FeatureOCCMO}},
3834 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3835 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3836 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3837 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3838};
3839
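// Note (added for clarity; the directive handling itself lives elsewhere in
// this file): these names are the spellings accepted when enabling or
// disabling an extension from assembly, so something like
// ".arch_extension memtag" would map to AArch64::FeatureMTE via the "memtag"
// entry in the table above.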
3840static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3841 if (FBS[AArch64::HasV8_0aOps])
3842 Str += "ARMv8a";
3843 if (FBS[AArch64::HasV8_1aOps])
3844 Str += "ARMv8.1a";
3845 else if (FBS[AArch64::HasV8_2aOps])
3846 Str += "ARMv8.2a";
3847 else if (FBS[AArch64::HasV8_3aOps])
3848 Str += "ARMv8.3a";
3849 else if (FBS[AArch64::HasV8_4aOps])
3850 Str += "ARMv8.4a";
3851 else if (FBS[AArch64::HasV8_5aOps])
3852 Str += "ARMv8.5a";
3853 else if (FBS[AArch64::HasV8_6aOps])
3854 Str += "ARMv8.6a";
3855 else if (FBS[AArch64::HasV8_7aOps])
3856 Str += "ARMv8.7a";
3857 else if (FBS[AArch64::HasV8_8aOps])
3858 Str += "ARMv8.8a";
3859 else if (FBS[AArch64::HasV8_9aOps])
3860 Str += "ARMv8.9a";
3861 else if (FBS[AArch64::HasV9_0aOps])
3862 Str += "ARMv9-a";
3863 else if (FBS[AArch64::HasV9_1aOps])
3864 Str += "ARMv9.1a";
3865 else if (FBS[AArch64::HasV9_2aOps])
3866 Str += "ARMv9.2a";
3867 else if (FBS[AArch64::HasV9_3aOps])
3868 Str += "ARMv9.3a";
3869 else if (FBS[AArch64::HasV9_4aOps])
3870 Str += "ARMv9.4a";
3871 else if (FBS[AArch64::HasV9_5aOps])
3872 Str += "ARMv9.5a";
3873 else if (FBS[AArch64::HasV9_6aOps])
3874 Str += "ARMv9.6a";
3875 else if (FBS[AArch64::HasV8_0rOps])
3876 Str += "ARMv8r";
3877 else {
3878 SmallVector<std::string, 2> ExtMatches;
3879 for (const auto& Ext : ExtensionMap) {
3880 // Use & in case multiple features are enabled
3881 if ((FBS & Ext.Features) != FeatureBitset())
3882 ExtMatches.push_back(Ext.Name);
3883 }
3884 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3885 }
3886}
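// Example of the resulting diagnostic text (illustrative only): if a looked-up
// operand requires AArch64::FeatureSVE2, the "... requires: " messages built
// by the callers below end up reading, e.g., "TLBI <op> requires: sve2"; when
// several extension entries match, their names are joined with ", ".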
3887
3888void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3889 SMLoc S) {
3890 const uint16_t Op2 = Encoding & 7;
3891 const uint16_t Cm = (Encoding & 0x78) >> 3;
3892 const uint16_t Cn = (Encoding & 0x780) >> 7;
3893 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3894
3895 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3896
3897 Operands.push_back(
3898 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3899 Operands.push_back(
3900 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3901 Operands.push_back(
3902 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3903 Expr = MCConstantExpr::create(Op2, getContext());
3904 Operands.push_back(
3905 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3906}
3907
3908/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3909/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3910 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3911 OperandVector &Operands) {
3912 if (Name.contains('.'))
3913 return TokError("invalid operand");
3914
3915 Mnemonic = Name;
3916 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3917
3918 const AsmToken &Tok = getTok();
3919 StringRef Op = Tok.getString();
3920 SMLoc S = Tok.getLoc();
3921
3922 if (Mnemonic == "ic") {
3923 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3924 if (!IC)
3925 return TokError("invalid operand for IC instruction");
3926 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3927 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3928 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3929 return TokError(Str);
3930 }
3931 createSysAlias(IC->Encoding, Operands, S);
3932 } else if (Mnemonic == "dc") {
3933 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3934 if (!DC)
3935 return TokError("invalid operand for DC instruction");
3936 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3937 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3938 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3939 return TokError(Str);
3940 }
3941 createSysAlias(DC->Encoding, Operands, S);
3942 } else if (Mnemonic == "at") {
3943 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3944 if (!AT)
3945 return TokError("invalid operand for AT instruction");
3946 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3947 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3948 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3949 return TokError(Str);
3950 }
3951 createSysAlias(AT->Encoding, Operands, S);
3952 } else if (Mnemonic == "tlbi") {
3953 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3954 if (!TLBI)
3955 return TokError("invalid operand for TLBI instruction");
3956 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3957 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3958 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3959 return TokError(Str);
3960 }
3961 createSysAlias(TLBI->Encoding, Operands, S);
3962 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3963
3964 if (Op.lower() != "rctx")
3965 return TokError("invalid operand for prediction restriction instruction");
3966
3967 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3968 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3969 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3970
3971 if (Mnemonic == "cosp" && !hasSpecres2)
3972 return TokError("COSP requires: predres2");
3973 if (!hasPredres)
3974 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3975
3976 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3977 : Mnemonic == "dvp" ? 0b101
3978 : Mnemonic == "cosp" ? 0b110
3979 : Mnemonic == "cpp" ? 0b111
3980 : 0;
3981 assert(PRCTX_Op2 &&
3982 "Invalid mnemonic for prediction restriction instruction");
3983 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3984 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
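// Worked example (added for illustration, assuming the field layout used by
// createSysAlias above): for "cfp rctx, x0", PRCTX_Op2 = 0b100, so
// Encoding = 0b01101110011100, which decomposes as op1=3, CRn=7, CRm=3,
// op2=4, i.e. the alias is emitted as "sys #3, c7, c3, #4, x0".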
3985
3986 createSysAlias(Encoding, Operands, S);
3987 }
3988
3989 Lex(); // Eat operand.
3990
3991 bool ExpectRegister = !Op.contains_insensitive("all");
3992 bool HasRegister = false;
3993
3994 // Check for the optional register operand.
3995 if (parseOptionalToken(AsmToken::Comma)) {
3996 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3997 return TokError("expected register operand");
3998 HasRegister = true;
3999 }
4000
4001 if (ExpectRegister && !HasRegister)
4002 return TokError("specified " + Mnemonic + " op requires a register");
4003 else if (!ExpectRegister && HasRegister)
4004 return TokError("specified " + Mnemonic + " op does not use a register");
4005
4006 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4007 return true;
4008
4009 return false;
4010}
4011
4012/// parseSyspAlias - The TLBIP instructions are simple aliases for
4013/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4014bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4015 OperandVector &Operands) {
4016 if (Name.contains('.'))
4017 return TokError("invalid operand");
4018
4019 Mnemonic = Name;
4020 Operands.push_back(
4021 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4022
4023 const AsmToken &Tok = getTok();
4024 StringRef Op = Tok.getString();
4025 SMLoc S = Tok.getLoc();
4026
4027 if (Mnemonic == "tlbip") {
4028 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4029 if (HasnXSQualifier) {
4030 Op = Op.drop_back(3);
4031 }
4032 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4033 if (!TLBIorig)
4034 return TokError("invalid operand for TLBIP instruction");
4035 const AArch64TLBI::TLBI TLBI(
4036 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4037 TLBIorig->NeedsReg,
4038 HasnXSQualifier
4039 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4040 : TLBIorig->FeaturesRequired);
4041 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4042 std::string Name =
4043 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4044 std::string Str("TLBIP " + Name + " requires: ");
4045 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
4046 return TokError(Str);
4047 }
4048 createSysAlias(TLBI.Encoding, Operands, S);
4049 }
4050
4051 Lex(); // Eat operand.
4052
4053 if (parseComma())
4054 return true;
4055
4056 if (Tok.isNot(AsmToken::Identifier))
4057 return TokError("expected register identifier");
4058 auto Result = tryParseSyspXzrPair(Operands);
4059 if (Result.isNoMatch())
4060 Result = tryParseGPRSeqPair(Operands);
4061 if (!Result.isSuccess())
4062 return TokError("specified " + Mnemonic +
4063 " op requires a pair of registers");
4064
4065 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4066 return true;
4067
4068 return false;
4069}
4070
4071ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4072 MCAsmParser &Parser = getParser();
4073 const AsmToken &Tok = getTok();
4074
4075 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4076 return TokError("'csync' operand expected");
4077 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4078 // Immediate operand.
4079 const MCExpr *ImmVal;
4080 SMLoc ExprLoc = getLoc();
4081 AsmToken IntTok = Tok;
4082 if (getParser().parseExpression(ImmVal))
4083 return ParseStatus::Failure;
4084 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4085 if (!MCE)
4086 return Error(ExprLoc, "immediate value expected for barrier operand");
4087 int64_t Value = MCE->getValue();
4088 if (Mnemonic == "dsb" && Value > 15) {
4089 // This case is a no match here, but it might be matched by the nXS
4090 // variant. We deliberately do not unlex the optional '#', as it is not
4091 // necessary for characterizing an integer immediate.
4092 Parser.getLexer().UnLex(IntTok);
4093 return ParseStatus::NoMatch;
4094 }
4095 if (Value < 0 || Value > 15)
4096 return Error(ExprLoc, "barrier operand out of range");
4097 auto DB = AArch64DB::lookupDBByEncoding(Value);
4098 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4099 ExprLoc, getContext(),
4100 false /*hasnXSModifier*/));
4101 return ParseStatus::Success;
4102 }
4103
4104 if (Tok.isNot(AsmToken::Identifier))
4105 return TokError("invalid operand for instruction");
4106
4107 StringRef Operand = Tok.getString();
4108 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4109 auto DB = AArch64DB::lookupDBByName(Operand);
4110 // The only valid named option for ISB is 'sy'
4111 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4112 return TokError("'sy' or #imm operand expected");
4113 // The only valid named option for TSB is 'csync'
4114 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4115 return TokError("'csync' operand expected");
4116 if (!DB && !TSB) {
4117 if (Mnemonic == "dsb") {
4118 // This case is a no match here, but it might be matched by the nXS
4119 // variant.
4120 return ParseStatus::NoMatch;
4121 }
4122 return TokError("invalid barrier option name");
4123 }
4124
4125 Operands.push_back(AArch64Operand::CreateBarrier(
4126 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4127 getContext(), false /*hasnXSModifier*/));
4128 Lex(); // Consume the option
4129
4130 return ParseStatus::Success;
4131}
4132
4133 ParseStatus
4134 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4135 const AsmToken &Tok = getTok();
4136
4137 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4138 if (Mnemonic != "dsb")
4139 return ParseStatus::Failure;
4140
4141 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4142 // Immediate operand.
4143 const MCExpr *ImmVal;
4144 SMLoc ExprLoc = getLoc();
4145 if (getParser().parseExpression(ImmVal))
4146 return ParseStatus::Failure;
4147 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4148 if (!MCE)
4149 return Error(ExprLoc, "immediate value expected for barrier operand");
4150 int64_t Value = MCE->getValue();
4151 // v8.7-A DSB in the nXS variant accepts only the following immediate
4152 // values: 16, 20, 24, 28.
4153 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4154 return Error(ExprLoc, "barrier operand out of range");
4155 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4156 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4157 ExprLoc, getContext(),
4158 true /*hasnXSModifier*/));
4159 return ParseStatus::Success;
4160 }
4161
4162 if (Tok.isNot(AsmToken::Identifier))
4163 return TokError("invalid operand for instruction");
4164
4165 StringRef Operand = Tok.getString();
4166 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4167
4168 if (!DB)
4169 return TokError("invalid barrier option name");
4170
4171 Operands.push_back(
4172 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4173 getContext(), true /*hasnXSModifier*/));
4174 Lex(); // Consume the option
4175
4176 return ParseStatus::Success;
4177}
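// Illustration (not in the original source): an immediate such as "dsb #16"
// or one of the named nXS forms is accepted here with the nXS modifier set,
// while a plain "dsb #4" is handled by tryParseBarrierOperand above, which
// deliberately returns NoMatch for dsb immediates above 15 so that this nXS
// parser gets a chance to match.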
4178
4179ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4180 const AsmToken &Tok = getTok();
4181
4182 if (Tok.isNot(AsmToken::Identifier))
4183 return ParseStatus::NoMatch;
4184
4185 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4186 return ParseStatus::NoMatch;
4187
4188 int MRSReg, MSRReg;
4189 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4190 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4191 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4192 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4193 } else
4194 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4195
4196 unsigned PStateImm = -1;
4197 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4198 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4199 PStateImm = PState15->Encoding;
4200 if (!PState15) {
4201 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4202 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4203 PStateImm = PState1->Encoding;
4204 }
4205
4206 Operands.push_back(
4207 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4208 PStateImm, getContext()));
4209 Lex(); // Eat identifier
4210
4211 return ParseStatus::Success;
4212}
4213
4214 ParseStatus
4215 AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4216 SMLoc S = getLoc();
4217 const AsmToken &Tok = getTok();
4218 if (Tok.isNot(AsmToken::Identifier))
4219 return TokError("invalid operand for instruction");
4220
4221 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4222 if (!PH)
4223 return TokError("invalid operand for instruction");
4224
4225 Operands.push_back(AArch64Operand::CreatePHintInst(
4226 PH->Encoding, Tok.getString(), S, getContext()));
4227 Lex(); // Eat identifier token.
4228 return ParseStatus::Success;
4229}
4230
4231/// tryParseNeonVectorRegister - Parse a vector register operand.
4232bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4233 if (getTok().isNot(AsmToken::Identifier))
4234 return true;
4235
4236 SMLoc S = getLoc();
4237 // Check for a vector register specifier first.
4238 MCRegister Reg;
4239 StringRef Kind;
4240 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4241 if (!Res.isSuccess())
4242 return true;
4243
4244 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4245 if (!KindRes)
4246 return true;
4247
4248 unsigned ElementWidth = KindRes->second;
4249 Operands.push_back(
4250 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4251 S, getLoc(), getContext()));
4252
4253 // If there was an explicit qualifier, that goes on as a literal text
4254 // operand.
4255 if (!Kind.empty())
4256 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4257
4258 return tryParseVectorIndex(Operands).isFailure();
4259}
4260
4261ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4262 SMLoc SIdx = getLoc();
4263 if (parseOptionalToken(AsmToken::LBrac)) {
4264 const MCExpr *ImmVal;
4265 if (getParser().parseExpression(ImmVal))
4266 return ParseStatus::NoMatch;
4267 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4268 if (!MCE)
4269 return TokError("immediate value expected for vector index");
4270
4271 SMLoc E = getLoc();
4272
4273 if (parseToken(AsmToken::RBrac, "']' expected"))
4274 return ParseStatus::Failure;
4275
4276 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4277 E, getContext()));
4278 return ParseStatus::Success;
4279 }
4280
4281 return ParseStatus::NoMatch;
4282}
4283
4284// tryParseVectorRegister - Try to parse a vector register name with
4285// optional kind specifier. If it is a register specifier, eat the token
4286// and return it.
4287ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4288 StringRef &Kind,
4289 RegKind MatchKind) {
4290 const AsmToken &Tok = getTok();
4291
4292 if (Tok.isNot(AsmToken::Identifier))
4293 return ParseStatus::NoMatch;
4294
4295 StringRef Name = Tok.getString();
4296 // If there is a kind specifier, it's separated from the register name by
4297 // a '.'.
4298 size_t Start = 0, Next = Name.find('.');
4299 StringRef Head = Name.slice(Start, Next);
4300 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4301
4302 if (RegNum) {
4303 if (Next != StringRef::npos) {
4304 Kind = Name.substr(Next);
4305 if (!isValidVectorKind(Kind, MatchKind))
4306 return TokError("invalid vector kind qualifier");
4307 }
4308 Lex(); // Eat the register token.
4309
4310 Reg = RegNum;
4311 return ParseStatus::Success;
4312 }
4313
4314 return ParseStatus::NoMatch;
4315}
4316
4317ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4318 OperandVector &Operands) {
4319 ParseStatus Status =
4320 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4321 if (!Status.isSuccess())
4322 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4323 return Status;
4324}
4325
4326/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
4327template <RegKind RK>
4328 ParseStatus
4329 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4330 // Check for a SVE predicate register specifier first.
4331 const SMLoc S = getLoc();
4332 StringRef Kind;
4333 MCRegister RegNum;
4334 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4335 if (!Res.isSuccess())
4336 return Res;
4337
4338 const auto &KindRes = parseVectorKind(Kind, RK);
4339 if (!KindRes)
4340 return ParseStatus::NoMatch;
4341
4342 unsigned ElementWidth = KindRes->second;
4343 Operands.push_back(AArch64Operand::CreateVectorReg(
4344 RegNum, RK, ElementWidth, S,
4345 getLoc(), getContext()));
4346
4347 if (getLexer().is(AsmToken::LBrac)) {
4348 if (RK == RegKind::SVEPredicateAsCounter) {
4349 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4350 if (ResIndex.isSuccess())
4351 return ParseStatus::Success;
4352 } else {
4353 // Indexed predicate, there's no comma so try parse the next operand
4354 // immediately.
4355 if (parseOperand(Operands, false, false))
4356 return ParseStatus::NoMatch;
4357 }
4358 }
4359
4360 // Not all predicates are followed by a '/m' or '/z'.
4361 if (getTok().isNot(AsmToken::Slash))
4362 return ParseStatus::Success;
4363
4364 // But when they do they shouldn't have an element type suffix.
4365 if (!Kind.empty())
4366 return Error(S, "not expecting size suffix");
4367
4368 // Add a literal slash as operand
4369 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4370
4371 Lex(); // Eat the slash.
4372
4373 // Zeroing or merging?
4374 auto Pred = getTok().getString().lower();
4375 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4376 return Error(getLoc(), "expecting 'z' predication");
4377
4378 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4379 return Error(getLoc(), "expecting 'm' or 'z' predication");
4380
4381 // Add zero/merge token.
4382 const char *ZM = Pred == "z" ? "z" : "m";
4383 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4384
4385 Lex(); // Eat zero/merge token.
4386 return ParseStatus::Success;
4387}
4388
4389/// parseRegister - Parse a register operand.
4390bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4391 // Try for a Neon vector register.
4392 if (!tryParseNeonVectorRegister(Operands))
4393 return false;
4394
4395 if (tryParseZTOperand(Operands).isSuccess())
4396 return false;
4397
4398 // Otherwise try for a scalar register.
4399 if (tryParseGPROperand<false>(Operands).isSuccess())
4400 return false;
4401
4402 return true;
4403}
4404
4405bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4406 bool HasELFModifier = false;
4407 AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
4408
4409 if (parseOptionalToken(AsmToken::Colon)) {
4410 HasELFModifier = true;
4411
4412 if (getTok().isNot(AsmToken::Identifier))
4413 return TokError("expect relocation specifier in operand after ':'");
4414
4415 std::string LowerCase = getTok().getIdentifier().lower();
4416 RefKind =
4417 StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4418 .Case("lo12", AArch64MCExpr::VK_LO12)
4419 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4420 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4421 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4422 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4423 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4424 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4425 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4426 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4427 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4428 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4429 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4430 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4431 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4432 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4433 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4434 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4435 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4436 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4437 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4438 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4439 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4440 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4441 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4442 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4443 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4444 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4445 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4446 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4447 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4448 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4449 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4450 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4451 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4452 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4453 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4454 .Case("tlsdesc_auth_lo12", AArch64MCExpr::VK_TLSDESC_AUTH_LO12)
4455 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4456 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4457 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4458 .Case("got_auth", AArch64MCExpr::VK_GOT_AUTH_PAGE)
4459 .Case("got_auth_lo12", AArch64MCExpr::VK_GOT_AUTH_LO12)
4460 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4461 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4462 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4463 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4464 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4465 .Case("tlsdesc_auth", AArch64MCExpr::VK_TLSDESC_AUTH_PAGE)
4466 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4467 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4468 .Default(AArch64MCExpr::VK_INVALID);
4469
4470 if (RefKind == AArch64MCExpr::VK_INVALID)
4471 return TokError("expect relocation specifier in operand after ':'");
4472
4473 Lex(); // Eat identifier
4474
4475 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4476 return true;
4477 }
4478
4479 if (getParser().parseExpression(ImmVal))
4480 return true;
4481
4482 if (HasELFModifier)
4483 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4484
4485 return false;
4486}
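// Usage sketch (illustrative, not from the original): this is what turns an
// operand such as "#:abs_g1_nc:symbol" in "movk x0, #:abs_g1_nc:symbol" into
// an AArch64MCExpr with the VK_ABS_G1_NC variant kind wrapped around the
// symbol expression.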
4487
4488ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4489 if (getTok().isNot(AsmToken::LCurly))
4490 return ParseStatus::NoMatch;
4491
4492 auto ParseMatrixTile = [this](unsigned &Reg,
4493 unsigned &ElementWidth) -> ParseStatus {
4494 StringRef Name = getTok().getString();
4495 size_t DotPosition = Name.find('.');
4496 if (DotPosition == StringRef::npos)
4497 return ParseStatus::NoMatch;
4498
4499 unsigned RegNum = matchMatrixTileListRegName(Name);
4500 if (!RegNum)
4501 return ParseStatus::NoMatch;
4502
4503 StringRef Tail = Name.drop_front(DotPosition);
4504 const std::optional<std::pair<int, int>> &KindRes =
4505 parseVectorKind(Tail, RegKind::Matrix);
4506 if (!KindRes)
4507 return TokError(
4508 "Expected the register to be followed by element width suffix");
4509 ElementWidth = KindRes->second;
4510 Reg = RegNum;
4511 Lex(); // Eat the register.
4512 return ParseStatus::Success;
4513 };
4514
4515 SMLoc S = getLoc();
4516 auto LCurly = getTok();
4517 Lex(); // Eat left bracket token.
4518
4519 // Empty matrix list
4520 if (parseOptionalToken(AsmToken::RCurly)) {
4521 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4522 /*RegMask=*/0, S, getLoc(), getContext()));
4523 return ParseStatus::Success;
4524 }
4525
4526 // Try to parse the {za} alias early.
4527 if (getTok().getString().equals_insensitive("za")) {
4528 Lex(); // Eat 'za'
4529
4530 if (parseToken(AsmToken::RCurly, "'}' expected"))
4531 return ParseStatus::Failure;
4532
4533 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4534 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4535 return ParseStatus::Success;
4536 }
4537
4538 SMLoc TileLoc = getLoc();
4539
4540 unsigned FirstReg, ElementWidth;
4541 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4542 if (!ParseRes.isSuccess()) {
4543 getLexer().UnLex(LCurly);
4544 return ParseRes;
4545 }
4546
4547 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4548
4549 unsigned PrevReg = FirstReg;
4550
4551 SmallSet<unsigned, 8> DRegs;
4552 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4553
4554 SmallSet<unsigned, 8> SeenRegs;
4555 SeenRegs.insert(FirstReg);
4556
4557 while (parseOptionalToken(AsmToken::Comma)) {
4558 TileLoc = getLoc();
4559 unsigned Reg, NextElementWidth;
4560 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4561 if (!ParseRes.isSuccess())
4562 return ParseRes;
4563
4564 // Element size must match on all regs in the list.
4565 if (ElementWidth != NextElementWidth)
4566 return Error(TileLoc, "mismatched register size suffix");
4567
4568 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4569 Warning(TileLoc, "tile list not in ascending order");
4570
4571 if (SeenRegs.contains(Reg))
4572 Warning(TileLoc, "duplicate tile in list");
4573 else {
4574 SeenRegs.insert(Reg);
4575 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4576 }
4577
4578 PrevReg = Reg;
4579 }
4580
4581 if (parseToken(AsmToken::RCurly, "'}' expected"))
4582 return ParseStatus::Failure;
4583
4584 unsigned RegMask = 0;
4585 for (auto Reg : DRegs)
4586 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4587 RI->getEncodingValue(AArch64::ZAD0));
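// (Illustration, added for clarity: the mask carries one bit per 64-bit ZA
// tile, so the "{za}" alias above produces 0xFF, and a list that expands to
// ZAD0 and ZAD4 would produce 0b00010001, assuming ZAD0..ZAD7 have
// consecutive encoding values.)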
4588 Operands.push_back(
4589 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4590
4591 return ParseStatus::Success;
4592}
4593
4594template <RegKind VectorKind>
4595ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4596 bool ExpectMatch) {
4597 MCAsmParser &Parser = getParser();
4598 if (!getTok().is(AsmToken::LCurly))
4599 return ParseStatus::NoMatch;
4600
4601 // Wrapper around parse function
4602 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4603 bool NoMatchIsError) -> ParseStatus {
4604 auto RegTok = getTok();
4605 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4606 if (ParseRes.isSuccess()) {
4607 if (parseVectorKind(Kind, VectorKind))
4608 return ParseRes;
4609 llvm_unreachable("Expected a valid vector kind");
4610 }
4611
4612 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4613 RegTok.getString().equals_insensitive("zt0"))
4614 return ParseStatus::NoMatch;
4615
4616 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4617 (ParseRes.isNoMatch() && NoMatchIsError &&
4618 !RegTok.getString().starts_with_insensitive("za")))
4619 return Error(Loc, "vector register expected");
4620
4621 return ParseStatus::NoMatch;
4622 };
4623
4624 int NumRegs = getNumRegsForRegKind(VectorKind);
4625 SMLoc S = getLoc();
4626 auto LCurly = getTok();
4627 Lex(); // Eat left bracket token.
4628
4629 StringRef Kind;
4630 MCRegister FirstReg;
4631 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4632
4633 // Put back the original left bracket if there was no match, so that
4634 // different types of list-operands can be matched (e.g. SVE, Neon).
4635 if (ParseRes.isNoMatch())
4636 Parser.getLexer().UnLex(LCurly);
4637
4638 if (!ParseRes.isSuccess())
4639 return ParseRes;
4640
4641 int64_t PrevReg = FirstReg;
4642 unsigned Count = 1;
4643
4644 int Stride = 1;
4645 if (parseOptionalToken(AsmToken::Minus)) {
4646 SMLoc Loc = getLoc();
4647 StringRef NextKind;
4648
4649 MCRegister Reg;
4650 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4651 if (!ParseRes.isSuccess())
4652 return ParseRes;
4653
4654 // Any kind suffixes must match on all regs in the list.
4655 if (Kind != NextKind)
4656 return Error(Loc, "mismatched register size suffix");
4657
4658 unsigned Space =
4659 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4660
4661 if (Space == 0 || Space > 3)
4662 return Error(Loc, "invalid number of vectors");
4663
4664 Count += Space;
4665 }
4666 else {
4667 bool HasCalculatedStride = false;
4668 while (parseOptionalToken(AsmToken::Comma)) {
4669 SMLoc Loc = getLoc();
4670 StringRef NextKind;
4671 MCRegister Reg;
4672 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4673 if (!ParseRes.isSuccess())
4674 return ParseRes;
4675
4676 // Any kind suffixes must match on all regs in the list.
4677 if (Kind != NextKind)
4678 return Error(Loc, "mismatched register size suffix");
4679
4680 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4681 unsigned PrevRegVal =
4682 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4683 if (!HasCalculatedStride) {
4684 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4685 : (RegVal + NumRegs - PrevRegVal);
4686 HasCalculatedStride = true;
4687 }
4688
4689 // Register must be incremental (with a wraparound at last register).
4690 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4691 return Error(Loc, "registers must have the same sequential stride");
4692
4693 PrevReg = Reg;
4694 ++Count;
4695 }
4696 }
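// For illustration (not part of the original source): both the ranged form,
// e.g. "{ z0.d - z3.d }", and the comma form, e.g. "{ z0.d, z4.d, z8.d,
// z12.d }", end up here; the former derives Count from the register distance,
// while the latter additionally fixes a common Stride (4 in that example)
// that every subsequent register must follow, modulo the register file size.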
4697
4698 if (parseToken(AsmToken::RCurly, "'}' expected"))
4699 return ParseStatus::Failure;
4700
4701 if (Count > 4)
4702 return Error(S, "invalid number of vectors");
4703
4704 unsigned NumElements = 0;
4705 unsigned ElementWidth = 0;
4706 if (!Kind.empty()) {
4707 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4708 std::tie(NumElements, ElementWidth) = *VK;
4709 }
4710
4711 Operands.push_back(AArch64Operand::CreateVectorList(
4712 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4713 getLoc(), getContext()));
4714
4715 return ParseStatus::Success;
4716}
4717
4718/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4719bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4720 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4721 if (!ParseRes.isSuccess())
4722 return true;
4723
4724 return tryParseVectorIndex(Operands).isFailure();
4725}
4726
4727ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4728 SMLoc StartLoc = getLoc();
4729
4730 MCRegister RegNum;
4731 ParseStatus Res = tryParseScalarRegister(RegNum);
4732 if (!Res.isSuccess())
4733 return Res;
4734
4735 if (!parseOptionalToken(AsmToken::Comma)) {
4736 Operands.push_back(AArch64Operand::CreateReg(
4737 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4738 return ParseStatus::Success;
4739 }
4740
4741 parseOptionalToken(AsmToken::Hash);
4742
4743 if (getTok().isNot(AsmToken::Integer))
4744 return Error(getLoc(), "index must be absent or #0");
4745
4746 const MCExpr *ImmVal;
4747 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4748 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4749 return Error(getLoc(), "index must be absent or #0");
4750
4751 Operands.push_back(AArch64Operand::CreateReg(
4752 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4753 return ParseStatus::Success;
4754}
4755
4756ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4757 SMLoc StartLoc = getLoc();
4758 const AsmToken &Tok = getTok();
4759 std::string Name = Tok.getString().lower();
4760
4761 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4762
4763 if (RegNum == 0)
4764 return ParseStatus::NoMatch;
4765
4766 Operands.push_back(AArch64Operand::CreateReg(
4767 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4768 Lex(); // Eat register.
4769
4770 // Check if register is followed by an index
4771 if (parseOptionalToken(AsmToken::LBrac)) {
4772 Operands.push_back(
4773 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4774 const MCExpr *ImmVal;
4775 if (getParser().parseExpression(ImmVal))
4776 return ParseStatus::NoMatch;
4777 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4778 if (!MCE)
4779 return TokError("immediate value expected for vector index");
4780 Operands.push_back(AArch64Operand::CreateImm(
4781 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4782 getLoc(), getContext()));
4783 if (parseOptionalToken(AsmToken::Comma))
4784 if (parseOptionalMulOperand(Operands))
4785 return ParseStatus::Failure;
4786 if (parseToken(AsmToken::RBrac, "']' expected"))
4787 return ParseStatus::Failure;
4788 Operands.push_back(
4789 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4790 }
4791 return ParseStatus::Success;
4792}
4793
4794template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4795ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4796 SMLoc StartLoc = getLoc();
4797
4798 MCRegister RegNum;
4799 ParseStatus Res = tryParseScalarRegister(RegNum);
4800 if (!Res.isSuccess())
4801 return Res;
4802
4803 // No shift/extend is the default.
4804 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4805 Operands.push_back(AArch64Operand::CreateReg(
4806 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4807 return ParseStatus::Success;
4808 }
4809
4810 // Eat the comma
4811 Lex();
4812
4813 // Match the shift
4814 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4815 Res = tryParseOptionalShiftExtend(ExtOpnd);
4816 if (!Res.isSuccess())
4817 return Res;
4818
4819 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4820 Operands.push_back(AArch64Operand::CreateReg(
4821 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4822 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4823 Ext->hasShiftExtendAmount()));
4824
4825 return ParseStatus::Success;
4826}
4827
4828bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4829 MCAsmParser &Parser = getParser();
4830
4831 // Some SVE instructions have a decoration after the immediate, i.e.
4832 // "mul vl". We parse them here and add tokens, which must be present in the
4833 // asm string in the tablegen instruction.
4834 bool NextIsVL =
4835 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4836 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4837 if (!getTok().getString().equals_insensitive("mul") ||
4838 !(NextIsVL || NextIsHash))
4839 return true;
4840
4841 Operands.push_back(
4842 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4843 Lex(); // Eat the "mul"
4844
4845 if (NextIsVL) {
4846 Operands.push_back(
4847 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4848 Lex(); // Eat the "vl"
4849 return false;
4850 }
4851
4852 if (NextIsHash) {
4853 Lex(); // Eat the #
4854 SMLoc S = getLoc();
4855
4856 // Parse immediate operand.
4857 const MCExpr *ImmVal;
4858 if (!Parser.parseExpression(ImmVal))
4859 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4860 Operands.push_back(AArch64Operand::CreateImm(
4861 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4862 getContext()));
4863 return false;
4864 }
4865 }
4866
4867 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4868}
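// Usage sketch (illustrative only): this is what consumes the trailing
// "mul vl" in an SVE form such as "ldr z0, [x0, #1, mul vl]" and the
// "mul #<imm>" decoration used by some SVE/SME count instructions, turning
// them into the literal tokens the matcher expects.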
4869
4870bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4871 StringRef &VecGroup) {
4872 MCAsmParser &Parser = getParser();
4873 auto Tok = Parser.getTok();
4874 if (Tok.isNot(AsmToken::Identifier))
4875 return true;
4876
4877 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4878 .Case("vgx2", "vgx2")
4879 .Case("vgx4", "vgx4")
4880 .Default("");
4881
4882 if (VG.empty())
4883 return true;
4884
4885 VecGroup = VG;
4886 Parser.Lex(); // Eat vgx[2|4]
4887 return false;
4888}
4889
4890bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4891 auto Tok = getTok();
4892 if (Tok.isNot(AsmToken::Identifier))
4893 return true;
4894
4895 auto Keyword = Tok.getString();
4896 Keyword = StringSwitch<StringRef>(Keyword)
4897 .Case("sm", "sm")
4898 .Case("za", "za")
4899 .Default(Keyword);
4900 Operands.push_back(
4901 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4902
4903 Lex();
4904 return false;
4905}
4906
4907 /// parseOperand - Parse an AArch64 instruction operand. For now this parses
4908 /// the operand regardless of the mnemonic.
4909bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4910 bool invertCondCode) {
4911 MCAsmParser &Parser = getParser();
4912
4913 ParseStatus ResTy =
4914 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4915
4916 // Check if the current operand has a custom associated parser, if so, try to
4917 // custom parse the operand, or fallback to the general approach.
4918 if (ResTy.isSuccess())
4919 return false;
4920 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4921 // there was a match, but an error occurred, in which case, just return that
4922 // the operand parsing failed.
4923 if (ResTy.isFailure())
4924 return true;
4925
4926 // Nothing custom, so do general case parsing.
4927 SMLoc S, E;
4928 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4929 if (parseOptionalToken(AsmToken::Comma)) {
4930 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4931 if (!Res.isNoMatch())
4932 return Res.isFailure();
4933 getLexer().UnLex(SavedTok);
4934 }
4935 return false;
4936 };
4937 switch (getLexer().getKind()) {
4938 default: {
4939 SMLoc S = getLoc();
4940 const MCExpr *Expr;
4941 if (parseSymbolicImmVal(Expr))
4942 return Error(S, "invalid operand");
4943
4944 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4945 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4946 return parseOptionalShiftExtend(getTok());
4947 }
4948 case AsmToken::LBrac: {
4949 Operands.push_back(
4950 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4951 Lex(); // Eat '['
4952
4953 // There's no comma after a '[', so we can parse the next operand
4954 // immediately.
4955 return parseOperand(Operands, false, false);
4956 }
4957 case AsmToken::LCurly: {
4958 if (!parseNeonVectorList(Operands))
4959 return false;
4960
4961 Operands.push_back(
4962 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4963 Lex(); // Eat '{'
4964
4965 // There's no comma after a '{', so we can parse the next operand
4966 // immediately.
4967 return parseOperand(Operands, false, false);
4968 }
4969 case AsmToken::Identifier: {
4970 // See if this is a "VG" decoration used by SME instructions.
4971 StringRef VecGroup;
4972 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4973 Operands.push_back(
4974 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4975 return false;
4976 }
4977 // If we're expecting a Condition Code operand, then just parse that.
4978 if (isCondCode)
4979 return parseCondCode(Operands, invertCondCode);
4980
4981 // If it's a register name, parse it.
4982 if (!parseRegister(Operands)) {
4983 // Parse an optional shift/extend modifier.
4984 AsmToken SavedTok = getTok();
4985 if (parseOptionalToken(AsmToken::Comma)) {
4986 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4987 // such cases and don't report an error when <label> happens to match a
4988 // shift/extend modifier.
4989 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4990 /*ParseForAllFeatures=*/true);
4991 if (!Res.isNoMatch())
4992 return Res.isFailure();
4993 Res = tryParseOptionalShiftExtend(Operands);
4994 if (!Res.isNoMatch())
4995 return Res.isFailure();
4996 getLexer().UnLex(SavedTok);
4997 }
4998 return false;
4999 }
5000
5001 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5002 // by SVE instructions.
5003 if (!parseOptionalMulOperand(Operands))
5004 return false;
5005
5006 // If this is a two-word mnemonic, parse its special keyword
5007 // operand as an identifier.
5008 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5009 Mnemonic == "gcsb")
5010 return parseKeywordOperand(Operands);
5011
5012 // This was not a register so parse other operands that start with an
5013 // identifier (like labels) as expressions and create them as immediates.
5014 const MCExpr *IdVal;
5015 S = getLoc();
5016 if (getParser().parseExpression(IdVal))
5017 return true;
5018 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5019 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5020
5021 // Parse an optional shift/extend modifier.
5022 return parseOptionalShiftExtend(getTok());
5023 }
5024 case AsmToken::Integer:
5025 case AsmToken::Real:
5026 case AsmToken::Hash: {
5027 // #42 -> immediate.
5028 S = getLoc();
5029
5030 parseOptionalToken(AsmToken::Hash);
5031
5032 // Parse a negative sign
5033 bool isNegative = false;
5034 if (getTok().is(AsmToken::Minus)) {
5035 isNegative = true;
5036 // We need to consume this token only when we have a Real, otherwise
5037 // we let parseSymbolicImmVal take care of it
5038 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5039 Lex();
5040 }
5041
5042 // The only Real that should come through here is a literal #0.0 for
5043 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5044 // so convert the value.
5045 const AsmToken &Tok = getTok();
5046 if (Tok.is(AsmToken::Real)) {
5047 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5048 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5049 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5050 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5051 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5052 return TokError("unexpected floating point literal");
5053 else if (IntVal != 0 || isNegative)
5054 return TokError("expected floating-point constant #0.0");
5055 Lex(); // Eat the token.
5056
5057 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5058 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5059 return false;
5060 }
5061
5062 const MCExpr *ImmVal;
5063 if (parseSymbolicImmVal(ImmVal))
5064 return true;
5065
5066 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5067 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5068
5069 // Parse an optional shift/extend modifier.
5070 return parseOptionalShiftExtend(Tok);
5071 }
5072 case AsmToken::Equal: {
5073 SMLoc Loc = getLoc();
5074 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5075 return TokError("unexpected token in operand");
5076 Lex(); // Eat '='
5077 const MCExpr *SubExprVal;
5078 if (getParser().parseExpression(SubExprVal))
5079 return true;
5080
5081 if (Operands.size() < 2 ||
5082 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5083 return Error(Loc, "Only valid when first operand is register");
5084
5085 bool IsXReg =
5086 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5087 Operands[1]->getReg());
5088
5089 MCContext& Ctx = getContext();
5090 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5091 // If the operand is an immediate that can fit into a mov, then replace ldr with mov.
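// e.g. "ldr x0, =0x12340000" is rewritten to "movz x0, #0x1234, lsl #16".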
5092 if (isa<MCConstantExpr>(SubExprVal)) {
5093 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5094 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5095 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5096 ShiftAmt += 16;
5097 Imm >>= 16;
5098 }
5099 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5100 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5101 Operands.push_back(AArch64Operand::CreateImm(
5102 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5103 if (ShiftAmt)
5104 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5105 ShiftAmt, true, S, E, Ctx));
5106 return false;
5107 }
5108 APInt Simm = APInt(64, Imm << ShiftAmt);
5109 // check if the immediate is an unsigned or signed 32-bit int for W regs
5110 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5111 return Error(Loc, "Immediate too large for register");
5112 }
5113 // If it is a label or an imm that cannot fit in a movz, put it into CP.
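// e.g. "ldr x0, =sym", or an immediate that needs more than a single move,
// is instead loaded from a literal-pool entry.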
5114 const MCExpr *CPLoc =
5115 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5116 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5117 return false;
5118 }
5119 }
5120}
5121
5122bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5123 const MCExpr *Expr = nullptr;
5124 SMLoc L = getLoc();
5125 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5126 return true;
5127 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5128 if (check(!Value, L, "expected constant expression"))
5129 return true;
5130 Out = Value->getValue();
5131 return false;
5132}
5133
5134bool AArch64AsmParser::parseComma() {
5135 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5136 return true;
5137 // Eat the comma
5138 Lex();
5139 return false;
5140}
5141
5142bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5143 unsigned First, unsigned Last) {
5144 MCRegister Reg;
5145 SMLoc Start, End;
5146 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5147 return true;
5148
5149 // Special handling for FP and LR; they aren't linearly after x28 in
5150 // the registers enum.
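// e.g. parsing "fp" here yields 29 and "lr" yields 30, matching x29/x30.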
5151 unsigned RangeEnd = Last;
5152 if (Base == AArch64::X0) {
5153 if (Last == AArch64::FP) {
5154 RangeEnd = AArch64::X28;
5155 if (Reg == AArch64::FP) {
5156 Out = 29;
5157 return false;
5158 }
5159 }
5160 if (Last == AArch64::LR) {
5161 RangeEnd = AArch64::X28;
5162 if (Reg == AArch64::FP) {
5163 Out = 29;
5164 return false;
5165 } else if (Reg == AArch64::LR) {
5166 Out = 30;
5167 return false;
5168 }
5169 }
5170 }
5171
5172 if (check(Reg < First || Reg > RangeEnd, Start,
5173 Twine("expected register in range ") +
5176 return true;
5177 Out = Reg - Base;
5178 return false;
5179}
5180
5181bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5182 const MCParsedAsmOperand &Op2) const {
5183 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5184 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5185
5186 if (AOp1.isVectorList() && AOp2.isVectorList())
5187 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5188 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5189 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5190
5191 if (!AOp1.isReg() || !AOp2.isReg())
5192 return false;
5193
5194 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5195 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5196 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5197
5198 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5199 "Testing equality of non-scalar registers not supported");
5200
5201 // Check if the registers match via their sub/super register classes.
5202 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5203 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5204 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5205 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5206 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5207 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5208 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5209 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5210
5211 return false;
5212}
5213
5214/// Parse an AArch64 instruction mnemonic followed by its operands.
5215bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5216 StringRef Name, SMLoc NameLoc,
5219 .Case("beq", "b.eq")
5220 .Case("bne", "b.ne")
5221 .Case("bhs", "b.hs")
5222 .Case("bcs", "b.cs")
5223 .Case("blo", "b.lo")
5224 .Case("bcc", "b.cc")
5225 .Case("bmi", "b.mi")
5226 .Case("bpl", "b.pl")
5227 .Case("bvs", "b.vs")
5228 .Case("bvc", "b.vc")
5229 .Case("bhi", "b.hi")
5230 .Case("bls", "b.ls")
5231 .Case("bge", "b.ge")
5232 .Case("blt", "b.lt")
5233 .Case("bgt", "b.gt")
5234 .Case("ble", "b.le")
5235 .Case("bal", "b.al")
5236 .Case("bnv", "b.nv")
5237 .Default(Name);
5238
5239 // First check for the AArch64-specific .req directive.
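// e.g. after "foo .req x5", "foo" can be written wherever x5 is accepted.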
5240 if (getTok().is(AsmToken::Identifier) &&
5241 getTok().getIdentifier().lower() == ".req") {
5242 parseDirectiveReq(Name, NameLoc);
5243 // We always return 'error' for this, as we're done with this
5244 // statement and don't need to match the instruction.
5245 return true;
5246 }
5247
5248 // Create the leading tokens for the mnemonic, split by '.' characters.
5249 size_t Start = 0, Next = Name.find('.');
5250 StringRef Head = Name.slice(Start, Next);
5251
5252 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5253 // the SYS instruction.
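// e.g. "ic ialluis", "dc civac, x0", "at s1e1r, x0" and "tlbi vmalle1" are
// all encoded as SYS instructions.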
5254 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5255 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5256 return parseSysAlias(Head, NameLoc, Operands);
5257
5258 // TLBIP instructions are aliases for the SYSP instruction.
5259 if (Head == "tlbip")
5260 return parseSyspAlias(Head, NameLoc, Operands);
5261
5262 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5263 Mnemonic = Head;
5264
5265 // Handle condition codes for a branch mnemonic
5266 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5267 Start = Next;
5268 Next = Name.find('.', Start + 1);
5269 Head = Name.slice(Start + 1, Next);
5270
5271 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5272 (Head.data() - Name.data()));
5273 std::string Suggestion;
5274 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5275 if (CC == AArch64CC::Invalid) {
5276 std::string Msg = "invalid condition code";
5277 if (!Suggestion.empty())
5278 Msg += ", did you mean " + Suggestion + "?";
5279 return Error(SuffixLoc, Msg);
5280 }
5281 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5282 /*IsSuffix=*/true));
5283 Operands.push_back(
5284 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5285 }
5286
5287 // Add the remaining tokens in the mnemonic.
5288 while (Next != StringRef::npos) {
5289 Start = Next;
5290 Next = Name.find('.', Start + 1);
5291 Head = Name.slice(Start, Next);
5292 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5293 (Head.data() - Name.data()) + 1);
5294 Operands.push_back(AArch64Operand::CreateToken(
5295 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5296 }
5297
5298 // Conditional compare instructions have a Condition Code operand, which needs
5299 // to be parsed and an immediate operand created.
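// e.g. in "ccmp x1, x2, #4, ne" or "csel x0, x1, x2, ge" the trailing
// condition is the fourth operand.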
5300 bool condCodeFourthOperand =
5301 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5302 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5303 Head == "csinc" || Head == "csinv" || Head == "csneg");
5304
5305 // These instructions are aliases to some of the conditional select
5306 // instructions. However, the condition code is inverted in the aliased
5307 // instruction.
5308 //
5309 // FIXME: Is this the correct way to handle these? Or should the parser
5310 // generate the aliased instructions directly?
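// e.g. "cset w0, eq" is an alias of "csinc w0, wzr, wzr, ne" and
// "cinc w0, w1, lt" of "csinc w0, w1, w1, ge" - note the inverted condition.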
5311 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5312 bool condCodeThirdOperand =
5313 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5314
5315 // Read the remaining operands.
5316 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5317
5318 unsigned N = 1;
5319 do {
5320 // Parse and remember the operand.
5321 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5322 (N == 3 && condCodeThirdOperand) ||
5323 (N == 2 && condCodeSecondOperand),
5324 condCodeSecondOperand || condCodeThirdOperand)) {
5325 return true;
5326 }
5327
5328 // After successfully parsing some operands there are three special cases
5329 // to consider (i.e. notional operands not separated by commas). Two are
5330 // due to memory specifiers:
5331 // + An RBrac will end an address for load/store/prefetch
5332 // + An '!' will indicate a pre-indexed operation.
5333 //
5334 // And a further case is '}', which ends a group of tokens specifying the
5335 // SME accumulator array 'ZA' or tile vector, i.e.
5336 //
5337 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5338 //
5339 // It's someone else's responsibility to make sure these tokens are sane
5340 // in the given context!
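// For example: "]" in "ldr x0, [x1, #8]", "!" in "ldr x0, [x1, #8]!", and
// "}" in "zero { za }".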
5341
5342 if (parseOptionalToken(AsmToken::RBrac))
5343 Operands.push_back(
5344 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5345 if (parseOptionalToken(AsmToken::Exclaim))
5346 Operands.push_back(
5347 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5348 if (parseOptionalToken(AsmToken::RCurly))
5349 Operands.push_back(
5350 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5351
5352 ++N;
5353 } while (parseOptionalToken(AsmToken::Comma));
5354 }
5355
5356 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5357 return true;
5358
5359 return false;
5360}
5361
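// Returns true if Reg names the same architectural SVE register as ZReg at
// any element width (b/h/s/d/q, or a z register itself).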
5362static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5363 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5364 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5365 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5366 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5367 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5368 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5369 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5370}
5371
5372// FIXME: This entire function is a giant hack to provide us with decent
5373// operand range validation/diagnostics until TableGen/MC can be extended
5374// to support autogeneration of this kind of validation.
5375bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5377 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5378 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5379
5380 // A prefix only applies to the instruction following it. Here we extract
5381 // prefix information for the next instruction before validating the current
5382 // one so that in the case of failure we don't erroneously continue using the
5383 // current prefix.
5384 PrefixInfo Prefix = NextPrefix;
5385 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5386
5387 // Before validating the instruction in isolation we run through the rules
5388 // applicable when it follows a prefix instruction.
5389 // NOTE: brk & hlt can be prefixed but require no additional validation.
5390 if (Prefix.isActive() &&
5391 (Inst.getOpcode() != AArch64::BRK) &&
5392 (Inst.getOpcode() != AArch64::HLT)) {
5393
5394 // Prefixed instructions must have a destructive operand.
5395 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5396 AArch64::NotDestructive)
5397 return Error(IDLoc, "instruction is unpredictable when following a"
5398 " movprfx, suggest replacing movprfx with mov");
5399
5400 // Destination operands must match.
5401 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5402 return Error(Loc[0], "instruction is unpredictable when following a"
5403 " movprfx writing to a different destination");
5404
5405 // Destination operand must not be used in any other location.
5406 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5407 if (Inst.getOperand(i).isReg() &&
5408 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5409 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5410 return Error(Loc[0], "instruction is unpredictable when following a"
5411 " movprfx and destination also used as non-destructive"
5412 " source");
5413 }
5414
5415 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5416 if (Prefix.isPredicated()) {
5417 int PgIdx = -1;
5418
5419 // Find the instruction's general predicate.
5420 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5421 if (Inst.getOperand(i).isReg() &&
5422 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5423 PgIdx = i;
5424 break;
5425 }
5426
5427 // Instruction must be predicated if the movprfx is predicated.
5428 if (PgIdx == -1 ||
5430 return Error(IDLoc, "instruction is unpredictable when following a"
5431 " predicated movprfx, suggest using unpredicated movprfx");
5432
5433 // Instruction must use same general predicate as the movprfx.
5434 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5435 return Error(IDLoc, "instruction is unpredictable when following a"
5436 " predicated movprfx using a different general predicate");
5437
5438 // Instruction element type must match the movprfx.
5439 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5440 return Error(IDLoc, "instruction is unpredictable when following a"
5441 " predicated movprfx with a different element size");
5442 }
5443 }
5444
5445 // On ARM64EC, only a restricted set of registers may be used. Warn about
5446 // uses of explicitly disallowed registers.
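// e.g. "mov x13, x0", or any use of q16..q31, is flagged below when
// targeting ARM64EC.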
5447 if (IsWindowsArm64EC) {
5448 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5449 if (Inst.getOperand(i).isReg()) {
5450 MCRegister Reg = Inst.getOperand(i).getReg();
5451 // At this point, vector registers are matched to their
5452 // appropriately sized alias.
5453 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5454 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5455 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5456 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5457 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5458 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5459 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5460 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5461 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5462 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5463 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5464 " is disallowed on ARM64EC.");
5465 }
5466 }
5467 }
5468 }
5469
5470 // Check for indexed addressing modes where the base register is the
5471 // same as a destination/source register, or a pair load where
5472 // Rt == Rt2. All of those are unpredictable.
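// e.g. "ldp x0, x0, [x1]" (Rt == Rt2) and "ldr x0, [x0], #8" (writeback base
// is also the destination) are both rejected here.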
5473 switch (Inst.getOpcode()) {
5474 case AArch64::LDPSWpre:
5475 case AArch64::LDPWpost:
5476 case AArch64::LDPWpre:
5477 case AArch64::LDPXpost:
5478 case AArch64::LDPXpre: {
5479 MCRegister Rt = Inst.getOperand(1).getReg();
5480 MCRegister Rt2 = Inst.getOperand(2).getReg();
5481 MCRegister Rn = Inst.getOperand(3).getReg();
5482 if (RI->isSubRegisterEq(Rn, Rt))
5483 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5484 "is also a destination");
5485 if (RI->isSubRegisterEq(Rn, Rt2))
5486 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5487 "is also a destination");
5488 [[fallthrough]];
5489 }
5490 case AArch64::LDR_ZA:
5491 case AArch64::STR_ZA: {
5492 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5493 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5494 return Error(Loc[1],
5495 "unpredictable instruction, immediate and offset mismatch.");
5496 break;
5497 }
5498 case AArch64::LDPDi:
5499 case AArch64::LDPQi:
5500 case AArch64::LDPSi:
5501 case AArch64::LDPSWi:
5502 case AArch64::LDPWi:
5503 case AArch64::LDPXi: {
5504 MCRegister Rt = Inst.getOperand(0).getReg();
5505 MCRegister Rt2 = Inst.getOperand(1).getReg();
5506 if (Rt == Rt2)
5507 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5508 break;
5509 }
5510 case AArch64::LDPDpost:
5511 case AArch64::LDPDpre:
5512 case AArch64::LDPQpost:
5513 case AArch64::LDPQpre:
5514 case AArch64::LDPSpost:
5515 case AArch64::LDPSpre:
5516 case AArch64::LDPSWpost: {
5517 MCRegister Rt = Inst.getOperand(1).getReg();
5518 MCRegister Rt2 = Inst.getOperand(2).getReg();
5519 if (Rt == Rt2)
5520 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5521 break;
5522 }
5523 case AArch64::STPDpost:
5524 case AArch64::STPDpre:
5525 case AArch64::STPQpost:
5526 case AArch64::STPQpre:
5527 case AArch64::STPSpost:
5528 case AArch64::STPSpre:
5529 case AArch64::STPWpost:
5530 case AArch64::STPWpre:
5531 case AArch64::STPXpost:
5532 case AArch64::STPXpre: {
5533 MCRegister Rt = Inst.getOperand(1).getReg();
5534 MCRegister Rt2 = Inst.getOperand(2).getReg();
5535 MCRegister Rn = Inst.getOperand(3).getReg();
5536 if (RI->isSubRegisterEq(Rn, Rt))
5537 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5538 "is also a source");
5539 if (RI->isSubRegisterEq(Rn, Rt2))
5540 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5541 "is also a source");
5542 break;
5543 }
5544 case AArch64::LDRBBpre:
5545 case AArch64::LDRBpre:
5546 case AArch64::LDRHHpre:
5547 case AArch64::LDRHpre:
5548 case AArch64::LDRSBWpre:
5549 case AArch64::LDRSBXpre:
5550 case AArch64::LDRSHWpre:
5551 case AArch64::LDRSHXpre:
5552 case AArch64::LDRSWpre:
5553 case AArch64::LDRWpre:
5554 case AArch64::LDRXpre:
5555 case AArch64::LDRBBpost:
5556 case AArch64::LDRBpost:
5557 case AArch64::LDRHHpost:
5558 case AArch64::LDRHpost:
5559 case AArch64::LDRSBWpost:
5560 case AArch64::LDRSBXpost:
5561 case AArch64::LDRSHWpost:
5562 case AArch64::LDRSHXpost:
5563 case AArch64::LDRSWpost:
5564 case AArch64::LDRWpost:
5565 case AArch64::LDRXpost: {
5566 MCRegister Rt = Inst.getOperand(1).getReg();
5567 MCRegister Rn = Inst.getOperand(2).getReg();
5568 if (RI->isSubRegisterEq(Rn, Rt))
5569 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5570 "is also a source");
5571 break;
5572 }
5573 case AArch64::STRBBpost:
5574 case AArch64::STRBpost:
5575 case AArch64::STRHHpost:
5576 case AArch64::STRHpost:
5577 case AArch64::STRWpost:
5578 case AArch64::STRXpost:
5579 case AArch64::STRBBpre:
5580 case AArch64::STRBpre:
5581 case AArch64::STRHHpre:
5582 case AArch64::STRHpre:
5583 case AArch64::STRWpre:
5584 case AArch64::STRXpre: {
5585 MCRegister Rt = Inst.getOperand(1).getReg();
5586 MCRegister Rn = Inst.getOperand(2).getReg();
5587 if (RI->isSubRegisterEq(Rn, Rt))
5588 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5589 "is also a source");
5590 break;
5591 }
5592 case AArch64::STXRB:
5593 case AArch64::STXRH:
5594 case AArch64::STXRW:
5595 case AArch64::STXRX:
5596 case AArch64::STLXRB:
5597 case AArch64::STLXRH:
5598 case AArch64::STLXRW:
5599 case AArch64::STLXRX: {
5600 MCRegister Rs = Inst.getOperand(0).getReg();
5601 MCRegister Rt = Inst.getOperand(1).getReg();
5602 MCRegister Rn = Inst.getOperand(2).getReg();
5603 if (RI->isSubRegisterEq(Rt, Rs) ||
5604 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5605 return Error(Loc[0],
5606 "unpredictable STXR instruction, status is also a source");
5607 break;
5608 }
5609 case AArch64::STXPW:
5610 case AArch64::STXPX:
5611 case AArch64::STLXPW:
5612 case AArch64::STLXPX: {
5613 MCRegister Rs = Inst.getOperand(0).getReg();
5614 MCRegister Rt1 = Inst.getOperand(1).getReg();
5615 MCRegister Rt2 = Inst.getOperand(2).getReg();
5616 MCRegister Rn = Inst.getOperand(3).getReg();
5617 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5618 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5619 return Error(Loc[0],
5620 "unpredictable STXP instruction, status is also a source");
5621 break;
5622 }
5623 case AArch64::LDRABwriteback:
5624 case AArch64::LDRAAwriteback: {
5625 MCRegister Xt = Inst.getOperand(0).getReg();
5626 MCRegister Xn = Inst.getOperand(1).getReg();
5627 if (Xt == Xn)
5628 return Error(Loc[0],
5629 "unpredictable LDRA instruction, writeback base"
5630 " is also a destination");
5631 break;
5632 }
5633 }
5634
5635 // Check v8.8-A memops instructions.
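// e.g. "cpyfp [x0]!, [x1]!, x2!" must use three distinct registers; the
// checks below also verify that each writeback operand matches its base.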
5636 switch (Inst.getOpcode()) {
5637 case AArch64::CPYFP:
5638 case AArch64::CPYFPWN:
5639 case AArch64::CPYFPRN:
5640 case AArch64::CPYFPN:
5641 case AArch64::CPYFPWT:
5642 case AArch64::CPYFPWTWN:
5643 case AArch64::CPYFPWTRN:
5644 case AArch64::CPYFPWTN:
5645 case AArch64::CPYFPRT:
5646 case AArch64::CPYFPRTWN:
5647 case AArch64::CPYFPRTRN:
5648 case AArch64::CPYFPRTN:
5649 case AArch64::CPYFPT:
5650 case AArch64::CPYFPTWN:
5651 case AArch64::CPYFPTRN:
5652 case AArch64::CPYFPTN:
5653 case AArch64::CPYFM:
5654 case AArch64::CPYFMWN:
5655 case AArch64::CPYFMRN:
5656 case AArch64::CPYFMN:
5657 case AArch64::CPYFMWT:
5658 case AArch64::CPYFMWTWN:
5659 case AArch64::CPYFMWTRN:
5660 case AArch64::CPYFMWTN:
5661 case AArch64::CPYFMRT:
5662 case AArch64::CPYFMRTWN:
5663 case AArch64::CPYFMRTRN:
5664 case AArch64::CPYFMRTN:
5665 case AArch64::CPYFMT:
5666 case AArch64::CPYFMTWN:
5667 case AArch64::CPYFMTRN:
5668 case AArch64::CPYFMTN:
5669 case AArch64::CPYFE:
5670 case AArch64::CPYFEWN:
5671 case AArch64::CPYFERN:
5672 case AArch64::CPYFEN:
5673 case AArch64::CPYFEWT:
5674 case AArch64::CPYFEWTWN:
5675 case AArch64::CPYFEWTRN:
5676 case AArch64::CPYFEWTN:
5677 case AArch64::CPYFERT:
5678 case AArch64::CPYFERTWN:
5679 case AArch64::CPYFERTRN:
5680 case AArch64::CPYFERTN:
5681 case AArch64::CPYFET:
5682 case AArch64::CPYFETWN:
5683 case AArch64::CPYFETRN:
5684 case AArch64::CPYFETN:
5685 case AArch64::CPYP:
5686 case AArch64::CPYPWN:
5687 case AArch64::CPYPRN:
5688 case AArch64::CPYPN:
5689 case AArch64::CPYPWT:
5690 case AArch64::CPYPWTWN:
5691 case AArch64::CPYPWTRN:
5692 case AArch64::CPYPWTN:
5693 case AArch64::CPYPRT:
5694 case AArch64::CPYPRTWN:
5695 case AArch64::CPYPRTRN:
5696 case AArch64::CPYPRTN:
5697 case AArch64::CPYPT:
5698 case AArch64::CPYPTWN:
5699 case AArch64::CPYPTRN:
5700 case AArch64::CPYPTN:
5701 case AArch64::CPYM:
5702 case AArch64::CPYMWN:
5703 case AArch64::CPYMRN:
5704 case AArch64::CPYMN:
5705 case AArch64::CPYMWT:
5706 case AArch64::CPYMWTWN:
5707 case AArch64::CPYMWTRN:
5708 case AArch64::CPYMWTN:
5709 case AArch64::CPYMRT:
5710 case AArch64::CPYMRTWN:
5711 case AArch64::CPYMRTRN:
5712 case AArch64::CPYMRTN:
5713 case AArch64::CPYMT:
5714 case AArch64::CPYMTWN:
5715 case AArch64::CPYMTRN:
5716 case AArch64::CPYMTN:
5717 case AArch64::CPYE:
5718 case AArch64::CPYEWN:
5719 case AArch64::CPYERN:
5720 case AArch64::CPYEN:
5721 case AArch64::CPYEWT:
5722 case AArch64::CPYEWTWN:
5723 case AArch64::CPYEWTRN:
5724 case AArch64::CPYEWTN:
5725 case AArch64::CPYERT:
5726 case AArch64::CPYERTWN:
5727 case AArch64::CPYERTRN:
5728 case AArch64::CPYERTN:
5729 case AArch64::CPYET:
5730 case AArch64::CPYETWN:
5731 case AArch64::CPYETRN:
5732 case AArch64::CPYETN: {
5733 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5734 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5735 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5736 MCRegister Xd = Inst.getOperand(3).getReg();
5737 MCRegister Xs = Inst.getOperand(4).getReg();
5738 MCRegister Xn = Inst.getOperand(5).getReg();
5739 if (Xd_wb != Xd)
5740 return Error(Loc[0],
5741 "invalid CPY instruction, Xd_wb and Xd do not match");
5742 if (Xs_wb != Xs)
5743 return Error(Loc[0],
5744 "invalid CPY instruction, Xs_wb and Xs do not match");
5745 if (Xn_wb != Xn)
5746 return Error(Loc[0],
5747 "invalid CPY instruction, Xn_wb and Xn do not match");
5748 if (Xd == Xs)
5749 return Error(Loc[0], "invalid CPY instruction, destination and source"
5750 " registers are the same");
5751 if (Xd == Xn)
5752 return Error(Loc[0], "invalid CPY instruction, destination and size"
5753 " registers are the same");
5754 if (Xs == Xn)
5755 return Error(Loc[0], "invalid CPY instruction, source and size"
5756 " registers are the same");
5757 break;
5758 }
5759 case AArch64::SETP:
5760 case AArch64::SETPT:
5761 case AArch64::SETPN:
5762 case AArch64::SETPTN:
5763 case AArch64::SETM:
5764 case AArch64::SETMT:
5765 case AArch64::SETMN:
5766 case AArch64::SETMTN:
5767 case AArch64::SETE:
5768 case AArch64::SETET:
5769 case AArch64::SETEN:
5770 case AArch64::SETETN:
5771 case AArch64::SETGP:
5772 case AArch64::SETGPT:
5773 case AArch64::SETGPN:
5774 case AArch64::SETGPTN:
5775 case AArch64::SETGM:
5776 case AArch64::SETGMT:
5777 case AArch64::SETGMN:
5778 case AArch64::SETGMTN:
5779 case AArch64::MOPSSETGE:
5780 case AArch64::MOPSSETGET:
5781 case AArch64::MOPSSETGEN:
5782 case AArch64::MOPSSETGETN: {
5783 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5784 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5785 MCRegister Xd = Inst.getOperand(2).getReg();
5786 MCRegister Xn = Inst.getOperand(3).getReg();
5787 MCRegister Xm = Inst.getOperand(4).getReg();
5788 if (Xd_wb != Xd)
5789 return Error(Loc[0],
5790 "invalid SET instruction, Xd_wb and Xd do not match");
5791 if (Xn_wb != Xn)
5792 return Error(Loc[0],
5793 "invalid SET instruction, Xn_wb and Xn do not match");
5794 if (Xd == Xn)
5795 return Error(Loc[0], "invalid SET instruction, destination and size"
5796 " registers are the same");
5797 if (Xd == Xm)
5798 return Error(Loc[0], "invalid SET instruction, destination and source"
5799 " registers are the same");
5800 if (Xn == Xm)
5801 return Error(Loc[0], "invalid SET instruction, source and size"
5802 " registers are the same");
5803 break;
5804 }
5805 }
5806
5807 // Now check immediate ranges. Separate from the above as there is overlap
5808 // in the instructions being checked and this keeps the nested conditionals
5809 // to a minimum.
5810 switch (Inst.getOpcode()) {
5811 case AArch64::ADDSWri:
5812 case AArch64::ADDSXri:
5813 case AArch64::ADDWri:
5814 case AArch64::ADDXri:
5815 case AArch64::SUBSWri:
5816 case AArch64::SUBSXri:
5817 case AArch64::SUBWri:
5818 case AArch64::SUBXri: {
5819 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5820 // some slight duplication here.
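// e.g. "add x0, x1, :lo12:sym" is accepted (ADDXri/ADDWri), but such symbol
// references are rejected below for the other add/sub immediate forms.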
5821 if (Inst.getOperand(2).isExpr()) {
5822 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5823 AArch64MCExpr::VariantKind ELFRefKind;
5824 MCSymbolRefExpr::VariantKind DarwinRefKind;
5825 int64_t Addend;
5826 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5827
5828 // Only allow these with ADDXri.
5829 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5830 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5831 Inst.getOpcode() == AArch64::ADDXri)
5832 return false;
5833
5834 // Only allow these with ADDXri/ADDWri
5835 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5836 ELFRefKind == AArch64MCExpr::VK_GOT_AUTH_LO12 ||
5837 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5838 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5839 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5840 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5841 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5842 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5843 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5844 ELFRefKind == AArch64MCExpr::VK_TLSDESC_AUTH_LO12 ||
5845 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5846 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5847 (Inst.getOpcode() == AArch64::ADDXri ||
5848 Inst.getOpcode() == AArch64::ADDWri))
5849 return false;
5850
5851 // Otherwise, don't allow symbol refs in the immediate field.
5852 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5853 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5854 // 'cmp w0, borked')
5855 return Error(Loc.back(), "invalid immediate expression");
5856 }
5857 // We don't validate more complex expressions here
5858 }
5859 return false;
5860 }
5861 default:
5862 return false;
5863 }
5864}
5865
5867 const FeatureBitset &FBS,
5868 unsigned VariantID = 0);
5869
5870bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5871 uint64_t ErrorInfo,
5872 OperandVector &Operands) {
5873 switch (ErrCode) {
5874 case Match_InvalidTiedOperand: {
5875 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5876 if (Op.isVectorList())
5877 return Error(Loc, "operand must match destination register list");
5878
5879 assert(Op.isReg() && "Unexpected operand type");
5880 switch (Op.getRegEqualityTy()) {
5881 case RegConstraintEqualityTy::EqualsSubReg:
5882 return Error(Loc, "operand must be 64-bit form of destination register");
5883 case RegConstraintEqualityTy::EqualsSuperReg:
5884 return Error(Loc, "operand must be 32-bit form of destination register");
5885 case RegConstraintEqualityTy::EqualsReg:
5886 return Error(Loc, "operand must match destination register");
5887 }
5888 llvm_unreachable("Unknown RegConstraintEqualityTy");
5889 }
5890 case Match_MissingFeature:
5891 return Error(Loc,
5892 "instruction requires a CPU feature not currently enabled");
5893 case Match_InvalidOperand:
5894 return Error(Loc, "invalid operand for instruction");
5895 case Match_InvalidSuffix:
5896 return Error(Loc, "invalid type suffix for instruction");
5897 case Match_InvalidCondCode:
5898 return Error(Loc, "expected AArch64 condition code");
5899 case Match_AddSubRegExtendSmall:
5900 return Error(Loc,
5901 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5902 case Match_AddSubRegExtendLarge:
5903 return Error(Loc,
5904 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5905 case Match_AddSubSecondSource:
5906 return Error(Loc,
5907 "expected compatible register, symbol or integer in range [0, 4095]");
5908 case Match_LogicalSecondSource:
5909 return Error(Loc, "expected compatible register or logical immediate");
5910 case Match_InvalidMovImm32Shift:
5911 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5912 case Match_InvalidMovImm64Shift:
5913 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5914 case Match_AddSubRegShift32:
5915 return Error(Loc,
5916 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5917 case Match_AddSubRegShift64:
5918 return Error(Loc,
5919 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5920 case Match_InvalidFPImm:
5921 return Error(Loc,
5922 "expected compatible register or floating-point constant");
5923 case Match_InvalidMemoryIndexedSImm6:
5924 return Error(Loc, "index must be an integer in range [-32, 31].");
5925 case Match_InvalidMemoryIndexedSImm5:
5926 return Error(Loc, "index must be an integer in range [-16, 15].");
5927 case Match_InvalidMemoryIndexed1SImm4:
5928 return Error(Loc, "index must be an integer in range [-8, 7].");
5929 case Match_InvalidMemoryIndexed2SImm4:
5930 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5931 case Match_InvalidMemoryIndexed3SImm4:
5932 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5933 case Match_InvalidMemoryIndexed4SImm4:
5934 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5935 case Match_InvalidMemoryIndexed16SImm4:
5936 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5937 case Match_InvalidMemoryIndexed32SImm4:
5938 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5939 case Match_InvalidMemoryIndexed1SImm6:
5940 return Error(Loc, "index must be an integer in range [-32, 31].");
5941 case Match_InvalidMemoryIndexedSImm8:
5942 return Error(Loc, "index must be an integer in range [-128, 127].");
5943 case Match_InvalidMemoryIndexedSImm9:
5944 return Error(Loc, "index must be an integer in range [-256, 255].");
5945 case Match_InvalidMemoryIndexed16SImm9:
5946 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5947 case Match_InvalidMemoryIndexed8SImm10:
5948 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5949 case Match_InvalidMemoryIndexed4SImm7:
5950 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5951 case Match_InvalidMemoryIndexed8SImm7:
5952 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5953 case Match_InvalidMemoryIndexed16SImm7:
5954 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5955 case Match_InvalidMemoryIndexed8UImm5:
5956 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5957 case Match_InvalidMemoryIndexed8UImm3:
5958 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5959 case Match_InvalidMemoryIndexed4UImm5:
5960 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5961 case Match_InvalidMemoryIndexed2UImm5:
5962 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5963 case Match_InvalidMemoryIndexed8UImm6:
5964 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5965 case Match_InvalidMemoryIndexed16UImm6:
5966 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5967 case Match_InvalidMemoryIndexed4UImm6:
5968 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5969 case Match_InvalidMemoryIndexed2UImm6:
5970 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5971 case Match_InvalidMemoryIndexed1UImm6:
5972 return Error(Loc, "index must be in range [0, 63].");
5973 case Match_InvalidMemoryWExtend8:
5974 return Error(Loc,
5975 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5976 case Match_InvalidMemoryWExtend16:
5977 return Error(Loc,
5978 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5979 case Match_InvalidMemoryWExtend32:
5980 return Error(Loc,
5981 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5982 case Match_InvalidMemoryWExtend64:
5983 return Error(Loc,
5984 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5985 case Match_InvalidMemoryWExtend128:
5986 return Error(Loc,
5987 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5988 case Match_InvalidMemoryXExtend8:
5989 return Error(Loc,
5990 "expected 'lsl' or 'sxtx' with optional shift of #0");
5991 case Match_InvalidMemoryXExtend16:
5992 return Error(Loc,
5993 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5994 case Match_InvalidMemoryXExtend32:
5995 return Error(Loc,
5996 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5997 case Match_InvalidMemoryXExtend64:
5998 return Error(Loc,
5999 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6000 case Match_InvalidMemoryXExtend128:
6001 return Error(Loc,
6002 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6003 case Match_InvalidMemoryIndexed1:
6004 return Error(Loc, "index must be an integer in range [0, 4095].");
6005 case Match_InvalidMemoryIndexed2:
6006 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6007 case Match_InvalidMemoryIndexed4:
6008 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6009 case Match_InvalidMemoryIndexed8:
6010 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6011 case Match_InvalidMemoryIndexed16:
6012 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6013 case Match_InvalidImm0_0:
6014 return Error(Loc, "immediate must be 0.");
6015 case Match_InvalidImm0_1:
6016 return Error(Loc, "immediate must be an integer in range [0, 1].");
6017 case Match_InvalidImm0_3:
6018 return Error(Loc, "immediate must be an integer in range [0, 3].");
6019 case Match_InvalidImm0_7:
6020 return Error(Loc, "immediate must be an integer in range [0, 7].");
6021 case Match_InvalidImm0_15:
6022 return Error(Loc, "immediate must be an integer in range [0, 15].");
6023 case Match_InvalidImm0_31:
6024 return Error(Loc, "immediate must be an integer in range [0, 31].");
6025 case Match_InvalidImm0_63:
6026 return Error(Loc, "immediate must be an integer in range [0, 63].");
6027 case Match_InvalidImm0_127:
6028 return Error(Loc, "immediate must be an integer in range [0, 127].");
6029 case Match_InvalidImm0_255:
6030 return Error(Loc, "immediate must be an integer in range [0, 255].");
6031 case Match_InvalidImm0_65535:
6032 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6033 case Match_InvalidImm1_8:
6034 return Error(Loc, "immediate must be an integer in range [1, 8].");
6035 case Match_InvalidImm1_16:
6036 return Error(Loc, "immediate must be an integer in range [1, 16].");
6037 case Match_InvalidImm1_32:
6038 return Error(Loc, "immediate must be an integer in range [1, 32].");
6039 case Match_InvalidImm1_64:
6040 return Error(Loc, "immediate must be an integer in range [1, 64].");
6041 case Match_InvalidImmM1_62:
6042 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6043 case Match_InvalidMemoryIndexedRange2UImm0:
6044 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6045 case Match_InvalidMemoryIndexedRange2UImm1:
6046 return Error(Loc, "vector select offset must be an immediate range of the "
6047 "form <immf>:<imml>, where the first "
6048 "immediate is a multiple of 2 in the range [0, 2], and "
6049 "the second immediate is immf + 1.");
6050 case Match_InvalidMemoryIndexedRange2UImm2:
6051 case Match_InvalidMemoryIndexedRange2UImm3:
6052 return Error(
6053 Loc,
6054 "vector select offset must be an immediate range of the form "
6055 "<immf>:<imml>, "
6056 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6057 "[0, 14] "
6058 "depending on the instruction, and the second immediate is immf + 1.");
6059 case Match_InvalidMemoryIndexedRange4UImm0:
6060 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6061 case Match_InvalidMemoryIndexedRange4UImm1:
6062 case Match_InvalidMemoryIndexedRange4UImm2:
6063 return Error(
6064 Loc,
6065 "vector select offset must be an immediate range of the form "
6066 "<immf>:<imml>, "
6067 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6068 "[0, 12] "
6069 "depending on the instruction, and the second immediate is immf + 3.");
6070 case Match_InvalidSVEAddSubImm8:
6071 return Error(Loc, "immediate must be an integer in range [0, 255]"
6072 " with a shift amount of 0");
6073 case Match_InvalidSVEAddSubImm16:
6074 case Match_InvalidSVEAddSubImm32:
6075 case Match_InvalidSVEAddSubImm64:
6076 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6077 "multiple of 256 in range [256, 65280]");
6078 case Match_InvalidSVECpyImm8:
6079 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6080 " with a shift amount of 0");
6081 case Match_InvalidSVECpyImm16:
6082 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6083 "multiple of 256 in range [-32768, 65280]");
6084 case Match_InvalidSVECpyImm32:
6085 case Match_InvalidSVECpyImm64:
6086 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6087 "multiple of 256 in range [-32768, 32512]");
6088 case Match_InvalidIndexRange0_0:
6089 return Error(Loc, "expected lane specifier '[0]'");
6090 case Match_InvalidIndexRange1_1:
6091 return Error(Loc, "expected lane specifier '[1]'");
6092 case Match_InvalidIndexRange0_15:
6093 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6094 case Match_InvalidIndexRange0_7:
6095 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6096 case Match_InvalidIndexRange0_3:
6097 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6098 case Match_InvalidIndexRange0_1:
6099 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6100 case Match_InvalidSVEIndexRange0_63:
6101 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6102 case Match_InvalidSVEIndexRange0_31:
6103 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6104 case Match_InvalidSVEIndexRange0_15:
6105 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6106 case Match_InvalidSVEIndexRange0_7:
6107 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6108 case Match_InvalidSVEIndexRange0_3:
6109 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6110 case Match_InvalidLabel:
6111 return Error(Loc, "expected label or encodable integer pc offset");
6112 case Match_MRS:
6113 return Error(Loc, "expected readable system register");
6114 case Match_MSR:
6115 case Match_InvalidSVCR:
6116 return Error(Loc, "expected writable system register or pstate");
6117 case Match_InvalidComplexRotationEven:
6118 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6119 case Match_InvalidComplexRotationOdd:
6120 return Error(Loc, "complex rotation must be 90 or 270.");
6121 case Match_MnemonicFail: {
6122 std::string Suggestion = AArch64MnemonicSpellCheck(
6123 ((AArch64Operand &)*Operands[0]).getToken(),
6124 ComputeAvailableFeatures(STI->getFeatureBits()));
6125 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6126 }
6127 case Match_InvalidGPR64shifted8:
6128 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6129 case Match_InvalidGPR64shifted16:
6130 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6131 case Match_InvalidGPR64shifted32:
6132 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6133 case Match_InvalidGPR64shifted64:
6134 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6135 case Match_InvalidGPR64shifted128:
6136 return Error(
6137 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6138 case Match_InvalidGPR64NoXZRshifted8:
6139 return Error(Loc, "register must be x0..x30 without shift");
6140 case Match_InvalidGPR64NoXZRshifted16:
6141 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6142 case Match_InvalidGPR64NoXZRshifted32:
6143 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6144 case Match_InvalidGPR64NoXZRshifted64:
6145 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6146 case Match_InvalidGPR64NoXZRshifted128:
6147 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6148 case Match_InvalidZPR32UXTW8:
6149 case Match_InvalidZPR32SXTW8:
6150 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6151 case Match_InvalidZPR32UXTW16:
6152 case Match_InvalidZPR32SXTW16:
6153 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6154 case Match_InvalidZPR32UXTW32:
6155 case Match_InvalidZPR32SXTW32:
6156 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6157 case Match_InvalidZPR32UXTW64:
6158 case Match_InvalidZPR32SXTW64:
6159 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6160 case Match_InvalidZPR64UXTW8:
6161 case Match_InvalidZPR64SXTW8:
6162 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6163 case Match_InvalidZPR64UXTW16:
6164 case Match_InvalidZPR64SXTW16:
6165 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6166 case Match_InvalidZPR64UXTW32:
6167 case Match_InvalidZPR64SXTW32:
6168 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6169 case Match_InvalidZPR64UXTW64:
6170 case Match_InvalidZPR64SXTW64:
6171 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6172 case Match_InvalidZPR32LSL8:
6173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6174 case Match_InvalidZPR32LSL16:
6175 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6176 case Match_InvalidZPR32LSL32:
6177 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6178 case Match_InvalidZPR32LSL64:
6179 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6180 case Match_InvalidZPR64LSL8:
6181 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6182 case Match_InvalidZPR64LSL16:
6183 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6184 case Match_InvalidZPR64LSL32:
6185 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6186 case Match_InvalidZPR64LSL64:
6187 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6188 case Match_InvalidZPR0:
6189 return Error(Loc, "expected register without element width suffix");
6190 case Match_InvalidZPR8:
6191 case Match_InvalidZPR16:
6192 case Match_InvalidZPR32:
6193 case Match_InvalidZPR64:
6194 case Match_InvalidZPR128:
6195 return Error(Loc, "invalid element width");
6196 case Match_InvalidZPR_3b8:
6197 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6198 case Match_InvalidZPR_3b16:
6199 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6200 case Match_InvalidZPR_3b32:
6201 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6202 case Match_InvalidZPR_4b8:
6203 return Error(Loc,
6204 "Invalid restricted vector register, expected z0.b..z15.b");
6205 case Match_InvalidZPR_4b16:
6206 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6207 case Match_InvalidZPR_4b32:
6208 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6209 case Match_InvalidZPR_4b64:
6210 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6211 case Match_InvalidZPRMul2_Lo8:
6212 return Error(Loc, "Invalid restricted vector register, expected even "
6213 "register in z0.b..z14.b");
6214 case Match_InvalidZPRMul2_Hi8:
6215 return Error(Loc, "Invalid restricted vector register, expected even "
6216 "register in z16.b..z30.b");
6217 case Match_InvalidZPRMul2_Lo16:
6218 return Error(Loc, "Invalid restricted vector register, expected even "
6219 "register in z0.h..z14.h");
6220 case Match_InvalidZPRMul2_Hi16:
6221 return Error(Loc, "Invalid restricted vector register, expected even "
6222 "register in z16.h..z30.h");
6223 case Match_InvalidZPRMul2_Lo32:
6224 return Error(Loc, "Invalid restricted vector register, expected even "
6225 "register in z0.s..z14.s");
6226 case Match_InvalidZPRMul2_Hi32:
6227 return Error(Loc, "Invalid restricted vector register, expected even "
6228 "register in z16.s..z30.s");
6229 case Match_InvalidZPRMul2_Lo64:
6230 return Error(Loc, "Invalid restricted vector register, expected even "
6231 "register in z0.d..z14.d");
6232 case Match_InvalidZPRMul2_Hi64:
6233 return Error(Loc, "Invalid restricted vector register, expected even "
6234 "register in z16.d..z30.d");
6235 case Match_InvalidZPR_K0:
6236 return Error(Loc, "invalid restricted vector register, expected register "
6237 "in z20..z23 or z28..z31");
6238 case Match_InvalidSVEPattern:
6239 return Error(Loc, "invalid predicate pattern");
6240 case Match_InvalidSVEPPRorPNRAnyReg:
6241 case Match_InvalidSVEPPRorPNRBReg:
6242 case Match_InvalidSVEPredicateAnyReg:
6243 case Match_InvalidSVEPredicateBReg:
6244 case Match_InvalidSVEPredicateHReg:
6245 case Match_InvalidSVEPredicateSReg:
6246 case Match_InvalidSVEPredicateDReg:
6247 return Error(Loc, "invalid predicate register.");
6248 case Match_InvalidSVEPredicate3bAnyReg:
6249 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6250 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6251 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6252 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6253 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6254 return Error(Loc, "Invalid predicate register, expected PN in range "
6255 "pn8..pn15 with element suffix.");
6256 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6257 return Error(Loc, "invalid restricted predicate-as-counter register "
6258 "expected pn8..pn15");
6259 case Match_InvalidSVEPNPredicateBReg:
6260 case Match_InvalidSVEPNPredicateHReg:
6261 case Match_InvalidSVEPNPredicateSReg:
6262 case Match_InvalidSVEPNPredicateDReg:
6263 return Error(Loc, "Invalid predicate register, expected PN in range "
6264 "pn0..pn15 with element suffix.");
6265 case Match_InvalidSVEVecLenSpecifier:
6266 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6267 case Match_InvalidSVEPredicateListMul2x8:
6268 case Match_InvalidSVEPredicateListMul2x16:
6269 case Match_InvalidSVEPredicateListMul2x32:
6270 case Match_InvalidSVEPredicateListMul2x64:
6271 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6272 "predicate registers, where the first vector is a multiple of 2 "
6273 "and with correct element type");
6274 case Match_InvalidSVEExactFPImmOperandHalfOne:
6275 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6276 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6277 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6278 case Match_InvalidSVEExactFPImmOperandZeroOne:
6279 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6280 case Match_InvalidMatrixTileVectorH8:
6281 case Match_InvalidMatrixTileVectorV8:
6282 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6283 case Match_InvalidMatrixTileVectorH16:
6284 case Match_InvalidMatrixTileVectorV16:
6285 return Error(Loc,
6286 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6287 case Match_InvalidMatrixTileVectorH32:
6288 case Match_InvalidMatrixTileVectorV32:
6289 return Error(Loc,
6290 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6291 case Match_InvalidMatrixTileVectorH64:
6292 case Match_InvalidMatrixTileVectorV64:
6293 return Error(Loc,
6294 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6295 case Match_InvalidMatrixTileVectorH128:
6296 case Match_InvalidMatrixTileVectorV128:
6297 return Error(Loc,
6298 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6299 case Match_InvalidMatrixTile16:
6300 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6301 case Match_InvalidMatrixTile32:
6302 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6303 case Match_InvalidMatrixTile64:
6304 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6305 case Match_InvalidMatrix:
6306 return Error(Loc, "invalid matrix operand, expected za");
6307 case Match_InvalidMatrix8:
6308 return Error(Loc, "invalid matrix operand, expected suffix .b");
6309 case Match_InvalidMatrix16:
6310 return Error(Loc, "invalid matrix operand, expected suffix .h");
6311 case Match_InvalidMatrix32:
6312 return Error(Loc, "invalid matrix operand, expected suffix .s");
6313 case Match_InvalidMatrix64:
6314 return Error(Loc, "invalid matrix operand, expected suffix .d");
6315 case Match_InvalidMatrixIndexGPR32_12_15:
6316 return Error(Loc, "operand must be a register in range [w12, w15]");
6317 case Match_InvalidMatrixIndexGPR32_8_11:
6318 return Error(Loc, "operand must be a register in range [w8, w11]");
6319 case Match_InvalidSVEVectorList2x8Mul2:
6320 case Match_InvalidSVEVectorList2x16Mul2:
6321 case Match_InvalidSVEVectorList2x32Mul2:
6322 case Match_InvalidSVEVectorList2x64Mul2:
6323 case Match_InvalidSVEVectorList2x128Mul2:
6324 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6325 "SVE vectors, where the first vector is a multiple of 2 "
6326 "and with matching element types");
6327 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6328 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6329 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6330 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6331 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6332 "SVE vectors in the range z0-z14, where the first vector "
6333 "is a multiple of 2 "
6334 "and with matching element types");
6335 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6336 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6337 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6338 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6339 return Error(Loc,
6340 "Invalid vector list, expected list with 2 consecutive "
6341 "SVE vectors in the range z16-z30, where the first vector "
6342 "is a multiple of 2 "
6343 "and with matching element types");
6344 case Match_InvalidSVEVectorList4x8Mul4:
6345 case Match_InvalidSVEVectorList4x16Mul4:
6346 case Match_InvalidSVEVectorList4x32Mul4:
6347 case Match_InvalidSVEVectorList4x64Mul4:
6348 case Match_InvalidSVEVectorList4x128Mul4:
6349 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6350 "SVE vectors, where the first vector is a multiple of 4 "
6351 "and with matching element types");
6352 case Match_InvalidLookupTable:
6353 return Error(Loc, "Invalid lookup table, expected zt0");
6354 case Match_InvalidSVEVectorListStrided2x8:
6355 case Match_InvalidSVEVectorListStrided2x16:
6356 case Match_InvalidSVEVectorListStrided2x32:
6357 case Match_InvalidSVEVectorListStrided2x64:
6358 return Error(
6359 Loc,
6360 "Invalid vector list, expected list with each SVE vector in the list "
6361 "8 registers apart, and the first register in the range [z0, z7] or "
6362 "[z16, z23] and with correct element type");
6363 case Match_InvalidSVEVectorListStrided4x8:
6364 case Match_InvalidSVEVectorListStrided4x16:
6365 case Match_InvalidSVEVectorListStrided4x32:
6366 case Match_InvalidSVEVectorListStrided4x64:
6367 return Error(
6368 Loc,
6369 "Invalid vector list, expected list with each SVE vector in the list "
6370 "4 registers apart, and the first register in the range [z0, z3] or "
6371 "[z16, z19] and with correct element type");
6372 case Match_AddSubLSLImm3ShiftLarge:
6373 return Error(Loc,
6374 "expected 'lsl' with optional integer in range [0, 7]");
6375 default:
6376 llvm_unreachable("unexpected error code!");
6377 }
6378}
6379
6380static const char *getSubtargetFeatureName(uint64_t Val);
6381
6382bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6384 MCStreamer &Out,
6386 bool MatchingInlineAsm) {
6387 assert(!Operands.empty() && "Unexpect empty operand list!");
6388 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6389 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6390
6391 StringRef Tok = Op.getToken();
6392 unsigned NumOperands = Operands.size();
6393
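// The immediate "lsl" form is an alias of UBFM, e.g. "lsl w0, w1, #3" is
// rewritten below to "ubfm w0, w1, #29, #28".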
6394 if (NumOperands == 4 && Tok == "lsl") {
6395 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6396 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6397 if (Op2.isScalarReg() && Op3.isImm()) {
6398 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6399 if (Op3CE) {
6400 uint64_t Op3Val = Op3CE->getValue();
6401 uint64_t NewOp3Val = 0;
6402 uint64_t NewOp4Val = 0;
6403 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6404 Op2.getReg())) {
6405 NewOp3Val = (32 - Op3Val) & 0x1f;
6406 NewOp4Val = 31 - Op3Val;
6407 } else {
6408 NewOp3Val = (64 - Op3Val) & 0x3f;
6409 NewOp4Val = 63 - Op3Val;
6410 }
6411
6412 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6413 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6414
6415 Operands[0] =
6416 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6417 Operands.push_back(AArch64Operand::CreateImm(
6418 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6419 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6420 Op3.getEndLoc(), getContext());
6421 }
6422 }
6423 } else if (NumOperands == 4 && Tok == "bfc") {
6424 // FIXME: Horrible hack to handle BFC->BFM alias.
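// e.g. "bfc w0, #3, #4" is rewritten to "bfm w0, wzr, #29, #3".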
6425 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6426 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6427 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6428
6429 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6430 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6431 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6432
6433 if (LSBCE && WidthCE) {
6434 uint64_t LSB = LSBCE->getValue();
6435 uint64_t Width = WidthCE->getValue();
6436
6437 uint64_t RegWidth = 0;
6438 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6439 Op1.getReg()))
6440 RegWidth = 64;
6441 else
6442 RegWidth = 32;
6443
6444 if (LSB >= RegWidth)
6445 return Error(LSBOp.getStartLoc(),
6446 "expected integer in range [0, 31]");
6447 if (Width < 1 || Width > RegWidth)
6448 return Error(WidthOp.getStartLoc(),
6449 "expected integer in range [1, 32]");
6450
6451 uint64_t ImmR = 0;
6452 if (RegWidth == 32)
6453 ImmR = (32 - LSB) & 0x1f;
6454 else
6455 ImmR = (64 - LSB) & 0x3f;
6456
6457 uint64_t ImmS = Width - 1;
6458
6459 if (ImmR != 0 && ImmS >= ImmR)
6460 return Error(WidthOp.getStartLoc(),
6461 "requested insert overflows register");
6462
6463 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6464 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6465 Operands[0] =
6466 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6467 Operands[2] = AArch64Operand::CreateReg(
6468 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6469 SMLoc(), SMLoc(), getContext());
6470 Operands[3] = AArch64Operand::CreateImm(
6471 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6472 Operands.emplace_back(
6473 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6474 WidthOp.getEndLoc(), getContext()));
6475 }
6476 }
6477 } else if (NumOperands == 5) {
6478 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6479 // UBFIZ -> UBFM aliases.
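// For example, "bfi x0, x1, #8, #16" becomes "bfm x0, x1, #56, #15"
// (ImmR = (64 - 8) & 0x3f, ImmS = 16 - 1).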
6480 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6481 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6482 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6483 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6484
6485 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6486 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6487 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6488
6489 if (Op3CE && Op4CE) {
6490 uint64_t Op3Val = Op3CE->getValue();
6491 uint64_t Op4Val = Op4CE->getValue();
6492
6493 uint64_t RegWidth = 0;
6494 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6495 Op1.getReg()))
6496 RegWidth = 64;
6497 else
6498 RegWidth = 32;
6499
6500 if (Op3Val >= RegWidth)
6501 return Error(Op3.getStartLoc(),
6502 "expected integer in range [0, 31]");
6503 if (Op4Val < 1 || Op4Val > RegWidth)
6504 return Error(Op4.getStartLoc(),
6505 "expected integer in range [1, 32]");
6506
6507 uint64_t NewOp3Val = 0;
6508 if (RegWidth == 32)
6509 NewOp3Val = (32 - Op3Val) & 0x1f;
6510 else
6511 NewOp3Val = (64 - Op3Val) & 0x3f;
6512
6513 uint64_t NewOp4Val = Op4Val - 1;
6514
6515 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6516 return Error(Op4.getStartLoc(),
6517 "requested insert overflows register");
6518
6519 const MCExpr *NewOp3 =
6520 MCConstantExpr::create(NewOp3Val, getContext());
6521 const MCExpr *NewOp4 =
6522 MCConstantExpr::create(NewOp4Val, getContext());
6523 Operands[3] = AArch64Operand::CreateImm(
6524 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6525 Operands[4] = AArch64Operand::CreateImm(
6526 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6527 if (Tok == "bfi")
6528 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6529 getContext());
6530 else if (Tok == "sbfiz")
6531 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6532 getContext());
6533 else if (Tok == "ubfiz")
6534 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6535 getContext());
6536 else
6537 llvm_unreachable("No valid mnemonic for alias?");
6538 }
6539 }
6540
6541 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6542 // UBFX -> UBFM aliases.
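// For example, "ubfx w0, w1, #4, #8" becomes "ubfm w0, w1, #4, #11"
// (the width operand is rewritten to lsb + width - 1).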
6543 } else if (NumOperands == 5 &&
6544 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6545 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6546 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6547 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6548
6549 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6550 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6551 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6552
6553 if (Op3CE && Op4CE) {
6554 uint64_t Op3Val = Op3CE->getValue();
6555 uint64_t Op4Val = Op4CE->getValue();
6556
6557 uint64_t RegWidth = 0;
6558 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6559 Op1.getReg()))
6560 RegWidth = 64;
6561 else
6562 RegWidth = 32;
6563
6564 if (Op3Val >= RegWidth)
6565 return Error(Op3.getStartLoc(),
6566 "expected integer in range [0, 31]");
6567 if (Op4Val < 1 || Op4Val > RegWidth)
6568 return Error(Op4.getStartLoc(),
6569 "expected integer in range [1, 32]");
6570
6571 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6572
6573 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6574 return Error(Op4.getStartLoc(),
6575 "requested extract overflows register");
6576
6577 const MCExpr *NewOp4 =
6578 MCConstantExpr::create(NewOp4Val, getContext());
6579 Operands[4] = AArch64Operand::CreateImm(
6580 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6581 if (Tok == "bfxil")
6582 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6583 getContext());
6584 else if (Tok == "sbfx")
6585 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6586 getContext());
6587 else if (Tok == "ubfx")
6588 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6589 getContext());
6590 else
6591 llvm_unreachable("No valid mnemonic for alias?");
6592 }
6593 }
6594 }
6595 }
6596
6597 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6598 // instruction for FP registers correctly in some rare circumstances. Convert
6599 // it to a safe instruction and warn (because silently changing someone's
6600 // assembly is rude).
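// For example, "movi.2d v0, #0" is emitted as the equivalent "movi.16b v0, #0".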
6601 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6602 NumOperands == 4 && Tok == "movi") {
6603 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6604 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6605 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6606 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6607 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6608 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6609 if (Suffix.lower() == ".2d" &&
6610 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6611 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6612 " correctly on this CPU, converting to equivalent movi.16b");
6613 // Switch the suffix to .16b.
6614 unsigned Idx = Op1.isToken() ? 1 : 2;
6615 Operands[Idx] =
6616 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6617 }
6618 }
6619 }
6620
6621 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6622 // InstAlias can't quite handle this since the reg classes aren't
6623 // subclasses.
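// For example, in "sxtw x0, w1" the source operand w1 is rewritten to x1 so
// that it matches the GPR64 operand class the matcher expects.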
6624 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6625 // The source register can be Wn here, but the matcher expects a
6626 // GPR64. Twiddle it here if necessary.
6627 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6628 if (Op.isScalarReg()) {
6629 MCRegister Reg = getXRegFromWReg(Op.getReg());
6630 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6631 Op.getStartLoc(), Op.getEndLoc(),
6632 getContext());
6633 }
6634 }
6635 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6636 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6637 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6638 if (Op.isScalarReg() &&
6639 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6640 Op.getReg())) {
6641 // The source register can be Wn here, but the matcher expects a
6642 // GPR64. Twiddle it here if necessary.
6643 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6644 if (Op.isScalarReg()) {
6645 MCRegister Reg = getXRegFromWReg(Op.getReg());
6646 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6647 Op.getStartLoc(),
6648 Op.getEndLoc(), getContext());
6649 }
6650 }
6651 }
6652 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6653 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6654 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6655 if (Op.isScalarReg() &&
6656 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6657 Op.getReg())) {
6658 // The source register can be Wn here, but the matcher expects a
6659 // GPR32. Twiddle it here if necessary.
6660 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6661 if (Op.isScalarReg()) {
6662 MCRegister Reg = getWRegFromXReg(Op.getReg());
6663 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6664 Op.getStartLoc(),
6665 Op.getEndLoc(), getContext());
6666 }
6667 }
6668 }
6669
6670 MCInst Inst;
6671 FeatureBitset MissingFeatures;
6672 // First try to match against the secondary set of tables containing the
6673 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6674 unsigned MatchResult =
6675 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6676 MatchingInlineAsm, 1);
6677
6678 // If that fails, try against the alternate table containing long-form NEON:
6679 // "fadd v0.2s, v1.2s, v2.2s"
6680 if (MatchResult != Match_Success) {
6681 // But first, save the short-form match result: we can use it in case the
6682 // long-form match also fails.
6683 auto ShortFormNEONErrorInfo = ErrorInfo;
6684 auto ShortFormNEONMatchResult = MatchResult;
6685 auto ShortFormNEONMissingFeatures = MissingFeatures;
6686
6687 MatchResult =
6688 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6689 MatchingInlineAsm, 0);
6690
6691 // If both matches failed and the long-form match failed on the mnemonic
6692 // suffix token operand, the short-form match failure is probably more
6693 // relevant: use it instead.
6694 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6695 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6696 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6697 MatchResult = ShortFormNEONMatchResult;
6698 ErrorInfo = ShortFormNEONErrorInfo;
6699 MissingFeatures = ShortFormNEONMissingFeatures;
6700 }
6701 }
6702
6703 switch (MatchResult) {
6704 case Match_Success: {
6705 // Perform range checking and other semantic validations
6706 SmallVector<SMLoc, 8> OperandLocs;
6707 NumOperands = Operands.size();
6708 for (unsigned i = 1; i < NumOperands; ++i)
6709 OperandLocs.push_back(Operands[i]->getStartLoc());
6710 if (validateInstruction(Inst, IDLoc, OperandLocs))
6711 return true;
6712
6713 Inst.setLoc(IDLoc);
6714 Out.emitInstruction(Inst, getSTI());
6715 return false;
6716 }
6717 case Match_MissingFeature: {
6718 assert(MissingFeatures.any() && "Unknown missing feature!");
6719 // Special case the error message for the very common case where only
6720 // a single subtarget feature is missing (e.g. NEON).
6721 std::string Msg = "instruction requires:";
6722 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6723 if (MissingFeatures[i]) {
6724 Msg += " ";
6725 Msg += getSubtargetFeatureName(i);
6726 }
6727 }
6728 return Error(IDLoc, Msg);
6729 }
6730 case Match_MnemonicFail:
6731 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6732 case Match_InvalidOperand: {
6733 SMLoc ErrorLoc = IDLoc;
6734
6735 if (ErrorInfo != ~0ULL) {
6736 if (ErrorInfo >= Operands.size())
6737 return Error(IDLoc, "too few operands for instruction",
6738 SMRange(IDLoc, getTok().getLoc()));
6739
6740 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6741 if (ErrorLoc == SMLoc())
6742 ErrorLoc = IDLoc;
6743 }
6744 // If the match failed on a suffix token operand, tweak the diagnostic
6745 // accordingly.
6746 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6747 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6748 MatchResult = Match_InvalidSuffix;
6749
6750 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6751 }
6752 case Match_InvalidTiedOperand:
6753 case Match_InvalidMemoryIndexed1:
6754 case Match_InvalidMemoryIndexed2:
6755 case Match_InvalidMemoryIndexed4:
6756 case Match_InvalidMemoryIndexed8:
6757 case Match_InvalidMemoryIndexed16:
6758 case Match_InvalidCondCode:
6759 case Match_AddSubLSLImm3ShiftLarge:
6760 case Match_AddSubRegExtendSmall:
6761 case Match_AddSubRegExtendLarge:
6762 case Match_AddSubSecondSource:
6763 case Match_LogicalSecondSource:
6764 case Match_AddSubRegShift32:
6765 case Match_AddSubRegShift64:
6766 case Match_InvalidMovImm32Shift:
6767 case Match_InvalidMovImm64Shift:
6768 case Match_InvalidFPImm:
6769 case Match_InvalidMemoryWExtend8:
6770 case Match_InvalidMemoryWExtend16:
6771 case Match_InvalidMemoryWExtend32:
6772 case Match_InvalidMemoryWExtend64:
6773 case Match_InvalidMemoryWExtend128:
6774 case Match_InvalidMemoryXExtend8:
6775 case Match_InvalidMemoryXExtend16:
6776 case Match_InvalidMemoryXExtend32:
6777 case Match_InvalidMemoryXExtend64:
6778 case Match_InvalidMemoryXExtend128:
6779 case Match_InvalidMemoryIndexed1SImm4:
6780 case Match_InvalidMemoryIndexed2SImm4:
6781 case Match_InvalidMemoryIndexed3SImm4:
6782 case Match_InvalidMemoryIndexed4SImm4:
6783 case Match_InvalidMemoryIndexed1SImm6:
6784 case Match_InvalidMemoryIndexed16SImm4:
6785 case Match_InvalidMemoryIndexed32SImm4:
6786 case Match_InvalidMemoryIndexed4SImm7:
6787 case Match_InvalidMemoryIndexed8SImm7:
6788 case Match_InvalidMemoryIndexed16SImm7:
6789 case Match_InvalidMemoryIndexed8UImm5:
6790 case Match_InvalidMemoryIndexed8UImm3:
6791 case Match_InvalidMemoryIndexed4UImm5:
6792 case Match_InvalidMemoryIndexed2UImm5:
6793 case Match_InvalidMemoryIndexed1UImm6:
6794 case Match_InvalidMemoryIndexed2UImm6:
6795 case Match_InvalidMemoryIndexed4UImm6:
6796 case Match_InvalidMemoryIndexed8UImm6:
6797 case Match_InvalidMemoryIndexed16UImm6:
6798 case Match_InvalidMemoryIndexedSImm6:
6799 case Match_InvalidMemoryIndexedSImm5:
6800 case Match_InvalidMemoryIndexedSImm8:
6801 case Match_InvalidMemoryIndexedSImm9:
6802 case Match_InvalidMemoryIndexed16SImm9:
6803 case Match_InvalidMemoryIndexed8SImm10:
6804 case Match_InvalidImm0_0:
6805 case Match_InvalidImm0_1:
6806 case Match_InvalidImm0_3:
6807 case Match_InvalidImm0_7:
6808 case Match_InvalidImm0_15:
6809 case Match_InvalidImm0_31:
6810 case Match_InvalidImm0_63:
6811 case Match_InvalidImm0_127:
6812 case Match_InvalidImm0_255:
6813 case Match_InvalidImm0_65535:
6814 case Match_InvalidImm1_8:
6815 case Match_InvalidImm1_16:
6816 case Match_InvalidImm1_32:
6817 case Match_InvalidImm1_64:
6818 case Match_InvalidImmM1_62:
6819 case Match_InvalidMemoryIndexedRange2UImm0:
6820 case Match_InvalidMemoryIndexedRange2UImm1:
6821 case Match_InvalidMemoryIndexedRange2UImm2:
6822 case Match_InvalidMemoryIndexedRange2UImm3:
6823 case Match_InvalidMemoryIndexedRange4UImm0:
6824 case Match_InvalidMemoryIndexedRange4UImm1:
6825 case Match_InvalidMemoryIndexedRange4UImm2:
6826 case Match_InvalidSVEAddSubImm8:
6827 case Match_InvalidSVEAddSubImm16:
6828 case Match_InvalidSVEAddSubImm32:
6829 case Match_InvalidSVEAddSubImm64:
6830 case Match_InvalidSVECpyImm8:
6831 case Match_InvalidSVECpyImm16:
6832 case Match_InvalidSVECpyImm32:
6833 case Match_InvalidSVECpyImm64:
6834 case Match_InvalidIndexRange0_0:
6835 case Match_InvalidIndexRange1_1:
6836 case Match_InvalidIndexRange0_15:
6837 case Match_InvalidIndexRange0_7:
6838 case Match_InvalidIndexRange0_3:
6839 case Match_InvalidIndexRange0_1:
6840 case Match_InvalidSVEIndexRange0_63:
6841 case Match_InvalidSVEIndexRange0_31:
6842 case Match_InvalidSVEIndexRange0_15:
6843 case Match_InvalidSVEIndexRange0_7:
6844 case Match_InvalidSVEIndexRange0_3:
6845 case Match_InvalidLabel:
6846 case Match_InvalidComplexRotationEven:
6847 case Match_InvalidComplexRotationOdd:
6848 case Match_InvalidGPR64shifted8:
6849 case Match_InvalidGPR64shifted16:
6850 case Match_InvalidGPR64shifted32:
6851 case Match_InvalidGPR64shifted64:
6852 case Match_InvalidGPR64shifted128:
6853 case Match_InvalidGPR64NoXZRshifted8:
6854 case Match_InvalidGPR64NoXZRshifted16:
6855 case Match_InvalidGPR64NoXZRshifted32:
6856 case Match_InvalidGPR64NoXZRshifted64:
6857 case Match_InvalidGPR64NoXZRshifted128:
6858 case Match_InvalidZPR32UXTW8:
6859 case Match_InvalidZPR32UXTW16:
6860 case Match_InvalidZPR32UXTW32:
6861 case Match_InvalidZPR32UXTW64:
6862 case Match_InvalidZPR32SXTW8:
6863 case Match_InvalidZPR32SXTW16:
6864 case Match_InvalidZPR32SXTW32:
6865 case Match_InvalidZPR32SXTW64:
6866 case Match_InvalidZPR64UXTW8:
6867 case Match_InvalidZPR64SXTW8:
6868 case Match_InvalidZPR64UXTW16:
6869 case Match_InvalidZPR64SXTW16:
6870 case Match_InvalidZPR64UXTW32:
6871 case Match_InvalidZPR64SXTW32:
6872 case Match_InvalidZPR64UXTW64:
6873 case Match_InvalidZPR64SXTW64:
6874 case Match_InvalidZPR32LSL8:
6875 case Match_InvalidZPR32LSL16:
6876 case Match_InvalidZPR32LSL32:
6877 case Match_InvalidZPR32LSL64:
6878 case Match_InvalidZPR64LSL8:
6879 case Match_InvalidZPR64LSL16:
6880 case Match_InvalidZPR64LSL32:
6881 case Match_InvalidZPR64LSL64:
6882 case Match_InvalidZPR0:
6883 case Match_InvalidZPR8:
6884 case Match_InvalidZPR16:
6885 case Match_InvalidZPR32:
6886 case Match_InvalidZPR64:
6887 case Match_InvalidZPR128:
6888 case Match_InvalidZPR_3b8:
6889 case Match_InvalidZPR_3b16:
6890 case Match_InvalidZPR_3b32:
6891 case Match_InvalidZPR_4b8:
6892 case Match_InvalidZPR_4b16:
6893 case Match_InvalidZPR_4b32:
6894 case Match_InvalidZPR_4b64:
6895 case Match_InvalidSVEPPRorPNRAnyReg:
6896 case Match_InvalidSVEPPRorPNRBReg:
6897 case Match_InvalidSVEPredicateAnyReg:
6898 case Match_InvalidSVEPattern:
6899 case Match_InvalidSVEVecLenSpecifier:
6900 case Match_InvalidSVEPredicateBReg:
6901 case Match_InvalidSVEPredicateHReg:
6902 case Match_InvalidSVEPredicateSReg:
6903 case Match_InvalidSVEPredicateDReg:
6904 case Match_InvalidSVEPredicate3bAnyReg:
6905 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6906 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6907 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6908 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6909 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6910 case Match_InvalidSVEPNPredicateBReg:
6911 case Match_InvalidSVEPNPredicateHReg:
6912 case Match_InvalidSVEPNPredicateSReg:
6913 case Match_InvalidSVEPNPredicateDReg:
6914 case Match_InvalidSVEPredicateListMul2x8:
6915 case Match_InvalidSVEPredicateListMul2x16:
6916 case Match_InvalidSVEPredicateListMul2x32:
6917 case Match_InvalidSVEPredicateListMul2x64:
6918 case Match_InvalidSVEExactFPImmOperandHalfOne:
6919 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6920 case Match_InvalidSVEExactFPImmOperandZeroOne:
6921 case Match_InvalidMatrixTile16:
6922 case Match_InvalidMatrixTile32:
6923 case Match_InvalidMatrixTile64:
6924 case Match_InvalidMatrix:
6925 case Match_InvalidMatrix8:
6926 case Match_InvalidMatrix16:
6927 case Match_InvalidMatrix32:
6928 case Match_InvalidMatrix64:
6929 case Match_InvalidMatrixTileVectorH8:
6930 case Match_InvalidMatrixTileVectorH16:
6931 case Match_InvalidMatrixTileVectorH32:
6932 case Match_InvalidMatrixTileVectorH64:
6933 case Match_InvalidMatrixTileVectorH128:
6934 case Match_InvalidMatrixTileVectorV8:
6935 case Match_InvalidMatrixTileVectorV16:
6936 case Match_InvalidMatrixTileVectorV32:
6937 case Match_InvalidMatrixTileVectorV64:
6938 case Match_InvalidMatrixTileVectorV128:
6939 case Match_InvalidSVCR:
6940 case Match_InvalidMatrixIndexGPR32_12_15:
6941 case Match_InvalidMatrixIndexGPR32_8_11:
6942 case Match_InvalidLookupTable:
6943 case Match_InvalidZPRMul2_Lo8:
6944 case Match_InvalidZPRMul2_Hi8:
6945 case Match_InvalidZPRMul2_Lo16:
6946 case Match_InvalidZPRMul2_Hi16:
6947 case Match_InvalidZPRMul2_Lo32:
6948 case Match_InvalidZPRMul2_Hi32:
6949 case Match_InvalidZPRMul2_Lo64:
6950 case Match_InvalidZPRMul2_Hi64:
6951 case Match_InvalidZPR_K0:
6952 case Match_InvalidSVEVectorList2x8Mul2:
6953 case Match_InvalidSVEVectorList2x16Mul2:
6954 case Match_InvalidSVEVectorList2x32Mul2:
6955 case Match_InvalidSVEVectorList2x64Mul2:
6956 case Match_InvalidSVEVectorList2x128Mul2:
6957 case Match_InvalidSVEVectorList4x8Mul4:
6958 case Match_InvalidSVEVectorList4x16Mul4:
6959 case Match_InvalidSVEVectorList4x32Mul4:
6960 case Match_InvalidSVEVectorList4x64Mul4:
6961 case Match_InvalidSVEVectorList4x128Mul4:
6962 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6963 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6964 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6965 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6966 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6967 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6968 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6969 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6970 case Match_InvalidSVEVectorListStrided2x8:
6971 case Match_InvalidSVEVectorListStrided2x16:
6972 case Match_InvalidSVEVectorListStrided2x32:
6973 case Match_InvalidSVEVectorListStrided2x64:
6974 case Match_InvalidSVEVectorListStrided4x8:
6975 case Match_InvalidSVEVectorListStrided4x16:
6976 case Match_InvalidSVEVectorListStrided4x32:
6977 case Match_InvalidSVEVectorListStrided4x64:
6978 case Match_MSR:
6979 case Match_MRS: {
6980 if (ErrorInfo >= Operands.size())
6981 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6982 // Any time we get here, there's nothing fancy to do. Just get the
6983 // operand SMLoc and display the diagnostic.
6984 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6985 if (ErrorLoc == SMLoc())
6986 ErrorLoc = IDLoc;
6987 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6988 }
6989 }
6990
6991 llvm_unreachable("Implement any new match types added!");
6992}
6993
6994 /// ParseDirective parses the AArch64-specific directives.
6995bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6996 const MCContext::Environment Format = getContext().getObjectFileType();
6997 bool IsMachO = Format == MCContext::IsMachO;
6998 bool IsCOFF = Format == MCContext::IsCOFF;
6999 bool IsELF = Format == MCContext::IsELF;
7000
7001 auto IDVal = DirectiveID.getIdentifier().lower();
7002 SMLoc Loc = DirectiveID.getLoc();
7003 if (IDVal == ".arch")
7004 parseDirectiveArch(Loc);
7005 else if (IDVal == ".cpu")
7006 parseDirectiveCPU(Loc);
7007 else if (IDVal == ".tlsdesccall")
7008 parseDirectiveTLSDescCall(Loc);
7009 else if (IDVal == ".ltorg" || IDVal == ".pool")
7010 parseDirectiveLtorg(Loc);
7011 else if (IDVal == ".unreq")
7012 parseDirectiveUnreq(Loc);
7013 else if (IDVal == ".inst")
7014 parseDirectiveInst(Loc);
7015 else if (IDVal == ".cfi_negate_ra_state")
7016 parseDirectiveCFINegateRAState();
7017 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7018 parseDirectiveCFINegateRAStateWithPC();
7019 else if (IDVal == ".cfi_b_key_frame")
7020 parseDirectiveCFIBKeyFrame();
7021 else if (IDVal == ".cfi_mte_tagged_frame")
7022 parseDirectiveCFIMTETaggedFrame();
7023 else if (IDVal == ".arch_extension")
7024 parseDirectiveArchExtension(Loc);
7025 else if (IDVal == ".variant_pcs")
7026 parseDirectiveVariantPCS(Loc);
7027 else if (IsMachO) {
7028 if (IDVal == MCLOHDirectiveName())
7029 parseDirectiveLOH(IDVal, Loc);
7030 else
7031 return true;
7032 } else if (IsCOFF) {
7033 if (IDVal == ".seh_stackalloc")
7034 parseDirectiveSEHAllocStack(Loc);
7035 else if (IDVal == ".seh_endprologue")
7036 parseDirectiveSEHPrologEnd(Loc);
7037 else if (IDVal == ".seh_save_r19r20_x")
7038 parseDirectiveSEHSaveR19R20X(Loc);
7039 else if (IDVal == ".seh_save_fplr")
7040 parseDirectiveSEHSaveFPLR(Loc);
7041 else if (IDVal == ".seh_save_fplr_x")
7042 parseDirectiveSEHSaveFPLRX(Loc);
7043 else if (IDVal == ".seh_save_reg")
7044 parseDirectiveSEHSaveReg(Loc);
7045 else if (IDVal == ".seh_save_reg_x")
7046 parseDirectiveSEHSaveRegX(Loc);
7047 else if (IDVal == ".seh_save_regp")
7048 parseDirectiveSEHSaveRegP(Loc);
7049 else if (IDVal == ".seh_save_regp_x")
7050 parseDirectiveSEHSaveRegPX(Loc);
7051 else if (IDVal == ".seh_save_lrpair")
7052 parseDirectiveSEHSaveLRPair(Loc);
7053 else if (IDVal == ".seh_save_freg")
7054 parseDirectiveSEHSaveFReg(Loc);
7055 else if (IDVal == ".seh_save_freg_x")
7056 parseDirectiveSEHSaveFRegX(Loc);
7057 else if (IDVal == ".seh_save_fregp")
7058 parseDirectiveSEHSaveFRegP(Loc);
7059 else if (IDVal == ".seh_save_fregp_x")
7060 parseDirectiveSEHSaveFRegPX(Loc);
7061 else if (IDVal == ".seh_set_fp")
7062 parseDirectiveSEHSetFP(Loc);
7063 else if (IDVal == ".seh_add_fp")
7064 parseDirectiveSEHAddFP(Loc);
7065 else if (IDVal == ".seh_nop")
7066 parseDirectiveSEHNop(Loc);
7067 else if (IDVal == ".seh_save_next")
7068 parseDirectiveSEHSaveNext(Loc);
7069 else if (IDVal == ".seh_startepilogue")
7070 parseDirectiveSEHEpilogStart(Loc);
7071 else if (IDVal == ".seh_endepilogue")
7072 parseDirectiveSEHEpilogEnd(Loc);
7073 else if (IDVal == ".seh_trap_frame")
7074 parseDirectiveSEHTrapFrame(Loc);
7075 else if (IDVal == ".seh_pushframe")
7076 parseDirectiveSEHMachineFrame(Loc);
7077 else if (IDVal == ".seh_context")
7078 parseDirectiveSEHContext(Loc);
7079 else if (IDVal == ".seh_ec_context")
7080 parseDirectiveSEHECContext(Loc);
7081 else if (IDVal == ".seh_clear_unwound_to_call")
7082 parseDirectiveSEHClearUnwoundToCall(Loc);
7083 else if (IDVal == ".seh_pac_sign_lr")
7084 parseDirectiveSEHPACSignLR(Loc);
7085 else if (IDVal == ".seh_save_any_reg")
7086 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7087 else if (IDVal == ".seh_save_any_reg_p")
7088 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7089 else if (IDVal == ".seh_save_any_reg_x")
7090 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7091 else if (IDVal == ".seh_save_any_reg_px")
7092 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7093 else
7094 return true;
7095 } else if (IsELF) {
7096 if (IDVal == ".aeabi_subsection")
7097 parseDirectiveAeabiSubSectionHeader(Loc);
7098 else if (IDVal == ".aeabi_attribute")
7099 parseDirectiveAeabiAArch64Attr(Loc);
7100 else
7101 return true;
7102 } else
7103 return true;
7104 return false;
7105}
7106
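// Expand the legacy "crypto"/"nocrypto" meta-extension into its constituent
// features: e.g. ".arch armv8.2-a+crypto" enables sha2 and aes, while on
// Armv8.4-A and later "+crypto" additionally enables sm4 and sha3.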
7107static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7108 SmallVector<StringRef, 4> &RequestedExtensions) {
7109 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7110 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7111
7112 if (!NoCrypto && Crypto) {
7113 // Map 'generic' (and others) to sha2 and aes, because
7114 // that was the traditional meaning of crypto.
7115 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7116 ArchInfo == AArch64::ARMV8_3A) {
7117 RequestedExtensions.push_back("sha2");
7118 RequestedExtensions.push_back("aes");
7119 }
7120 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7121 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7122 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7123 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7124 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7125 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7126 RequestedExtensions.push_back("sm4");
7127 RequestedExtensions.push_back("sha3");
7128 RequestedExtensions.push_back("sha2");
7129 RequestedExtensions.push_back("aes");
7130 }
7131 } else if (NoCrypto) {
7132 // Likewise, map 'nocrypto' to the negated forms of the features that
7133 // 'crypto' traditionally implied for each architecture.
7134 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7135 ArchInfo == AArch64::ARMV8_3A) {
7136 RequestedExtensions.push_back("nosha2");
7137 RequestedExtensions.push_back("noaes");
7138 }
7139 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7140 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7141 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7142 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7143 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7144 ArchInfo == AArch64::ARMV9_4A) {
7145 RequestedExtensions.push_back("nosm4");
7146 RequestedExtensions.push_back("nosha3");
7147 RequestedExtensions.push_back("nosha2");
7148 RequestedExtensions.push_back("noaes");
7149 }
7150 }
7151}
7152
7153 static SMLoc incrementLoc(SMLoc L, int Offset) {
7154 return SMLoc::getFromPointer(L.getPointer() + Offset);
7155}
7156
7157/// parseDirectiveArch
7158/// ::= .arch token
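/// e.g. .arch armv8-a+crypto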
7159bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7160 SMLoc CurLoc = getLoc();
7161
7162 StringRef Arch, ExtensionString;
7163 std::tie(Arch, ExtensionString) =
7164 getParser().parseStringToEndOfStatement().trim().split('+');
7165
7166 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7167 if (!ArchInfo)
7168 return Error(CurLoc, "unknown arch name");
7169
7170 if (parseToken(AsmToken::EndOfStatement))
7171 return true;
7172
7173 // Get the architecture and extension features.
7174 std::vector<StringRef> AArch64Features;
7175 AArch64Features.push_back(ArchInfo->ArchFeature);
7176 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7177
7178 MCSubtargetInfo &STI = copySTI();
7179 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7180 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7181 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7182
7183 SmallVector<StringRef, 4> RequestedExtensions;
7184 if (!ExtensionString.empty())
7185 ExtensionString.split(RequestedExtensions, '+');
7186
7187 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7188 CurLoc = incrementLoc(CurLoc, Arch.size());
7189
7190 for (auto Name : RequestedExtensions) {
7191 // Advance source location past '+'.
7192 CurLoc = incrementLoc(CurLoc, 1);
7193
7194 bool EnableFeature = !Name.consume_front_insensitive("no");
7195
7196 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7197 return Extension.Name == Name;
7198 });
7199
7200 if (It == std::end(ExtensionMap))
7201 return Error(CurLoc, "unsupported architectural extension: " + Name);
7202
7203 if (EnableFeature)
7204 STI.SetFeatureBitsTransitively(It->Features);
7205 else
7206 STI.ClearFeatureBitsTransitively(It->Features);
7207 CurLoc = incrementLoc(CurLoc, Name.size());
7208 }
7209 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7210 setAvailableFeatures(Features);
7211 return false;
7212}
7213
7214/// parseDirectiveArchExtension
7215/// ::= .arch_extension [no]feature
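/// e.g. .arch_extension crc / .arch_extension nocrc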
7216bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7217 SMLoc ExtLoc = getLoc();
7218
7219 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7220
7221 if (parseEOL())
7222 return true;
7223
7224 bool EnableFeature = true;
7225 if (Name.starts_with_insensitive("no")) {
7226 EnableFeature = false;
7227 Name = Name.substr(2);
7228 }
7229
7230 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7231 return Extension.Name == Name;
7232 });
7233
7234 if (It == std::end(ExtensionMap))
7235 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7236
7237 MCSubtargetInfo &STI = copySTI();
7238 if (EnableFeature)
7239 STI.SetFeatureBitsTransitively(It->Features);
7240 else
7241 STI.ClearFeatureBitsTransitively(It->Features);
7242 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7243 setAvailableFeatures(Features);
7244 return false;
7245}
7246
7247/// parseDirectiveCPU
7248/// ::= .cpu id
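/// e.g. .cpu cortex-a57+crypto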
7249bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7250 SMLoc CurLoc = getLoc();
7251
7252 StringRef CPU, ExtensionString;
7253 std::tie(CPU, ExtensionString) =
7254 getParser().parseStringToEndOfStatement().trim().split('+');
7255
7256 if (parseToken(AsmToken::EndOfStatement))
7257 return true;
7258
7259 SmallVector<StringRef, 4> RequestedExtensions;
7260 if (!ExtensionString.empty())
7261 ExtensionString.split(RequestedExtensions, '+');
7262
7263 const AArch64::ArchInfo *CpuArch = AArch64::getArchForCpu(CPU);
7264 if (!CpuArch) {
7265 Error(CurLoc, "unknown CPU name");
7266 return false;
7267 }
7268 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7269
7270 MCSubtargetInfo &STI = copySTI();
7271 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7272 CurLoc = incrementLoc(CurLoc, CPU.size());
7273
7274 for (auto Name : RequestedExtensions) {
7275 // Advance source location past '+'.
7276 CurLoc = incrementLoc(CurLoc, 1);
7277
7278 bool EnableFeature = !Name.consume_front_insensitive("no");
7279
7280 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7281 return Extension.Name == Name;
7282 });
7283
7284 if (It == std::end(ExtensionMap))
7285 return Error(CurLoc, "unsupported architectural extension: " + Name);
7286
7287 if (EnableFeature)
7288 STI.SetFeatureBitsTransitively(It->Features);
7289 else
7290 STI.ClearFeatureBitsTransitively(It->Features);
7291 CurLoc = incrementLoc(CurLoc, Name.size());
7292 }
7293 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7294 setAvailableFeatures(Features);
7295 return false;
7296}
7297
7298/// parseDirectiveInst
7299/// ::= .inst opcode [, ...]
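/// e.g. .inst 0xd503201f, 0xd503203f (the NOP and YIELD encodings)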
7300bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7301 if (getLexer().is(AsmToken::EndOfStatement))
7302 return Error(Loc, "expected expression following '.inst' directive");
7303
7304 auto parseOp = [&]() -> bool {
7305 SMLoc L = getLoc();
7306 const MCExpr *Expr = nullptr;
7307 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7308 return true;
7309 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7310 if (check(!Value, L, "expected constant expression"))
7311 return true;
7312 getTargetStreamer().emitInst(Value->getValue());
7313 return false;
7314 };
7315
7316 return parseMany(parseOp);
7317}
7318
7319// parseDirectiveTLSDescCall:
7320// ::= .tlsdesccall symbol
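// e.g. .tlsdesccall var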
7321 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7322 StringRef Name;
7323 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7324 parseToken(AsmToken::EndOfStatement))
7325 return true;
7326
7327 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7328 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7329 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7330
7331 MCInst Inst;
7332 Inst.setOpcode(AArch64::TLSDESCCALL);
7333 Inst.addOperand(MCOperand::createExpr(Expr));
7334
7335 getParser().getStreamer().emitInstruction(Inst, getSTI());
7336 return false;
7337}
7338
7339/// ::= .loh <lohName | lohId> label1, ..., labelN
7340/// The number of arguments depends on the loh identifier.
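/// e.g. .loh AdrpAdd L1, L2 (AdrpAdd takes two label arguments)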
7341 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7342 MCLOHType Kind;
7343 if (getTok().isNot(AsmToken::Identifier)) {
7344 if (getTok().isNot(AsmToken::Integer))
7345 return TokError("expected an identifier or a number in directive");
7346 // We successfully get a numeric value for the identifier.
7347 // Check if it is valid.
7348 int64_t Id = getTok().getIntVal();
7349 if (Id <= -1U && !isValidMCLOHType(Id))
7350 return TokError("invalid numeric identifier in directive");
7351 Kind = (MCLOHType)Id;
7352 } else {
7353 StringRef Name = getTok().getIdentifier();
7354 // We successfully parse an identifier.
7355 // Check if it is a recognized one.
7356 int Id = MCLOHNameToId(Name);
7357
7358 if (Id == -1)
7359 return TokError("invalid identifier in directive");
7360 Kind = (MCLOHType)Id;
7361 }
7362 // Consume the identifier.
7363 Lex();
7364 // Get the number of arguments of this LOH.
7365 int NbArgs = MCLOHIdToNbArgs(Kind);
7366
7367 assert(NbArgs != -1 && "Invalid number of arguments");
7368
7370 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7372 if (getParser().parseIdentifier(Name))
7373 return TokError("expected identifier in directive");
7374 Args.push_back(getContext().getOrCreateSymbol(Name));
7375
7376 if (Idx + 1 == NbArgs)
7377 break;
7378 if (parseComma())
7379 return true;
7380 }
7381 if (parseEOL())
7382 return true;
7383
7384 getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7385 return false;
7386}
7387
7388/// parseDirectiveLtorg
7389/// ::= .ltorg | .pool
7390bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7391 if (parseEOL())
7392 return true;
7393 getTargetStreamer().emitCurrentConstantPool();
7394 return false;
7395}
7396
7397/// parseDirectiveReq
7398/// ::= name .req registername
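/// e.g. "foo .req x4" lets "foo" be written wherever x4 would be accepted.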
7399bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7400 Lex(); // Eat the '.req' token.
7401 SMLoc SRegLoc = getLoc();
7402 RegKind RegisterKind = RegKind::Scalar;
7403 MCRegister RegNum;
7404 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7405
7406 if (!ParseRes.isSuccess()) {
7408 RegisterKind = RegKind::NeonVector;
7409 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7410
7411 if (ParseRes.isFailure())
7412 return true;
7413
7414 if (ParseRes.isSuccess() && !Kind.empty())
7415 return Error(SRegLoc, "vector register without type specifier expected");
7416 }
7417
7418 if (!ParseRes.isSuccess()) {
7419 StringRef Kind;
7420 RegisterKind = RegKind::SVEDataVector;
7421 ParseRes =
7422 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7423
7424 if (ParseRes.isFailure())
7425 return true;
7426
7427 if (ParseRes.isSuccess() && !Kind.empty())
7428 return Error(SRegLoc,
7429 "sve vector register without type specifier expected");
7430 }
7431
7432 if (!ParseRes.isSuccess()) {
7433 StringRef Kind;
7434 RegisterKind = RegKind::SVEPredicateVector;
7435 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7436
7437 if (ParseRes.isFailure())
7438 return true;
7439
7440 if (ParseRes.isSuccess() && !Kind.empty())
7441 return Error(SRegLoc,
7442 "sve predicate register without type specifier expected");
7443 }
7444
7445 if (!ParseRes.isSuccess())
7446 return Error(SRegLoc, "register name or alias expected");
7447
7448 // Shouldn't be anything else.
7449 if (parseEOL())
7450 return true;
7451
7452 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7453 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7454 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7455
7456 return false;
7457}
7458
7459 /// parseDirectiveUnreq
7460/// ::= .unreq registername
7461bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7462 if (getTok().isNot(AsmToken::Identifier))
7463 return TokError("unexpected input in .unreq directive.");
7464 RegisterReqs.erase(getTok().getIdentifier().lower());
7465 Lex(); // Eat the identifier.
7466 return parseToken(AsmToken::EndOfStatement);
7467}
7468
7469bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7470 if (parseEOL())
7471 return true;
7472 getStreamer().emitCFINegateRAState();
7473 return false;
7474}
7475
7476bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7477 if (parseEOL())
7478 return true;
7479 getStreamer().emitCFINegateRAStateWithPC();
7480 return false;
7481}
7482
7483/// parseDirectiveCFIBKeyFrame
7484/// ::= .cfi_b_key
7485bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7486 if (parseEOL())
7487 return true;
7488 getStreamer().emitCFIBKeyFrame();
7489 return false;
7490}
7491
7492/// parseDirectiveCFIMTETaggedFrame
7493/// ::= .cfi_mte_tagged_frame
7494bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7495 if (parseEOL())
7496 return true;
7497 getStreamer().emitCFIMTETaggedFrame();
7498 return false;
7499}
7500
7501/// parseDirectiveVariantPCS
7502/// ::= .variant_pcs symbolname
7503 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7504 StringRef Name;
7505 if (getParser().parseIdentifier(Name))
7506 return TokError("expected symbol name");
7507 if (parseEOL())
7508 return true;
7509 getTargetStreamer().emitDirectiveVariantPCS(
7510 getContext().getOrCreateSymbol(Name));
7511 return false;
7512}
7513
7514/// parseDirectiveSEHAllocStack
7515/// ::= .seh_stackalloc
7516bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7517 int64_t Size;
7518 if (parseImmExpr(Size))
7519 return true;
7520 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7521 return false;
7522}
7523
7524/// parseDirectiveSEHPrologEnd
7525/// ::= .seh_endprologue
7526bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7527 getTargetStreamer().emitARM64WinCFIPrologEnd();
7528 return false;
7529}
7530
7531/// parseDirectiveSEHSaveR19R20X
7532/// ::= .seh_save_r19r20_x
7533bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7534 int64_t Offset;
7535 if (parseImmExpr(Offset))
7536 return true;
7537 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7538 return false;
7539}
7540
7541/// parseDirectiveSEHSaveFPLR
7542/// ::= .seh_save_fplr
7543bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7544 int64_t Offset;
7545 if (parseImmExpr(Offset))
7546 return true;
7547 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7548 return false;
7549}
7550
7551/// parseDirectiveSEHSaveFPLRX
7552/// ::= .seh_save_fplr_x
7553bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7554 int64_t Offset;
7555 if (parseImmExpr(Offset))
7556 return true;
7557 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7558 return false;
7559}
7560
7561/// parseDirectiveSEHSaveReg
7562/// ::= .seh_save_reg
7563bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7564 unsigned Reg;
7565 int64_t Offset;
7566 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7567 parseComma() || parseImmExpr(Offset))
7568 return true;
7569 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7570 return false;
7571}
7572
7573/// parseDirectiveSEHSaveRegX
7574/// ::= .seh_save_reg_x
7575bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7576 unsigned Reg;
7577 int64_t Offset;
7578 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7579 parseComma() || parseImmExpr(Offset))
7580 return true;
7581 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7582 return false;
7583}
7584
7585/// parseDirectiveSEHSaveRegP
7586/// ::= .seh_save_regp
7587bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7588 unsigned Reg;
7589 int64_t Offset;
7590 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7591 parseComma() || parseImmExpr(Offset))
7592 return true;
7593 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7594 return false;
7595}
7596
7597/// parseDirectiveSEHSaveRegPX
7598/// ::= .seh_save_regp_x
7599bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7600 unsigned Reg;
7601 int64_t Offset;
7602 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7603 parseComma() || parseImmExpr(Offset))
7604 return true;
7605 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7606 return false;
7607}
7608
7609/// parseDirectiveSEHSaveLRPair
7610/// ::= .seh_save_lrpair
7611bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7612 unsigned Reg;
7613 int64_t Offset;
7614 L = getLoc();
7615 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7616 parseComma() || parseImmExpr(Offset))
7617 return true;
7618 if (check(((Reg - 19) % 2 != 0), L,
7619 "expected register with even offset from x19"))
7620 return true;
7621 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7622 return false;
7623}
7624
7625/// parseDirectiveSEHSaveFReg
7626/// ::= .seh_save_freg
7627bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7628 unsigned Reg;
7629 int64_t Offset;
7630 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7631 parseComma() || parseImmExpr(Offset))
7632 return true;
7633 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7634 return false;
7635}
7636
7637/// parseDirectiveSEHSaveFRegX
7638/// ::= .seh_save_freg_x
7639bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7640 unsigned Reg;
7641 int64_t Offset;
7642 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7643 parseComma() || parseImmExpr(Offset))
7644 return true;
7645 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7646 return false;
7647}
7648
7649/// parseDirectiveSEHSaveFRegP
7650/// ::= .seh_save_fregp
7651bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7652 unsigned Reg;
7653 int64_t Offset;
7654 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7655 parseComma() || parseImmExpr(Offset))
7656 return true;
7657 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7658 return false;
7659}
7660
7661/// parseDirectiveSEHSaveFRegPX
7662/// ::= .seh_save_fregp_x
7663bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7664 unsigned Reg;
7665 int64_t Offset;
7666 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7667 parseComma() || parseImmExpr(Offset))
7668 return true;
7669 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7670 return false;
7671}
7672
7673/// parseDirectiveSEHSetFP
7674/// ::= .seh_set_fp
7675bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7676 getTargetStreamer().emitARM64WinCFISetFP();
7677 return false;
7678}
7679
7680/// parseDirectiveSEHAddFP
7681/// ::= .seh_add_fp
7682bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7683 int64_t Size;
7684 if (parseImmExpr(Size))
7685 return true;
7686 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7687 return false;
7688}
7689
7690/// parseDirectiveSEHNop
7691/// ::= .seh_nop
7692bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7693 getTargetStreamer().emitARM64WinCFINop();
7694 return false;
7695}
7696
7697/// parseDirectiveSEHSaveNext
7698/// ::= .seh_save_next
7699bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7700 getTargetStreamer().emitARM64WinCFISaveNext();
7701 return false;
7702}
7703
7704/// parseDirectiveSEHEpilogStart
7705/// ::= .seh_startepilogue
7706bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7707 getTargetStreamer().emitARM64WinCFIEpilogStart();
7708 return false;
7709}
7710
7711/// parseDirectiveSEHEpilogEnd
7712/// ::= .seh_endepilogue
7713bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7714 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7715 return false;
7716}
7717
7718/// parseDirectiveSEHTrapFrame
7719/// ::= .seh_trap_frame
7720bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7721 getTargetStreamer().emitARM64WinCFITrapFrame();
7722 return false;
7723}
7724
7725/// parseDirectiveSEHMachineFrame
7726/// ::= .seh_pushframe
7727bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7728 getTargetStreamer().emitARM64WinCFIMachineFrame();
7729 return false;
7730}
7731
7732/// parseDirectiveSEHContext
7733/// ::= .seh_context
7734bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7735 getTargetStreamer().emitARM64WinCFIContext();
7736 return false;
7737}
7738
7739/// parseDirectiveSEHECContext
7740/// ::= .seh_ec_context
7741bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7742 getTargetStreamer().emitARM64WinCFIECContext();
7743 return false;
7744}
7745
7746/// parseDirectiveSEHClearUnwoundToCall
7747/// ::= .seh_clear_unwound_to_call
7748bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7749 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7750 return false;
7751}
7752
7753/// parseDirectiveSEHPACSignLR
7754/// ::= .seh_pac_sign_lr
7755bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7756 getTargetStreamer().emitARM64WinCFIPACSignLR();
7757 return false;
7758}
7759
7760/// parseDirectiveSEHSaveAnyReg
7761/// ::= .seh_save_any_reg
7762/// ::= .seh_save_any_reg_p
7763/// ::= .seh_save_any_reg_x
7764/// ::= .seh_save_any_reg_px
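/// e.g. .seh_save_any_reg_px q14, 32 (offset must be a non-negative multiple of 16)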
7765 bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7766 bool Writeback) {
7767 MCRegister Reg;
7768 SMLoc Start, End;
7769 int64_t Offset;
7770 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7771 parseComma() || parseImmExpr(Offset))
7772 return true;
7773
7774 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7775 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7776 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7777 return Error(L, "invalid save_any_reg offset");
7778 unsigned EncodedReg;
7779 if (Reg == AArch64::FP)
7780 EncodedReg = 29;
7781 else if (Reg == AArch64::LR)
7782 EncodedReg = 30;
7783 else
7784 EncodedReg = Reg - AArch64::X0;
7785 if (Paired) {
7786 if (Reg == AArch64::LR)
7787 return Error(Start, "lr cannot be paired with another register");
7788 if (Writeback)
7789 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7790 else
7791 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7792 } else {
7793 if (Writeback)
7794 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7795 else
7796 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7797 }
7798 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7799 unsigned EncodedReg = Reg - AArch64::D0;
7800 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7801 return Error(L, "invalid save_any_reg offset");
7802 if (Paired) {
7803 if (Reg == AArch64::D31)
7804 return Error(Start, "d31 cannot be paired with another register");
7805 if (Writeback)
7806 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7807 else
7808 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7809 } else {
7810 if (Writeback)
7811 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7812 else
7813 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7814 }
7815 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7816 unsigned EncodedReg = Reg - AArch64::Q0;
7817 if (Offset < 0 || Offset % 16)
7818 return Error(L, "invalid save_any_reg offset");
7819 if (Paired) {
7820 if (Reg == AArch64::Q31)
7821 return Error(Start, "q31 cannot be paired with another register");
7822 if (Writeback)
7823 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7824 else
7825 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7826 } else {
7827 if (Writeback)
7828 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7829 else
7830 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7831 }
7832 } else {
7833 return Error(Start, "save_any_reg register must be x, q or d register");
7834 }
7835 return false;
7836}
7837
7838bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
7839 // Expecting 3 AsmToken::Identifier tokens after '.aeabi_subsection': a
7840 // subsection name and 2 parameters, separated by 2 commas, e.g.:
7841 // .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
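// Another accepted form: .aeabi_subsection aeabi_pauthabi, required, uleb128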
7842 MCAsmParser &Parser = getParser();
7843
7844 // Consume the name (subsection name)
7845 StringRef SubsectionName;
7846 AArch64BuildAttributes::VendorID SubsectionNameID;
7847 if (Parser.getTok().is(AsmToken::Identifier)) {
7848 SubsectionName = Parser.getTok().getIdentifier();
7849 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
7850 } else {
7851 Error(Parser.getTok().getLoc(), "subsection name not found");
7852 return true;
7853 }
7854 Parser.Lex();
7855 // consume a comma
7856 // parseComma() returns *false* on success and already calls Lex(), so
7857 // there is no need to call Lex() again.
7858 if (Parser.parseComma()) {
7859 return true;
7860 }
7861
7862 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
7863 getTargetStreamer().getAtributesSubsectionByName(SubsectionName);
7864
7865 // Consume the first parameter (optionality parameter)
7866 AArch64BuildAttributes::SubsectionOptional IsOptional;
7867 // options: optional/required
7868 if (Parser.getTok().is(AsmToken::Identifier)) {
7869 StringRef Optionality = Parser.getTok().getIdentifier();
7870 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
7871 if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
7872 Error(Parser.getTok().getLoc(),
7873 "unknown AArch64 build attributes optionality, expected required|optional: " +
7874 Optionality);
7875 return true;
7876 }
7877 if (SubsectionExists) {
7878 if (IsOptional != SubsectionExists->IsOptional) {
7879 Error(Parser.getTok().getLoc(),
7880 "optionality mismatch! subsection '" + SubsectionName +
7881 "' already exists with optionality defined as '" +
7883 SubsectionExists->IsOptional) +
7884 "' and not '" +
7885 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
7886 return true;
7887 }
7888 }
7889 } else {
7890 Error(Parser.getTok().getLoc(),
7891 "optionality parameter not found, expected required|optional");
7892 return true;
7893 }
7894 // Check for possible IsOptional unaccepted values for known subsections
7895 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
7896 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
7897 Error(Parser.getTok().getLoc(),
7898 "aeabi_feature_and_bits must be marked as optional");
7899 return true;
7900 }
7901 }
7902 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
7903 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
7904 Error(Parser.getTok().getLoc(),
7905 "aeabi_pauthabi must be marked as required");
7906 return true;
7907 }
7908 }
7909 Parser.Lex();
7910 // consume a comma
7911 if (Parser.parseComma()) {
7912 return true;
7913 }
7914
7915 // Consume the second parameter (type parameter)
7916 AArch64BuildAttributes::SubsectionType Type;
7917 if (Parser.getTok().is(AsmToken::Identifier)) {
7918 StringRef Name = Parser.getTok().getIdentifier();
7919 Type = AArch64BuildAttributes::getTypeID(Name);
7920 if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
7921 Error(Parser.getTok().getLoc(),
7922 "unknown AArch64 build attributes type, expected uleb128|ntbs: " +
7923 Name);
7924 return true;
7925 }
7926 if (SubsectionExists) {
7927 if (Type != SubsectionExists->ParameterType) {
7928 Error(Parser.getTok().getLoc(),
7929 "type mismatch! subsection '" + SubsectionName +
7930 "' already exists with type defined as '" +
7932 SubsectionExists->ParameterType) +
7933 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
7934 "'");
7935 return true;
7936 }
7937 }
7938 } else {
7939 Error(Parser.getTok().getLoc(),
7940 "type parameter not found, expected uleb128|ntbs");
7941 return true;
7942 }
7943 // Check for possible unaccepted 'type' values for known subsections
7944 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
7945 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
7946 if (AArch64BuildAttributes::NTBS == Type) {
7947 Error(Parser.getTok().getLoc(),
7948 SubsectionName + " must be marked as ULEB128");
7949 return true;
7950 }
7951 }
7952 Parser.Lex();
7953 // Parsing finished, check for trailing tokens.
7954 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7955 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
7956 "attributes subsection header directive");
7957 return true;
7958 }
7959
7960 getTargetStreamer().emitAtributesSubsection(SubsectionName, IsOptional, Type);
7961
7962 return false;
7963}
7964
7965bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
7966 // Expecting 2 tokens after '.aeabi_attribute', a tag and a value
7967 // separated by a comma, e.g.:
7968 // .aeabi_attribute Tag_Feature_BTI, [uleb128|ntbs]
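// For example, within the aeabi_feature_and_bits subsection:
// .aeabi_attribute Tag_Feature_BTI, 1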
7969 MCAsmParser &Parser = getParser();
7970
7971 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
7972 getTargetStreamer().getActiveAtributesSubsection();
7973 if (nullptr == ActiveSubsection) {
7974 Error(Parser.getTok().getLoc(),
7975 "no active subsection, build attribute can not be added");
7976 return true;
7977 }
7978 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
7979 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
7980
7981 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
7982 if (AArch64BuildAttributes::getVendorName(
7983 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
7984 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
7985 if (AArch64BuildAttributes::getVendorName(
7986 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
7987 ActiveSubsectionName)
7988 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
7989
7990 StringRef TagStr = "";
7991 unsigned Tag;
7992 if (Parser.getTok().is(AsmToken::Identifier)) {
7993 TagStr = Parser.getTok().getIdentifier();
7994 switch (ActiveSubsectionID) {
7995 default:
7996 assert(0 && "Subsection name error");
7997 break;
7998 case AArch64BuildAttributes::VENDOR_UNKNOWN:
7999 // Private subsection, accept any tag.
8000 break;
8001 case AArch64BuildAttributes::AEABI_PAUTHABI:
8002 Tag = AArch64BuildAttributes::getPauthABITagsID(TagStr);
8003 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8004 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8005 TagStr + "' for subsection '" +
8006 ActiveSubsectionName + "'");
8007 return true;
8008 }
8009 break;
8010 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8011 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(TagStr);
8012 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8013 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8014 TagStr + "' for subsection '" +
8015 ActiveSubsectionName + "'");
8016 return true;
8017 }
8018 break;
8019 }
8020 } else if (Parser.getTok().is(AsmToken::Integer)) {
8021 Tag = getTok().getIntVal();
8022 } else {
8023 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8024 return true;
8025 }
8026 Parser.Lex();
8027 // consume a comma
8028 // parseComma() returns *false* on success and already calls Lex(), so
8029 // there is no need to call Lex() again.
8030 if (Parser.parseComma()) {
8031 return true;
8032 }
8033
8034 // Consume the second parameter (attribute value)
8035 unsigned ValueInt = unsigned(-1);
8036 std::string ValueStr = "";
8037 if (Parser.getTok().is(AsmToken::Integer)) {
8038 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8039 Error(
8040 Parser.getTok().getLoc(),
8041 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8042 return true;
8043 }
8044 ValueInt = getTok().getIntVal();
8045 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8046 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8047 Error(
8048 Parser.getTok().getLoc(),
8049 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8050 return true;
8051 }
8052 ValueStr = Parser.getTok().getIdentifier();
8053 } else if (Parser.getTok().is(AsmToken::String)) {
8054 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8055 Error(
8056 Parser.getTok().getLoc(),
8057 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8058 return true;
8059 }
8060 ValueStr = Parser.getTok().getString();
8061 } else {
8062 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8063 return true;
8064 }
8065 // Check for possible unaccepted values for known tags (AEABI_PAUTHABI,
8066 // AEABI_FEATURE_AND_BITS)
8067 if (!(ActiveSubsectionID == AArch64BuildAttributes::VENDOR_UNKNOWN) &&
8068 TagStr != "") { // TagStr was a recognized string
8069 if (0 != ValueInt && 1 != ValueInt) {
8070 Error(Parser.getTok().getLoc(),
8071 "unknown AArch64 build attributes value for tag '" + TagStr +
8072 "'; options are 0|1");
8073 return true;
8074 }
8075 }
8076 Parser.Lex();
8077 // Parsing finished, check for trailing tokens.
8078 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8079 Error(Parser.getTok().getLoc(),
8080 "unexpected token for AArch64 build attributes tag and value "
8081 "attribute directive");
8082 return true;
8083 }
8084
8085 if (unsigned(-1) != ValueInt) {
8086 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "",
8087 false);
8088 }
8089
8090 if ("" != ValueStr) {
8091 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8092 ValueStr, false);
8093 }
8094 return false;
8095}
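// For reference, attribute directives handled above typically look like the
// following, assuming a matching subsection has already been opened with
// .aeabi_subsection (an illustrative sketch; tag spellings follow
// AArch64BuildAttributes):
//   .aeabi_attribute Tag_Feature_BTI, 1        // in aeabi_feature_and_bits
//   .aeabi_attribute Tag_PAuth_Platform, 1     // in aeabi_pauthabi
//   .aeabi_attribute 7, "vendor data"          // private NTBS-typed subsection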
8096
8097bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8098 // Try @AUTH expressions: they're more complex than the usual symbol variants.
8099 if (!parseAuthExpr(Res, EndLoc))
8100 return false;
8101 return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
8102}
8103
8104/// parseAuthExpr
8105/// ::= _sym@AUTH(ib,123[,addr])
8106/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8107/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8108bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8109 MCAsmParser &Parser = getParser();
8110 MCContext &Ctx = getContext();
8111
8112 AsmToken Tok = Parser.getTok();
8113
8114 // Look for '_sym@AUTH' ...
8115 if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
8116 StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
8117 if (SymName.contains('@'))
8118 return TokError(
8119 "combination of @AUTH with other modifiers not supported");
8120 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
8121
8122 Parser.Lex(); // Eat the identifier.
8123 } else {
8124 // ... or look for a more complex symbol reference, such as ...
8125 SmallVector<AsmToken, 6> Tokens;
8126
8127 // ... '"_long sym"@AUTH' ...
8128 if (Tok.is(AsmToken::String))
8129 Tokens.resize(2);
8130 // ... or '(_sym + 5)@AUTH'.
8131 else if (Tok.is(AsmToken::LParen))
8132 Tokens.resize(6);
8133 else
8134 return true;
8135
8136 if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
8137 return true;
8138
8139 // In either case, the expression ends with '@' 'AUTH'.
8140 if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
8141 Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
8142 Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
8143 return true;
8144
8145 if (Tok.is(AsmToken::String)) {
8146 StringRef SymName;
8147 if (Parser.parseIdentifier(SymName))
8148 return true;
8149 Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
8150 } else {
8151 if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
8152 return true;
8153 }
8154
8155 Parser.Lex(); // '@'
8156 Parser.Lex(); // 'AUTH'
8157 }
8158
8159 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8160 if (parseToken(AsmToken::LParen, "expected '('"))
8161 return true;
8162
8163 if (Parser.getTok().isNot(AsmToken::Identifier))
8164 return TokError("expected key name");
8165
8166 StringRef KeyStr = Parser.getTok().getIdentifier();
8167 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8168 if (!KeyIDOrNone)
8169 return TokError("invalid key '" + KeyStr + "'");
8170 Parser.Lex();
8171
8172 if (parseToken(AsmToken::Comma, "expected ','"))
8173 return true;
8174
8175 if (Parser.getTok().isNot(AsmToken::Integer))
8176 return TokError("expected integer discriminator");
8177 int64_t Discriminator = Parser.getTok().getIntVal();
8178
8179 if (!isUInt<16>(Discriminator))
8180 return TokError("integer discriminator " + Twine(Discriminator) +
8181 " out of range [0, 0xFFFF]");
8182 Parser.Lex();
8183
8184 bool UseAddressDiversity = false;
8185 if (Parser.getTok().is(AsmToken::Comma)) {
8186 Parser.Lex();
8187 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8188 Parser.getTok().getIdentifier() != "addr")
8189 return TokError("expected 'addr'");
8190 UseAddressDiversity = true;
8191 Parser.Lex();
8192 }
8193
8194 EndLoc = Parser.getTok().getEndLoc();
8195 if (parseToken(AsmToken::RParen, "expected ')'"))
8196 return true;
8197
8198 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8199 UseAddressDiversity, Ctx);
8200 return false;
8201}
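// For reference, @AUTH expressions matching the grammar above look like the
// following (key names follow AArch64PACKey; the optional trailing 'addr'
// requests address diversity; an illustrative sketch only):
//   .quad _sym@AUTH(ia,42)
//   .quad "_long sym"@AUTH(ib,123)
//   .quad (_sym + 5)@AUTH(db,1234,addr)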
8202
8203bool
8204AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8205 AArch64MCExpr::VariantKind &ELFRefKind,
8206 MCSymbolRefExpr::VariantKind &DarwinRefKind,
8207 int64_t &Addend) {
8208 ELFRefKind = AArch64MCExpr::VK_INVALID;
8209 DarwinRefKind = MCSymbolRefExpr::VK_None;
8210 Addend = 0;
8211
8212 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
8213 ELFRefKind = AE->getKind();
8214 Expr = AE->getSubExpr();
8215 }
8216
8217 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8218 if (SE) {
8219 // It's a simple symbol reference with no addend.
8220 DarwinRefKind = SE->getKind();
8221 return true;
8222 }
8223
8224 // Check that it looks like a symbol + an addend
8225 MCValue Res;
8226 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
8227 if (!Relocatable || Res.getSymB())
8228 return false;
8229
8230 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
8231 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8232 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
8233 return false;
8234
8235 if (Res.getSymA())
8236 DarwinRefKind = Res.getSymA()->getKind();
8237 Addend = Res.getConstant();
8238
8239 // It's some symbol reference + a constant addend, but really
8240 // shouldn't use both Darwin and ELF syntax.
8241 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
8242 DarwinRefKind == MCSymbolRefExpr::VK_None;
8243}
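// For illustration: an ELF-style operand such as "add x0, x0, :lo12:var" is
// classified via its AArch64MCExpr kind, while a Darwin-style operand such as
// "add x0, x0, var@PAGEOFF" is classified via the MCSymbolRefExpr variant; an
// expression that mixes both syntaxes is rejected by the final check above.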
8244
8245 /// Force static initialization.
8246 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
8247 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8248 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8249 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8250 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8251 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8252}
8253
8254#define GET_REGISTER_MATCHER
8255#define GET_SUBTARGET_FEATURE_NAME
8256#define GET_MATCHER_IMPLEMENTATION
8257#define GET_MNEMONIC_SPELL_CHECKER
8258#include "AArch64GenAsmMatcher.inc"
8259
8260// Define this matcher function after the auto-generated include so we
8261// have the match class enum definitions.
8262unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8263 unsigned Kind) {
8264 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8265
8266 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8267 if (!Op.isImm())
8268 return Match_InvalidOperand;
8269 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8270 if (!CE)
8271 return Match_InvalidOperand;
8272 if (CE->getValue() == ExpectedVal)
8273 return Match_Success;
8274 return Match_InvalidOperand;
8275 };
8276
8277 switch (Kind) {
8278 default:
8279 return Match_InvalidOperand;
8280 case MCK_MPR:
8281 // If the Kind is a token for the MPR register class which has the "za"
8282 // register (SME accumulator array), check if the asm is a literal "za"
8283 // token. This is for the "smstart za" alias that defines the register
8284 // as a literal token.
8285 if (Op.isTokenEqual("za"))
8286 return Match_Success;
8287 return Match_InvalidOperand;
8288
8289 // If the kind is a token for a literal immediate, check if our asm operand
8290 // matches. This is for InstAliases which have a fixed-value immediate in
8291 // the asm string, such as hints which are parsed into a specific
8292 // instruction definition.
8293#define MATCH_HASH(N) \
8294 case MCK__HASH_##N: \
8295 return MatchesOpImmediate(N);
8296 MATCH_HASH(0)
8297 MATCH_HASH(1)
8298 MATCH_HASH(2)
8299 MATCH_HASH(3)
8300 MATCH_HASH(4)
8301 MATCH_HASH(6)
8302 MATCH_HASH(7)
8303 MATCH_HASH(8)
8304 MATCH_HASH(10)
8305 MATCH_HASH(12)
8306 MATCH_HASH(14)
8307 MATCH_HASH(16)
8308 MATCH_HASH(24)
8309 MATCH_HASH(25)
8310 MATCH_HASH(26)
8311 MATCH_HASH(27)
8312 MATCH_HASH(28)
8313 MATCH_HASH(29)
8314 MATCH_HASH(30)
8315 MATCH_HASH(31)
8316 MATCH_HASH(32)
8317 MATCH_HASH(40)
8318 MATCH_HASH(48)
8319 MATCH_HASH(64)
8320#undef MATCH_HASH
8321#define MATCH_HASH_MINUS(N) \
8322 case MCK__HASH__MINUS_##N: \
8323 return MatchesOpImmediate(-N);
8324 MATCH_HASH_MINUS(4)
8325 MATCH_HASH_MINUS(8)
8326 MATCH_HASH_MINUS(16)
8327#undef MATCH_HASH_MINUS
8328 }
8329}
8330
8331ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8332
8333 SMLoc S = getLoc();
8334
8335 if (getTok().isNot(AsmToken::Identifier))
8336 return Error(S, "expected register");
8337
8338 MCRegister FirstReg;
8339 ParseStatus Res = tryParseScalarRegister(FirstReg);
8340 if (!Res.isSuccess())
8341 return Error(S, "expected first even register of a consecutive same-size "
8342 "even/odd register pair");
8343
8344 const MCRegisterClass &WRegClass =
8345 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8346 const MCRegisterClass &XRegClass =
8347 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8348
8349 bool isXReg = XRegClass.contains(FirstReg),
8350 isWReg = WRegClass.contains(FirstReg);
8351 if (!isXReg && !isWReg)
8352 return Error(S, "expected first even register of a consecutive same-size "
8353 "even/odd register pair");
8354
8355 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8356 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8357
8358 if (FirstEncoding & 0x1)
8359 return Error(S, "expected first even register of a consecutive same-size "
8360 "even/odd register pair");
8361
8362 if (getTok().isNot(AsmToken::Comma))
8363 return Error(getLoc(), "expected comma");
8364 // Eat the comma
8365 Lex();
8366
8367 SMLoc E = getLoc();
8368 MCRegister SecondReg;
8369 Res = tryParseScalarRegister(SecondReg);
8370 if (!Res.isSuccess())
8371 return Error(E, "expected second odd register of a consecutive same-size "
8372 "even/odd register pair");
8373
8374 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8375 (isXReg && !XRegClass.contains(SecondReg)) ||
8376 (isWReg && !WRegClass.contains(SecondReg)))
8377 return Error(E, "expected second odd register of a consecutive same-size "
8378 "even/odd register pair");
8379
8380 MCRegister Pair;
8381 if (isXReg) {
8382 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8383 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8384 } else {
8385 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8386 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8387 }
8388
8389 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8390 getLoc(), getContext()));
8391
8392 return ParseStatus::Success;
8393}
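// For reference, the even/odd sequential register pair parsed above appears
// in instructions such as CASP (an illustrative sketch):
//   casp   w0, w1, w2, w3, [x4]
//   caspal x2, x3, x6, x7, [x0]
// A pair starting with an odd register, e.g. "x1, x2", is rejected.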
8394
8395template <bool ParseShiftExtend, bool ParseSuffix>
8396ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8397 const SMLoc S = getLoc();
8398 // Check for a SVE vector register specifier first.
8399 MCRegister RegNum;
8400 StringRef Kind;
8401
8402 ParseStatus Res =
8403 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8404
8405 if (!Res.isSuccess())
8406 return Res;
8407
8408 if (ParseSuffix && Kind.empty())
8409 return ParseStatus::NoMatch;
8410
8411 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8412 if (!KindRes)
8413 return ParseStatus::NoMatch;
8414
8415 unsigned ElementWidth = KindRes->second;
8416
8417 // No shift/extend is the default.
8418 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8419 Operands.push_back(AArch64Operand::CreateVectorReg(
8420 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8421
8422 ParseStatus Res = tryParseVectorIndex(Operands);
8423 if (Res.isFailure())
8424 return ParseStatus::Failure;
8425 return ParseStatus::Success;
8426 }
8427
8428 // Eat the comma
8429 Lex();
8430
8431 // Match the shift
8432 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8433 Res = tryParseOptionalShiftExtend(ExtOpnd);
8434 if (!Res.isSuccess())
8435 return Res;
8436
8437 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8438 Operands.push_back(AArch64Operand::CreateVectorReg(
8439 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8440 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8441 Ext->hasShiftExtendAmount()));
8442
8443 return ParseStatus::Success;
8444}
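// For reference, SVE data-vector operands parsed above may appear with or
// without an element-size suffix, and optionally with a trailing shift/extend
// as used in gather/scatter addressing (an illustrative sketch):
//   z3
//   z3.s
//   z3.d, lsl #3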
8445
8446ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8447 MCAsmParser &Parser = getParser();
8448
8449 SMLoc SS = getLoc();
8450 const AsmToken &TokE = getTok();
8451 bool IsHash = TokE.is(AsmToken::Hash);
8452
8453 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8454 return ParseStatus::NoMatch;
8455
8456 int64_t Pattern;
8457 if (IsHash) {
8458 Lex(); // Eat hash
8459
8460 // Parse the immediate operand.
8461 const MCExpr *ImmVal;
8462 SS = getLoc();
8463 if (Parser.parseExpression(ImmVal))
8464 return ParseStatus::Failure;
8465
8466 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8467 if (!MCE)
8468 return TokError("invalid operand for instruction");
8469
8470 Pattern = MCE->getValue();
8471 } else {
8472 // Parse the pattern
8473 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8474 if (!Pat)
8475 return ParseStatus::NoMatch;
8476
8477 Lex();
8478 Pattern = Pat->Encoding;
8479 assert(Pattern >= 0 && Pattern < 32);
8480 }
8481
8482 Operands.push_back(
8483 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8484 SS, getLoc(), getContext()));
8485
8486 return ParseStatus::Success;
8487}
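// For reference, SVE predicate-pattern operands parsed above may be a named
// pattern or an explicit immediate (an illustrative sketch; names come from
// AArch64SVEPredPattern):
//   ptrue p0.s, vl64
//   cntb  x0, pow2
//   ptrue p1.b, #26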
8488
8489 ParseStatus
8490 AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8491 int64_t Pattern;
8492 SMLoc SS = getLoc();
8493 const AsmToken &TokE = getTok();
8494 // Parse the pattern
8495 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8496 TokE.getString());
8497 if (!Pat)
8498 return ParseStatus::NoMatch;
8499
8500 Lex();
8501 Pattern = Pat->Encoding;
8502 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8503
8504 Operands.push_back(
8505 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8506 SS, getLoc(), getContext()));
8507
8508 return ParseStatus::Success;
8509}
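// For reference, vector-length specifier operands parsed above (names come
// from AArch64SVEVecLenSpecifier) appear, for example, in SME2 'while'
// instructions that produce a predicate-as-counter (an illustrative sketch):
//   whilelt pn8.h, x0, x1, vlx2
//   whilege pn10.b, x2, x3, vlx4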
8510
8511ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8512 SMLoc SS = getLoc();
8513
8514 MCRegister XReg;
8515 if (!tryParseScalarRegister(XReg).isSuccess())
8516 return ParseStatus::NoMatch;
8517
8518 MCContext &ctx = getContext();
8519 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8520 MCRegister X8Reg = RI->getMatchingSuperReg(
8521 XReg, AArch64::x8sub_0,
8522 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8523 if (!X8Reg)
8524 return Error(SS,
8525 "expected an even-numbered x-register in the range [x0,x22]");
8526
8527 Operands.push_back(
8528 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8529 return ParseStatus::Success;
8530}
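// For reference, the GPR64x8 operand parsed above names only the first of
// eight consecutive x-registers, which must be even-numbered and at most x22,
// e.g. in the LD64B/ST64B accelerator instructions (an illustrative sketch):
//   ld64b x0, [x1]
//   st64b x8, [x2]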
8531
8532ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8533 SMLoc S = getLoc();
8534
8535 if (getTok().isNot(AsmToken::Integer))
8536 return ParseStatus::NoMatch;
8537
8538 if (getLexer().peekTok().isNot(AsmToken::Colon))
8539 return ParseStatus::NoMatch;
8540
8541 const MCExpr *ImmF;
8542 if (getParser().parseExpression(ImmF))
8543 return ParseStatus::NoMatch;
8544
8545 if (getTok().isNot(AsmToken::Colon))
8546 return ParseStatus::NoMatch;
8547
8548 Lex(); // Eat ':'
8549 if (getTok().isNot(AsmToken::Integer))
8550 return ParseStatus::NoMatch;
8551
8552 SMLoc E = getTok().getLoc();
8553 const MCExpr *ImmL;
8554 if (getParser().parseExpression(ImmL))
8555 return ParseStatus::NoMatch;
8556
8557 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8558 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8559
8560 Operands.push_back(
8561 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8562 return ParseStatus::Success;
8563}
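// For reference, immediate-range operands of the form "first:last" parsed
// above appear, for example, in SME2 forms that address a pair of consecutive
// ZA slices (an illustrative sketch):
//   zero za.d[w8, 0:1]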
8564
8565template <int Adj>
8566ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8567 SMLoc S = getLoc();
8568
8569 parseOptionalToken(AsmToken::Hash);
8570 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8571
8572 if (getTok().isNot(AsmToken::Integer))
8573 return ParseStatus::NoMatch;
8574
8575 const MCExpr *Ex;
8576 if (getParser().parseExpression(Ex))
8577 return ParseStatus::NoMatch;
8578
8579 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8580 if (IsNegative)
8581 Imm = -Imm;
8582
8583 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8584 // return a value which is certain to trigger an error message about an
8585 // invalid immediate range instead of a non-descriptive invalid operand error.
8586 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8587 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8588 Imm = -2;
8589 else
8590 Imm += Adj;
8591
8592 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8593 Operands.push_back(AArch64Operand::CreateImm(
8594 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8595
8596 return ParseStatus::Success;
8597}