Line data Source code
1 : //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 :
10 : #include "MCTargetDesc/AArch64AddressingModes.h"
11 : #include "MCTargetDesc/AArch64MCExpr.h"
12 : #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 : #include "MCTargetDesc/AArch64TargetStreamer.h"
14 : #include "AArch64InstrInfo.h"
15 : #include "Utils/AArch64BaseInfo.h"
16 : #include "llvm/ADT/APFloat.h"
17 : #include "llvm/ADT/APInt.h"
18 : #include "llvm/ADT/ArrayRef.h"
19 : #include "llvm/ADT/STLExtras.h"
20 : #include "llvm/ADT/SmallVector.h"
21 : #include "llvm/ADT/StringExtras.h"
22 : #include "llvm/ADT/StringMap.h"
23 : #include "llvm/ADT/StringRef.h"
24 : #include "llvm/ADT/StringSwitch.h"
25 : #include "llvm/ADT/Twine.h"
26 : #include "llvm/MC/MCContext.h"
27 : #include "llvm/MC/MCExpr.h"
28 : #include "llvm/MC/MCInst.h"
29 : #include "llvm/MC/MCLinkerOptimizationHint.h"
30 : #include "llvm/MC/MCObjectFileInfo.h"
31 : #include "llvm/MC/MCParser/MCAsmLexer.h"
32 : #include "llvm/MC/MCParser/MCAsmParser.h"
33 : #include "llvm/MC/MCParser/MCAsmParserExtension.h"
34 : #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35 : #include "llvm/MC/MCParser/MCTargetAsmParser.h"
36 : #include "llvm/MC/MCRegisterInfo.h"
37 : #include "llvm/MC/MCStreamer.h"
38 : #include "llvm/MC/MCSubtargetInfo.h"
39 : #include "llvm/MC/MCSymbol.h"
40 : #include "llvm/MC/MCTargetOptions.h"
41 : #include "llvm/MC/SubtargetFeature.h"
42 : #include "llvm/MC/MCValue.h"
43 : #include "llvm/Support/Casting.h"
44 : #include "llvm/Support/Compiler.h"
45 : #include "llvm/Support/ErrorHandling.h"
46 : #include "llvm/Support/MathExtras.h"
47 : #include "llvm/Support/SMLoc.h"
48 : #include "llvm/Support/TargetParser.h"
49 : #include "llvm/Support/TargetRegistry.h"
50 : #include "llvm/Support/raw_ostream.h"
51 : #include <cassert>
52 : #include <cctype>
53 : #include <cstdint>
54 : #include <cstdio>
55 : #include <string>
56 : #include <tuple>
57 : #include <utility>
58 : #include <vector>
59 :
60 : using namespace llvm;
61 :
62 : namespace {
63 :
64 : enum class RegKind {
65 : Scalar,
66 : NeonVector,
67 : SVEDataVector,
68 : SVEPredicateVector
69 : };
70 :
71 : enum RegConstraintEqualityTy {
72 : EqualsReg,
73 : EqualsSuperReg,
74 : EqualsSubReg
75 : };
76 :
77 : class AArch64AsmParser : public MCTargetAsmParser {
78 : private:
79 : StringRef Mnemonic; ///< Instruction mnemonic.
80 :
81 : // Map of register aliases created via the .req directive.
82 : StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
83 :
84 : class PrefixInfo {
85 : public:
86 29053 : static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
87 : PrefixInfo Prefix;
88 29053 : switch (Inst.getOpcode()) {
89 776 : case AArch64::MOVPRFX_ZZ:
90 : Prefix.Active = true;
91 776 : Prefix.Dst = Inst.getOperand(0).getReg();
92 776 : break;
93 9 : case AArch64::MOVPRFX_ZPmZ_B:
94 : case AArch64::MOVPRFX_ZPmZ_H:
95 : case AArch64::MOVPRFX_ZPmZ_S:
96 : case AArch64::MOVPRFX_ZPmZ_D:
97 : Prefix.Active = true;
98 : Prefix.Predicated = true;
99 9 : Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
100 : assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
101 : "No destructive element size set for movprfx");
102 9 : Prefix.Dst = Inst.getOperand(0).getReg();
103 9 : Prefix.Pg = Inst.getOperand(2).getReg();
104 9 : break;
105 600 : case AArch64::MOVPRFX_ZPzZ_B:
106 : case AArch64::MOVPRFX_ZPzZ_H:
107 : case AArch64::MOVPRFX_ZPzZ_S:
108 : case AArch64::MOVPRFX_ZPzZ_D:
109 : Prefix.Active = true;
110 : Prefix.Predicated = true;
111 600 : Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
112 : assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
113 : "No destructive element size set for movprfx");
114 600 : Prefix.Dst = Inst.getOperand(0).getReg();
115 600 : Prefix.Pg = Inst.getOperand(1).getReg();
116 600 : break;
117 : default:
118 : break;
119 : }
120 :
121 29053 : return Prefix;
122 : }
123 :
124 2167 : PrefixInfo() : Active(false), Predicated(false) {}
125 0 : bool isActive() const { return Active; }
126 0 : bool isPredicated() const { return Predicated; }
127 0 : unsigned getElementSize() const {
128 : assert(Predicated);
129 0 : return ElementSize;
130 : }
131 0 : unsigned getDstReg() const { return Dst; }
132 0 : unsigned getPgReg() const {
133 : assert(Predicated);
134 0 : return Pg;
135 : }
136 :
137 : private:
138 : bool Active;
139 : bool Predicated;
140 : unsigned ElementSize;
141 : unsigned Dst;
142 : unsigned Pg;
143 : } NextPrefix;
144 :
145 : AArch64TargetStreamer &getTargetStreamer() {
146 32 : MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
147 : return static_cast<AArch64TargetStreamer &>(TS);
148 : }
149 :
150 607870 : SMLoc getLoc() const { return getParser().getTok().getLoc(); }
151 :
152 : bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
153 : void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
154 : AArch64CC::CondCode parseCondCodeString(StringRef Cond);
155 : bool parseCondCode(OperandVector &Operands, bool invertCondCode);
156 : unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
157 : bool parseRegister(OperandVector &Operands);
158 : bool parseSymbolicImmVal(const MCExpr *&ImmVal);
159 : bool parseNeonVectorList(OperandVector &Operands);
160 : bool parseOptionalMulOperand(OperandVector &Operands);
161 : bool parseOperand(OperandVector &Operands, bool isCondCode,
162 : bool invertCondCode);
163 :
164 : bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
165 : OperandVector &Operands);
166 :
167 : bool parseDirectiveArch(SMLoc L);
168 : bool parseDirectiveCPU(SMLoc L);
169 : bool parseDirectiveInst(SMLoc L);
170 :
171 : bool parseDirectiveTLSDescCall(SMLoc L);
172 :
173 : bool parseDirectiveLOH(StringRef LOH, SMLoc L);
174 : bool parseDirectiveLtorg(SMLoc L);
175 :
176 : bool parseDirectiveReq(StringRef Name, SMLoc L);
177 : bool parseDirectiveUnreq(SMLoc L);
178 :
179 : bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
180 : SmallVectorImpl<SMLoc> &Loc);
181 : bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
182 : OperandVector &Operands, MCStreamer &Out,
183 : uint64_t &ErrorInfo,
184 : bool MatchingInlineAsm) override;
185 : /// @name Auto-generated Match Functions
186 : /// {
187 :
188 : #define GET_ASSEMBLER_HEADER
189 : #include "AArch64GenAsmMatcher.inc"
190 :
191 : /// }
192 :
193 : OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
194 : OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
195 : RegKind MatchKind);
196 : OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
197 : OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
198 : OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
199 : OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
200 : OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
201 : template <bool IsSVEPrefetch = false>
202 : OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
203 : OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
204 : OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
205 : OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
206 : OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
207 : template<bool AddFPZeroAsLiteral>
208 : OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
209 : OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
210 : OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
211 : bool tryParseNeonVectorRegister(OperandVector &Operands);
212 : OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
213 : OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
214 : template <bool ParseShiftExtend,
215 : RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
216 : OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
217 : template <bool ParseShiftExtend, bool ParseSuffix>
218 : OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
219 : OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
220 : template <RegKind VectorKind>
221 : OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
222 : bool ExpectMatch = false);
223 : OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
224 :
225 : public:
226 : enum AArch64MatchResultTy {
227 : Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
228 : #define GET_OPERAND_DIAGNOSTIC_TYPES
229 : #include "AArch64GenAsmMatcher.inc"
230 : };
231 : bool IsILP32;
232 :
233 2167 : AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
234 : const MCInstrInfo &MII, const MCTargetOptions &Options)
235 2167 : : MCTargetAsmParser(Options, STI, MII) {
236 2167 : IsILP32 = Options.getABIName() == "ilp32";
237 2167 : MCAsmParserExtension::Initialize(Parser);
238 2167 : MCStreamer &S = getParser().getStreamer();
239 2167 : if (S.getTargetStreamer() == nullptr)
240 34 : new AArch64TargetStreamer(S);
241 :
242 : // Alias .hword/.word/.xword to the target-independent .2byte/.4byte/.8byte
243 : // directives as they have the same form and semantics:
244 : /// ::= (.hword | .word | .xword ) [ expression (, expression)* ]
245 4334 : Parser.addAliasForDirective(".hword", ".2byte");
246 4334 : Parser.addAliasForDirective(".word", ".4byte");
247 4334 : Parser.addAliasForDirective(".xword", ".8byte");
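// For illustration (example added editorially, not in the original file),
// after these aliases the following directives are equivalent:
//   .hword 0x1234                 // emitted as .2byte 0x1234
//   .word  0x12345678             // emitted as .4byte 0x12345678
//   .xword 0x123456789abcdef0     // emitted as .8byte 0x123456789abcdef0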
248 :
249 : // Initialize the set of available features.
250 2167 : setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
251 2167 : }
252 :
253 : bool regsEqual(const MCParsedAsmOperand &Op1,
254 : const MCParsedAsmOperand &Op2) const override;
255 : bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
256 : SMLoc NameLoc, OperandVector &Operands) override;
257 : bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
258 : bool ParseDirective(AsmToken DirectiveID) override;
259 : unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
260 : unsigned Kind) override;
261 :
262 : static bool classifySymbolRef(const MCExpr *Expr,
263 : AArch64MCExpr::VariantKind &ELFRefKind,
264 : MCSymbolRefExpr::VariantKind &DarwinRefKind,
265 : int64_t &Addend);
266 : };
267 :
268 : /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
269 : /// instruction.
270 8 : class AArch64Operand : public MCParsedAsmOperand {
271 : private:
272 : enum KindTy {
273 : k_Immediate,
274 : k_ShiftedImm,
275 : k_CondCode,
276 : k_Register,
277 : k_VectorList,
278 : k_VectorIndex,
279 : k_Token,
280 : k_SysReg,
281 : k_SysCR,
282 : k_Prefetch,
283 : k_ShiftExtend,
284 : k_FPImm,
285 : k_Barrier,
286 : k_PSBHint,
287 : k_BTIHint,
288 : } Kind;
289 :
290 : SMLoc StartLoc, EndLoc;
291 :
292 : struct TokOp {
293 : const char *Data;
294 : unsigned Length;
295 : bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
296 : };
297 :
298 : // Separate shift/extend operand.
299 : struct ShiftExtendOp {
300 : AArch64_AM::ShiftExtendType Type;
301 : unsigned Amount;
302 : bool HasExplicitAmount;
303 : };
304 :
305 : struct RegOp {
306 : unsigned RegNum;
307 : RegKind Kind;
308 : int ElementWidth;
309 :
310 : // The register may be allowed as a different register class,
311 : // e.g. for GPR64as32 or GPR32as64.
312 : RegConstraintEqualityTy EqualityTy;
313 :
314 : // In some cases the shift/extend needs to be explicitly parsed together
315 : // with the register, rather than as a separate operand. This is needed
316 : // for addressing modes where the instruction as a whole dictates the
317 : // scaling/extend, rather than specific bits in the instruction.
318 : // By parsing them as a single operand, we avoid the need to pass an
319 : // extra operand in all CodeGen patterns (because all operands need to
320 : // have an associated value), and we avoid the need to update TableGen to
321 : // accept operands that have no associated bits in the instruction.
322 : //
323 : // An added benefit of parsing them together is that the assembler
324 : // can give a sensible diagnostic if the scaling is not correct.
325 : //
326 : // The default is 'lsl #0' (HasExplicitAmount = false) if no
327 : // ShiftExtend is specified.
328 : ShiftExtendOp ShiftExtend;
329 : };
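// Illustrative use (editorial example, not in the original file): for
//   ldr w0, [x1, w2, sxtw #2]
// the index "w2, sxtw #2" is parsed as one RegOp whose ShiftExtend holds
// {SXTW, #2}, letting the matcher diagnose an incorrect scaling directly.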
330 :
331 : struct VectorListOp {
332 : unsigned RegNum;
333 : unsigned Count;
334 : unsigned NumElements;
335 : unsigned ElementWidth;
336 : RegKind RegisterKind;
337 : };
338 :
339 : struct VectorIndexOp {
340 : unsigned Val;
341 : };
342 :
343 : struct ImmOp {
344 : const MCExpr *Val;
345 : };
346 :
347 : struct ShiftedImmOp {
348 : const MCExpr *Val;
349 : unsigned ShiftAmount;
350 : };
351 :
352 : struct CondCodeOp {
353 : AArch64CC::CondCode Code;
354 : };
355 :
356 : struct FPImmOp {
357 : uint64_t Val; // APFloat value bitcasted to uint64_t.
358 : bool IsExact; // describes whether parsed value was exact.
359 : };
360 :
361 : struct BarrierOp {
362 : const char *Data;
363 : unsigned Length;
364 : unsigned Val; // Not the enum since not all values have names.
365 : };
366 :
367 : struct SysRegOp {
368 : const char *Data;
369 : unsigned Length;
370 : uint32_t MRSReg;
371 : uint32_t MSRReg;
372 : uint32_t PStateField;
373 : };
374 :
375 : struct SysCRImmOp {
376 : unsigned Val;
377 : };
378 :
379 : struct PrefetchOp {
380 : const char *Data;
381 : unsigned Length;
382 : unsigned Val;
383 : };
384 :
385 : struct PSBHintOp {
386 : const char *Data;
387 : unsigned Length;
388 : unsigned Val;
389 : };
390 :
391 : struct BTIHintOp {
392 : const char *Data;
393 : unsigned Length;
394 : unsigned Val;
395 : };
396 :
397 : struct ExtendOp {
398 : unsigned Val;
399 : };
400 :
401 : union {
402 : struct TokOp Tok;
403 : struct RegOp Reg;
404 : struct VectorListOp VectorList;
405 : struct VectorIndexOp VectorIndex;
406 : struct ImmOp Imm;
407 : struct ShiftedImmOp ShiftedImm;
408 : struct CondCodeOp CondCode;
409 : struct FPImmOp FPImm;
410 : struct BarrierOp Barrier;
411 : struct SysRegOp SysReg;
412 : struct SysCRImmOp SysCRImm;
413 : struct PrefetchOp Prefetch;
414 : struct PSBHintOp PSBHint;
415 : struct BTIHintOp BTIHint;
416 : struct ShiftExtendOp ShiftExtend;
417 : };
418 :
419 : // Keep the MCContext around as the MCExprs may need to be manipulated during
420 : // the add<>Operands() calls.
421 : MCContext &Ctx;
422 :
423 : public:
424 0 : AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
425 :
426 32 : AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
427 16 : Kind = o.Kind;
428 16 : StartLoc = o.StartLoc;
429 16 : EndLoc = o.EndLoc;
430 16 : switch (Kind) {
431 0 : case k_Token:
432 0 : Tok = o.Tok;
433 0 : break;
434 16 : case k_Immediate:
435 16 : Imm = o.Imm;
436 16 : break;
437 0 : case k_ShiftedImm:
438 0 : ShiftedImm = o.ShiftedImm;
439 0 : break;
440 0 : case k_CondCode:
441 0 : CondCode = o.CondCode;
442 0 : break;
443 0 : case k_FPImm:
444 0 : FPImm = o.FPImm;
445 0 : break;
446 0 : case k_Barrier:
447 0 : Barrier = o.Barrier;
448 0 : break;
449 0 : case k_Register:
450 0 : Reg = o.Reg;
451 0 : break;
452 0 : case k_VectorList:
453 0 : VectorList = o.VectorList;
454 0 : break;
455 0 : case k_VectorIndex:
456 0 : VectorIndex = o.VectorIndex;
457 0 : break;
458 0 : case k_SysReg:
459 0 : SysReg = o.SysReg;
460 0 : break;
461 0 : case k_SysCR:
462 0 : SysCRImm = o.SysCRImm;
463 0 : break;
464 0 : case k_Prefetch:
465 0 : Prefetch = o.Prefetch;
466 0 : break;
467 0 : case k_PSBHint:
468 0 : PSBHint = o.PSBHint;
469 0 : break;
470 0 : case k_BTIHint:
471 0 : BTIHint = o.BTIHint;
472 0 : break;
473 0 : case k_ShiftExtend:
474 0 : ShiftExtend = o.ShiftExtend;
475 0 : break;
476 : }
477 16 : }
478 :
479 : /// getStartLoc - Get the location of the first token of this operand.
480 32 : SMLoc getStartLoc() const override { return StartLoc; }
481 : /// getEndLoc - Get the location of the last token of this operand.
482 1356 : SMLoc getEndLoc() const override { return EndLoc; }
483 :
484 0 : StringRef getToken() const {
485 : assert(Kind == k_Token && "Invalid access!");
486 229993 : return StringRef(Tok.Data, Tok.Length);
487 : }
488 :
489 0 : bool isTokenSuffix() const {
490 : assert(Kind == k_Token && "Invalid access!");
491 0 : return Tok.IsSuffix;
492 : }
493 :
494 0 : const MCExpr *getImm() const {
495 : assert(Kind == k_Immediate && "Invalid access!");
496 0 : return Imm.Val;
497 : }
498 :
499 0 : const MCExpr *getShiftedImmVal() const {
500 : assert(Kind == k_ShiftedImm && "Invalid access!");
501 0 : return ShiftedImm.Val;
502 : }
503 :
504 0 : unsigned getShiftedImmShift() const {
505 : assert(Kind == k_ShiftedImm && "Invalid access!");
506 0 : return ShiftedImm.ShiftAmount;
507 : }
508 :
509 0 : AArch64CC::CondCode getCondCode() const {
510 : assert(Kind == k_CondCode && "Invalid access!");
511 0 : return CondCode.Code;
512 : }
513 :
514 0 : APFloat getFPImm() const {
515 : assert (Kind == k_FPImm && "Invalid access!");
516 0 : return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
517 : }
518 :
519 0 : bool getFPImmIsExact() const {
520 : assert (Kind == k_FPImm && "Invalid access!");
521 0 : return FPImm.IsExact;
522 : }
523 :
524 0 : unsigned getBarrier() const {
525 : assert(Kind == k_Barrier && "Invalid access!");
526 0 : return Barrier.Val;
527 : }
528 :
529 0 : StringRef getBarrierName() const {
530 : assert(Kind == k_Barrier && "Invalid access!");
531 0 : return StringRef(Barrier.Data, Barrier.Length);
532 : }
533 :
534 4518 : unsigned getReg() const override {
535 : assert(Kind == k_Register && "Invalid access!");
536 423659 : return Reg.RegNum;
537 : }
538 :
539 0 : RegConstraintEqualityTy getRegEqualityTy() const {
540 : assert(Kind == k_Register && "Invalid access!");
541 0 : return Reg.EqualityTy;
542 : }
543 :
544 0 : unsigned getVectorListStart() const {
545 : assert(Kind == k_VectorList && "Invalid access!");
546 0 : return VectorList.RegNum;
547 : }
548 :
549 0 : unsigned getVectorListCount() const {
550 : assert(Kind == k_VectorList && "Invalid access!");
551 0 : return VectorList.Count;
552 : }
553 :
554 0 : unsigned getVectorIndex() const {
555 : assert(Kind == k_VectorIndex && "Invalid access!");
556 0 : return VectorIndex.Val;
557 : }
558 :
559 0 : StringRef getSysReg() const {
560 : assert(Kind == k_SysReg && "Invalid access!");
561 0 : return StringRef(SysReg.Data, SysReg.Length);
562 : }
563 :
564 0 : unsigned getSysCR() const {
565 : assert(Kind == k_SysCR && "Invalid access!");
566 0 : return SysCRImm.Val;
567 : }
568 :
569 0 : unsigned getPrefetch() const {
570 : assert(Kind == k_Prefetch && "Invalid access!");
571 0 : return Prefetch.Val;
572 : }
573 :
574 0 : unsigned getPSBHint() const {
575 : assert(Kind == k_PSBHint && "Invalid access!");
576 0 : return PSBHint.Val;
577 : }
578 :
579 0 : StringRef getPSBHintName() const {
580 : assert(Kind == k_PSBHint && "Invalid access!");
581 0 : return StringRef(PSBHint.Data, PSBHint.Length);
582 : }
583 :
584 0 : unsigned getBTIHint() const {
585 : assert(Kind == k_BTIHint && "Invalid access!");
586 0 : return BTIHint.Val;
587 : }
588 :
589 0 : StringRef getBTIHintName() const {
590 : assert(Kind == k_BTIHint && "Invalid access!");
591 0 : return StringRef(BTIHint.Data, BTIHint.Length);
592 : }
593 :
594 0 : StringRef getPrefetchName() const {
595 : assert(Kind == k_Prefetch && "Invalid access!");
596 0 : return StringRef(Prefetch.Data, Prefetch.Length);
597 : }
598 :
599 : AArch64_AM::ShiftExtendType getShiftExtendType() const {
600 3411 : if (Kind == k_ShiftExtend)
601 1372 : return ShiftExtend.Type;
602 2039 : if (Kind == k_Register)
603 2039 : return Reg.ShiftExtend.Type;
604 0 : llvm_unreachable("Invalid access!");
605 : }
606 :
607 : unsigned getShiftExtendAmount() const {
608 5764 : if (Kind == k_ShiftExtend)
609 1328 : return ShiftExtend.Amount;
610 4375 : if (Kind == k_Register)
611 4375 : return Reg.ShiftExtend.Amount;
612 0 : llvm_unreachable("Invalid access!");
613 : }
614 :
615 : bool hasShiftExtendAmount() const {
616 1443 : if (Kind == k_ShiftExtend)
617 1181 : return ShiftExtend.HasExplicitAmount;
618 1443 : if (Kind == k_Register)
619 524 : return Reg.ShiftExtend.HasExplicitAmount;
620 0 : llvm_unreachable("Invalid access!");
621 : }
622 :
623 11090 : bool isImm() const override { return Kind == k_Immediate; }
624 0 : bool isMem() const override { return false; }
625 :
626 : bool isUImm6() const {
627 13 : if (!isImm())
628 : return false;
629 13 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630 : if (!MCE)
631 : return false;
632 13 : int64_t Val = MCE->getValue();
633 13 : return (Val >= 0 && Val < 64);
634 : }
635 :
636 : template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
637 :
638 : template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
639 2477 : return isImmScaled<Bits, Scale>(true);
640 : }
641 :
642 : template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
643 660 : return isImmScaled<Bits, Scale>(false);
644 : }
645 :
646 : template <int Bits, int Scale>
647 3137 : DiagnosticPredicate isImmScaled(bool Signed) const {
648 7038 : if (!isImm())
649 874 : return DiagnosticPredicateTy::NoMatch;
650 :
651 5455 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 : if (!MCE)
653 0 : return DiagnosticPredicateTy::NoMatch;
654 :
655 : int64_t MinVal, MaxVal;
656 2263 : if (Signed) {
657 : int64_t Shift = Bits - 1;
658 : MinVal = (int64_t(1) << Shift) * -Scale;
659 : MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
660 : } else {
661 : MinVal = 0;
662 : MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
663 : }
664 :
665 4876 : int64_t Val = MCE->getValue();
666 4876 : if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
667 1311 : return DiagnosticPredicateTy::Match;
668 :
669 952 : return DiagnosticPredicateTy::NearMatch;
670 : }
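// Worked example (editorial): with Bits = 4 and Scale = 16, the signed form
// accepts multiples of 16 from (1 << 3) * -16 = -128 up to
// ((1 << 3) - 1) * 16 = 112; the unsigned form accepts multiples of 16
// from 0 up to ((1 << 4) - 1) * 16 = 240.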
671 661 :
672 661 : DiagnosticPredicate isSVEPattern() const {
673 234 : if (!isImm())
674 : return DiagnosticPredicateTy::NoMatch;
675 427 : auto *MCE = dyn_cast<MCConstantExpr>(getImm());
676 : if (!MCE)
677 0 : return DiagnosticPredicateTy::NoMatch;
678 : int64_t Val = MCE->getValue();
679 : if (Val >= 0 && Val < 32)
680 427 : return DiagnosticPredicateTy::Match;
681 : return DiagnosticPredicateTy::NearMatch;
682 : }
683 :
684 : bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
685 : AArch64MCExpr::VariantKind ELFRefKind;
686 : MCSymbolRefExpr::VariantKind DarwinRefKind;
687 : int64_t Addend;
688 : if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
689 427 : Addend)) {
690 427 : // If we don't understand the expression, assume the best and
691 303 : // let the fixup and relocation code deal with it.
692 : return true;
693 124 : }
694 :
695 19 : if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
696 19 : ELFRefKind == AArch64MCExpr::VK_LO12 ||
697 2 : ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
698 : ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
699 17 : ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
700 : ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
701 0 : ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
702 : ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
703 : ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
704 17 : ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
705 : ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
706 : // Note that we don't range-check the addend. It's adjusted modulo page
707 : // size when converted, so there is no "out of range" condition when using
708 : // @pageoff.
709 : return true;
710 : } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
711 : DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
712 : // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
713 17 : return Addend == 0;
714 17 : }
715 7 :
716 : return false;
717 10 : }
718 :
719 41 : template <int Scale> bool isUImm12Offset() const {
720 41 : if (!isImm())
721 6 : return false;
722 :
723 35 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
724 : if (!MCE)
725 0 : return isSymbolicUImm12Offset(getImm());
726 :
727 : int64_t Val = MCE->getValue();
728 35 : return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
729 : }
730 :
731 : template <int N, int M>
732 : bool isImmInRange() const {
733 : if (!isImm())
734 : return false;
735 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 : if (!MCE)
737 35 : return false;
738 35 : int64_t Val = MCE->getValue();
739 19 : return (Val >= N && Val <= M);
740 : }
741 16 :
742 : // NOTE: Also used for isLogicalImmNot as anything that can be represented as
743 55 : // a logical immediate can always be represented when inverted.
744 55 : template <typename T>
745 10 : bool isLogicalImm() const {
746 : if (!isImm())
747 45 : return false;
748 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
749 0 : if (!MCE)
750 : return false;
751 :
752 45 : int64_t Val = MCE->getValue();
753 : int64_t SVal = typename std::make_signed<T>::type(Val);
754 : int64_t UVal = typename std::make_unsigned<T>::type(Val);
755 : if (Val != SVal && Val != UVal)
756 : return false;
757 :
758 : return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
759 : }
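// Worked example (editorial): 0x00ff00ff00ff00ff is a valid 64-bit logical
// immediate (a repeating 0x00ff element), so "and x0, x1, #0x00ff00ff00ff00ff"
// is accepted; its inverse 0xff00ff00ff00ff00 is also encodable, which is
// what lets isLogicalImmNot reuse this predicate.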
760 :
761 45 : bool isShiftedImm() const { return Kind == k_ShiftedImm; }
762 45 :
763 29 : /// Returns the immediate value as a pair of (imm, shift) if the immediate is
764 : /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
765 16 : /// immediate that can be shifted by 'Shift'.
766 : template <unsigned Width>
767 82 : Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
768 82 : if (isShiftedImm() && Width == getShiftedImmShift())
769 4 : if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
770 : return std::make_pair(CE->getValue(), Width);
771 78 :
772 : if (isImm())
773 0 : if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
774 : int64_t Val = CE->getValue();
775 : if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
776 78 : return std::make_pair(Val >> Width, Width);
777 : else
778 : return std::make_pair(Val, 0u);
779 : }
780 :
781 : return {};
782 : }
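// Worked example (editorial) for getShiftedVal<12>(): "#4, lsl #12" yields
// (4, 12); a plain "#0x5000" yields (5, 12) because its low 12 bits are
// clear; a plain "#5" yields (5, 0).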
783 :
784 : bool isAddSubImm() const {
785 78 : if (!isShiftedImm() && !isImm())
786 78 : return false;
787 66 :
788 : const MCExpr *Expr;
789 12 :
790 : // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
791 81 : if (isShiftedImm()) {
792 81 : unsigned Shift = ShiftedImm.ShiftAmount;
793 4 : Expr = ShiftedImm.Val;
794 : if (Shift != 0 && Shift != 12)
795 77 : return false;
796 : } else {
797 0 : Expr = getImm();
798 : }
799 :
800 77 : AArch64MCExpr::VariantKind ELFRefKind;
801 : MCSymbolRefExpr::VariantKind DarwinRefKind;
802 : int64_t Addend;
803 : if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
804 : DarwinRefKind, Addend)) {
805 : return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
806 : || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
807 : || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
808 : || ELFRefKind == AArch64MCExpr::VK_LO12
809 77 : || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
810 77 : || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
811 37 : || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
812 : || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
813 40 : || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
814 : || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
815 174 : || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
816 174 : || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
817 12 : || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
818 : }
819 162 :
820 : // If it's a constant, it should be a real immediate in range.
821 0 : if (auto ShiftedVal = getShiftedVal<12>())
822 : return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
823 :
824 162 : // If it's an expression, we hope for the best and let the fixup/relocation
825 : // code deal with it.
826 : return true;
827 : }
828 :
829 : bool isAddSubImmNeg() const {
830 : if (!isShiftedImm() && !isImm())
831 : return false;
832 :
833 162 : // Otherwise it should be a real negative immediate in range.
834 162 : if (auto ShiftedVal = getShiftedVal<12>())
835 62 : return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
836 :
837 100 : return false;
838 : }
839 208 :
840 208 : // Signed value in the range -128 to +127. For element widths of
841 16 : // 16 bits or higher it may also be a signed multiple of 256 in the
842 : // range -32768 to +32512.
843 192 : // For element-width of 8 bits a range of -128 to 255 is accepted,
844 : // since a copy of a byte can be either signed/unsigned.
845 0 : template <typename T>
846 : DiagnosticPredicate isSVECpyImm() const {
847 : if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
848 192 : return DiagnosticPredicateTy::NoMatch;
849 :
850 : bool IsByte =
851 : std::is_same<int8_t, typename std::make_signed<T>::type>::value;
852 : if (auto ShiftedImm = getShiftedVal<8>())
853 : if (!(IsByte && ShiftedImm->second) &&
854 : AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
855 : << ShiftedImm->second))
856 : return DiagnosticPredicateTy::Match;
857 192 :
858 192 : return DiagnosticPredicateTy::NearMatch;
859 72 : }
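// Illustrative example (editorial): for a 16-bit element,
//   cpy z0.h, p0/m, #0x1200
// matches because 0x1200 is 0x12 shifted left by 8; for an 8-bit element
// only the unshifted range -128..255 is accepted.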
860 :
861 120 : // Unsigned value in the range 0 to 255. For element widths of
862 : // 16 bits or higher it may also be a signed multiple of 256 in the
863 178 : // range 0 to 65280.
864 178 : template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
865 24 : if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
866 : return DiagnosticPredicateTy::NoMatch;
867 154 :
868 : bool IsByte =
869 0 : std::is_same<int8_t, typename std::make_signed<T>::type>::value;
870 : if (auto ShiftedImm = getShiftedVal<8>())
871 : if (!(IsByte && ShiftedImm->second) &&
872 154 : AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
873 : << ShiftedImm->second))
874 : return DiagnosticPredicateTy::Match;
875 :
876 : return DiagnosticPredicateTy::NearMatch;
877 : }
878 :
879 : template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
880 : if (isLogicalImm<T>() && !isSVECpyImm<T>())
881 154 : return DiagnosticPredicateTy::Match;
882 154 : return DiagnosticPredicateTy::NoMatch;
883 88 : }
884 :
885 66 : bool isCondCode() const { return Kind == k_CondCode; }
886 :
887 244 : bool isSIMDImmType10() const {
888 244 : if (!isImm())
889 44 : return false;
890 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
891 200 : if (!MCE)
892 : return false;
893 0 : return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
894 : }
895 :
896 200 : template<int N>
897 : bool isBranchTarget() const {
898 : if (!isImm())
899 : return false;
900 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
901 : if (!MCE)
902 : return true;
903 : int64_t Val = MCE->getValue();
904 : if (Val & 0x3)
905 200 : return false;
906 200 : assert(N > 0 && "Branch target immediate cannot be 0 bits!");
907 80 : return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
908 : }
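// Worked example (editorial): with N = 26 (B/BL targets) the accepted range
// is -(1 << 25) * 4 = -134217728 to ((1 << 25) - 1) * 4 = 134217724 bytes,
// i.e. roughly +/-128 MiB, and the value must be 4-byte aligned.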
909 120 :
910 : bool
911 302 : isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
912 302 : if (!isImm())
913 90 : return false;
914 :
915 212 : AArch64MCExpr::VariantKind ELFRefKind;
916 : MCSymbolRefExpr::VariantKind DarwinRefKind;
917 0 : int64_t Addend;
918 : if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
919 : DarwinRefKind, Addend)) {
920 212 : return false;
921 : }
922 : if (DarwinRefKind != MCSymbolRefExpr::VK_None)
923 : return false;
924 :
925 : for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
926 : if (ELFRefKind == AllowedModifiers[i])
927 : return true;
928 : }
929 212 :
930 212 : return false;
931 140 : }
932 :
933 72 : bool isMovZSymbolG3() const {
934 : return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
935 284 : }
936 284 :
937 124 : bool isMovZSymbolG2() const {
938 : return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
939 160 : AArch64MCExpr::VK_TPREL_G2,
940 : AArch64MCExpr::VK_DTPREL_G2});
941 0 : }
942 :
943 : bool isMovZSymbolG1() const {
944 160 : return isMovWSymbol({
945 : AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
946 : AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
947 : AArch64MCExpr::VK_DTPREL_G1,
948 : });
949 : }
950 :
951 : bool isMovZSymbolG0() const {
952 : return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
953 160 : AArch64MCExpr::VK_TPREL_G0,
954 160 : AArch64MCExpr::VK_DTPREL_G0});
955 96 : }
956 :
957 64 : bool isMovKSymbolG3() const {
958 : return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
959 284 : }
960 284 :
961 124 : bool isMovKSymbolG2() const {
962 : return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
963 160 : }
964 :
965 0 : bool isMovKSymbolG1() const {
966 : return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
967 : AArch64MCExpr::VK_TPREL_G1_NC,
968 160 : AArch64MCExpr::VK_DTPREL_G1_NC});
969 : }
970 :
971 : bool isMovKSymbolG0() const {
972 : return isMovWSymbol(
973 : {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
974 : AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
975 : }
976 :
977 160 : template<int RegWidth, int Shift>
978 160 : bool isMOVZMovAlias() const {
979 96 : if (!isImm()) return false;
980 :
981 64 : const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
982 : if (!CE) return false;
983 284 : uint64_t Value = CE->getValue();
984 284 :
985 124 : return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
986 : }
987 160 :
988 : template<int RegWidth, int Shift>
989 0 : bool isMOVNMovAlias() const {
990 : if (!isImm()) return false;
991 :
992 160 : const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
993 : if (!CE) return false;
994 : uint64_t Value = CE->getValue();
995 :
996 : return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
997 : }
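// Illustrative example (editorial): "mov x0, #0x20000" is accepted by the
// Shift = 16 instantiation of isMOVZMovAlias, since 0x20000 == 0x2 << 16,
// and assembles to "movz x0, #0x2, lsl #16".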
998 :
999 : bool isFPImm() const {
1000 : return Kind == k_FPImm &&
1001 160 : AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1002 160 : }
1003 96 :
1004 : bool isBarrier() const { return Kind == k_Barrier; }
1005 64 : bool isSysReg() const { return Kind == k_SysReg; }
1006 :
1007 164 : bool isMRSSystemRegister() const {
1008 164 : if (!isSysReg()) return false;
1009 48 :
1010 : return SysReg.MRSReg != -1U;
1011 116 : }
1012 :
1013 0 : bool isMSRSystemRegister() const {
1014 : if (!isSysReg()) return false;
1015 : return SysReg.MSRReg != -1U;
1016 116 : }
1017 :
1018 : bool isSystemPStateFieldWithImm0_1() const {
1019 : if (!isSysReg()) return false;
1020 : return (SysReg.PStateField == AArch64PState::PAN ||
1021 : SysReg.PStateField == AArch64PState::DIT ||
1022 : SysReg.PStateField == AArch64PState::UAO ||
1023 : SysReg.PStateField == AArch64PState::SSBS);
1024 : }
1025 116 :
1026 116 : bool isSystemPStateFieldWithImm0_15() const {
1027 76 : if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1028 : return SysReg.PStateField != -1U;
1029 40 : }
1030 :
1031 76 : bool isReg() const override {
1032 76 : return Kind == k_Register;
1033 8 : }
1034 :
1035 68 : bool isScalarReg() const {
1036 : return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1037 0 : }
1038 :
1039 : bool isNeonVectorReg() const {
1040 68 : return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1041 : }
1042 :
1043 : bool isNeonVectorRegLo() const {
1044 : return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1045 : AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1046 : Reg.RegNum);
1047 : }
1048 :
1049 68 : template <unsigned Class> bool isSVEVectorReg() const {
1050 68 : RegKind RK;
1051 44 : switch (Class) {
1052 : case AArch64::ZPRRegClassID:
1053 24 : case AArch64::ZPR_3bRegClassID:
1054 : case AArch64::ZPR_4bRegClassID:
1055 : RK = RegKind::SVEDataVector;
1056 : break;
1057 8316 : case AArch64::PPRRegClassID:
1058 : case AArch64::PPR_3bRegClassID:
1059 7804 : RK = RegKind::SVEPredicateVector;
1060 : break;
1061 : default:
1062 7588 : llvm_unreachable("Unsupport register class");
1063 7588 : }
1064 :
1065 : return (Kind == k_Register && Reg.Kind == RK) &&
1066 : AArch64MCRegisterClasses[Class].contains(getReg());
1067 : }
1068 0 :
1069 : template <unsigned Class> bool isFPRasZPR() const {
1070 : return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1071 : AArch64MCRegisterClasses[Class].contains(getReg());
1072 0 : }
1073 :
1074 : template <int ElementWidth, unsigned Class>
1075 : DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1076 0 : if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1077 : return DiagnosticPredicateTy::NoMatch;
1078 :
1079 0 : if (isSVEVectorReg<Class>() &&
1080 0 : (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1081 0 : return DiagnosticPredicateTy::Match;
1082 0 :
1083 0 : return DiagnosticPredicateTy::NearMatch;
1084 0 : }
1085 0 :
1086 0 : template <int ElementWidth, unsigned Class>
1087 0 : DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1088 0 : if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1089 : return DiagnosticPredicateTy::NoMatch;
1090 :
1091 : if (isSVEVectorReg<Class>() &&
1092 : (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1093 0 : return DiagnosticPredicateTy::Match;
1094 0 :
1095 : return DiagnosticPredicateTy::NearMatch;
1096 : }
1097 0 :
1098 : template <int ElementWidth, unsigned Class,
1099 : AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1100 : bool ShiftWidthAlwaysSame>
1101 : DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1102 : auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1103 1281 : if (!VectorMatch.isMatch())
1104 1281 : return DiagnosticPredicateTy::NoMatch;
1105 :
1106 : // Give a more specific diagnostic when the user has explicitly typed in
1107 1081 : // a shift-amount that does not match what is expected, but for which
1108 : // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1109 579 : bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1110 : if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1111 502 : ShiftExtendTy == AArch64_AM::SXTW) &&
1112 502 : !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1113 : return DiagnosticPredicateTy::NoMatch;
1114 412 :
1115 412 : if (MatchShift && ShiftExtendTy == getShiftExtendType())
1116 : return DiagnosticPredicateTy::Match;
1117 :
1118 372 : return DiagnosticPredicateTy::NearMatch;
1119 : }
1120 196 :
1121 : bool isGPR32as64() const {
1122 176 : return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1123 176 : AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1124 : }
1125 270 :
1126 270 : bool isGPR64as32() const {
1127 : return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1128 : AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1129 217 : }
1130 :
1131 109 : bool isWSeqPair() const {
1132 : return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1133 108 : AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1134 108 : Reg.RegNum);
1135 : }
1136 251 :
1137 251 : bool isXSeqPair() const {
1138 : return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1139 : AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1140 209 : Reg.RegNum);
1141 : }
1142 108 :
1143 : template<int64_t Angle, int64_t Remainder>
1144 101 : DiagnosticPredicate isComplexRotation() const {
1145 101 : if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1146 :
1147 243 : const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1148 243 : if (!CE) return DiagnosticPredicateTy::NoMatch;
1149 : uint64_t Value = CE->getValue();
1150 :
1151 205 : if (Value % Angle == Remainder && Value <= 270)
1152 : return DiagnosticPredicateTy::Match;
1153 116 : return DiagnosticPredicateTy::NearMatch;
1154 : }
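// Illustrative example (editorial): isComplexRotation<90, 0> accepts the
// FCMLA rotations 0, 90, 180 and 270, while isComplexRotation<180, 90>
// accepts the FCADD rotations 90 and 270.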
1155 89 :
1156 89 : template <unsigned RegClassID> bool isGPR64() const {
1157 : return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1158 105 : AArch64MCRegisterClasses[RegClassID].contains(getReg());
1159 105 : }
1160 :
1161 : template <unsigned RegClassID, int ExtWidth>
1162 78 : DiagnosticPredicate isGPR64WithShiftExtend() const {
1163 : if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1164 50 : return DiagnosticPredicateTy::NoMatch;
1165 :
1166 28 : if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1167 28 : getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1168 : return DiagnosticPredicateTy::Match;
1169 : return DiagnosticPredicateTy::NearMatch;
1170 : }
1171 :
1172 5422 : /// Is this a vector list with the type implicit (presumably attached to the
1173 : /// instruction itself)?
1174 4971 : template <RegKind VectorKind, unsigned NumRegs>
1175 : bool isImplicitlyTypedVectorList() const {
1176 : return Kind == k_VectorList && VectorList.Count == NumRegs &&
1177 4681 : VectorList.NumElements == 0 &&
1178 4681 : VectorList.RegisterKind == VectorKind;
1179 : }
1180 :
1181 : template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1182 : unsigned ElementWidth>
1183 : bool isTypedVectorList() const {
1184 1234 : if (Kind != k_VectorList)
1185 1234 : return false;
1186 : if (VectorList.Count != NumRegs)
1187 653 : return false;
1188 : if (VectorList.RegisterKind != VectorKind)
1189 : return false;
1190 : if (VectorList.ElementWidth != ElementWidth)
1191 651 : return false;
1192 410 : return VectorList.NumElements == NumElements;
1193 : }
1194 410 :
1195 : template <int Min, int Max>
1196 : DiagnosticPredicate isVectorIndex() const {
1197 593 : if (Kind != k_VectorIndex)
1198 : return DiagnosticPredicateTy::NoMatch;
1199 238 : if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1200 238 : return DiagnosticPredicateTy::Match;
1201 : return DiagnosticPredicateTy::NearMatch;
1202 120 : }
1203 :
1204 : bool isToken() const override { return Kind == k_Token; }
1205 :
1206 120 : bool isTokenEqual(StringRef Str) const {
1207 120 : return Kind == k_Token && getToken() == Str;
1208 : }
1209 120 : bool isSysCR() const { return Kind == k_SysCR; }
1210 : bool isPrefetch() const { return Kind == k_Prefetch; }
1211 : bool isPSBHint() const { return Kind == k_PSBHint; }
1212 100 : bool isBTIHint() const { return Kind == k_BTIHint; }
1213 : bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1214 93 : bool isShifter() const {
1215 93 : if (!isShiftExtend())
1216 : return false;
1217 93 :
1218 : AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1219 : return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1220 : ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1221 93 : ST == AArch64_AM::MSL);
1222 93 : }
1223 :
1224 93 : template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1225 : if (Kind != k_FPImm)
1226 : return DiagnosticPredicateTy::NoMatch;
1227 79 :
1228 : if (getFPImmIsExact()) {
1229 518 : // Lookup the immediate from table of supported immediates.
1230 518 : auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1231 : assert(Desc && "Unknown enum value");
1232 243 :
1233 : // Calculate its FP value.
1234 : APFloat RealVal(APFloat::IEEEdouble());
1235 : if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1236 241 : APFloat::opOK)
1237 : llvm_unreachable("FP immediate is not exact");
1238 :
1239 : if (getFPImm().bitwiseIsEqual(RealVal))
1240 : return DiagnosticPredicateTy::Match;
1241 : }
1242 241 :
1243 : return DiagnosticPredicateTy::NearMatch;
1244 385 : }
1245 385 :
1246 : template <unsigned ImmA, unsigned ImmB>
1247 197 : DiagnosticPredicate isExactFPImm() const {
1248 : DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1249 : if ((Res = isExactFPImm<ImmA>()))
1250 : return DiagnosticPredicateTy::Match;
1251 197 : if ((Res = isExactFPImm<ImmB>()))
1252 197 : return DiagnosticPredicateTy::Match;
1253 : return Res;
1254 197 : }
1255 :
1256 : bool isExtend() const {
1257 173 : if (!isShiftExtend())
1258 : return false;
1259 :
1260 0 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1261 : return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1262 : ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1263 : ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1264 : ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1265 : ET == AArch64_AM::LSL) &&
1266 3581 : getShiftExtendAmount() <= 4;
1267 3581 : }
1268 966 :
1269 924 : bool isExtend64() const {
1270 : if (!isExtend())
1271 2657 : return false;
1272 2611 : // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1273 1967 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1274 1967 : return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1275 : }
1276 :
1277 : bool isExtendLSL64() const {
1278 : if (!isExtend())
1279 : return false;
1280 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1281 : return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1282 2140 : ET == AArch64_AM::LSL) &&
1283 2140 : getShiftExtendAmount() <= 4;
1284 750 : }
1285 750 :
1286 : template<int Width> bool isMemXExtend() const {
1287 1390 : if (!isExtend())
1288 1390 : return false;
1289 1390 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1290 1390 : return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1291 : (getShiftExtendAmount() == Log2_32(Width / 8) ||
1292 : getShiftExtendAmount() == 0);
1293 : }
1294 :
1295 : template<int Width> bool isMemWExtend() const {
1296 : if (!isExtend())
1297 : return false;
1298 1441 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1299 1441 : return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1300 216 : (getShiftExtendAmount() == Log2_32(Width / 8) ||
1301 174 : getShiftExtendAmount() == 0);
1302 : }
1303 1267 :
1304 1221 : template <unsigned width>
1305 577 : bool isArithmeticShifter() const {
1306 577 : if (!isShifter())
1307 : return false;
1308 :
1309 : // An arithmetic shifter is LSL, LSR, or ASR.
1310 : AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1311 : return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1312 : ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1313 : }
1314 :
1315 990 : template <unsigned width>
1316 990 : bool isLogicalShifter() const {
1317 : if (!isShifter())
1318 : return false;
1319 :
1320 : // A logical shifter is LSL, LSR, ASR or ROR.
1321 : AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1322 550 : return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1323 82 : ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1324 82 : getShiftExtendAmount() < width;
1325 82 : }
1326 :
1327 : bool isMovImm32Shifter() const {
1328 468 : if (!isShifter())
1329 : return false;
1330 :
1331 : // A MOVi shifter is LSL of 0, 16, 32, or 48.
1332 : AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1333 : if (ST != AArch64_AM::LSL)
1334 546 : return false;
1335 : uint64_t Val = getShiftExtendAmount();
1336 : return (Val == 0 || Val == 16);
1337 189 : }
1338 176 :
1339 176 : bool isMovImm64Shifter() const {
1340 127 : if (!isShifter())
1341 121 : return false;
1342 102 :
1343 84 : // A MOVi shifter is LSL of 0 or 16.
1344 70 : AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1345 51 : if (ST != AArch64_AM::LSL)
1346 30 : return false;
1347 6 : uint64_t Val = getShiftExtendAmount();
1348 369 : return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1349 : }
1350 :
1351 : bool isLogicalVecShifter() const {
1352 357 : if (!isShifter())
1353 253 : return false;
1354 :
1355 : // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1356 : unsigned Shift = getShiftExtendAmount();
1357 104 : return getShiftExtendType() == AArch64_AM::LSL &&
1358 : (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1359 : }
1360 1012 :
1361 1012 : bool isLogicalVecHalfWordShifter() const {
1362 : if (!isLogicalVecShifter())
1363 : return false;
1364 :
1365 572 : // A logical vector shifter is a left shift by 0 or 8.
1366 275 : unsigned Shift = getShiftExtendAmount();
1367 : return getShiftExtendType() == AArch64_AM::LSL &&
1368 297 : (Shift == 0 || Shift == 8);
1369 : }
1370 :
1371 : bool isMoveVecShifter() const {
1372 : if (!isShiftExtend())
1373 : return false;
1374 :
1375 : // A logical vector shifter is a left shift by 8 or 16.
1376 : unsigned Shift = getShiftExtendAmount();
1377 1515 : return getShiftExtendType() == AArch64_AM::MSL &&
1378 1515 : (Shift == 8 || Shift == 16);
1379 727 : }
1380 :
1381 : // Fallback unscaled operands are for aliases of LDR/STR that fall back
1382 : // to LDUR/STUR when the offset is not legal for the former but is for
1383 788 : // the latter. As such, in addition to checking for being a legal unscaled
1384 576 : // address, also check that it is not a legal scaled address. This avoids
1385 1512 : // ambiguity in the matcher.
1386 756 : template<int Width>
1387 550 : bool isSImm9OffsetFB() const {
1388 : return isSImm<9>() && !isUImm12Offset<Width / 8>();
1389 238 : }
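// Illustrative example (editorial): "ldr x0, [x1, #1]" cannot use the scaled
// uimm12 form (1 is not a multiple of 8), so this fallback operand lets the
// alias match the unscaled form and emit "ldur x0, [x1, #1]" instead.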
1390 :
1391 257 : bool isAdrpLabel() const {
1392 257 : // Validation was handled during parsing, so we just sanity check that
1393 145 : // something didn't go haywire.
1394 : if (!isImm())
1395 : return false;
1396 :
1397 112 : if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1398 112 : int64_t Val = CE->getValue();
1399 160 : int64_t Min = - (4096 * (1LL << (21 - 1)));
1400 80 : int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1401 70 : return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1402 : }
1403 42 :
1404 : return true;
1405 492 : }
1406 492 :
1407 244 : bool isAdrLabel() const {
1408 : // Validation was handled during parsing, so we just sanity check that
1409 : // something didn't go haywire.
1410 : if (!isImm())
1411 248 : return false;
1412 248 :
1413 496 : if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1414 248 : int64_t Val = CE->getValue();
1415 170 : int64_t Min = - (1LL << (21 - 1));
1416 : int64_t Max = ((1LL << (21 - 1)) - 1);
1417 78 : return Val >= Min && Val <= Max;
1418 : }
1419 387 :
1420 387 : return true;
1421 171 : }
1422 :
1423 : void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1424 : // Add as immediates when possible. Null MCExpr = 0.
1425 216 : if (!Expr)
1426 216 : Inst.addOperand(MCOperand::createImm(0));
1427 432 : else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1428 216 : Inst.addOperand(MCOperand::createImm(CE->getValue()));
1429 148 : else
1430 : Inst.addOperand(MCOperand::createExpr(Expr));
1431 68 : }
1432 :
1433 379 : void addRegOperands(MCInst &Inst, unsigned N) const {
1434 379 : assert(N == 1 && "Invalid number of operands!");
1435 167 : Inst.addOperand(MCOperand::createReg(getReg()));
1436 : }
1437 :
1438 : void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1439 212 : assert(N == 1 && "Invalid number of operands!");
1440 : assert(
1441 424 : AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1442 212 :
1443 162 : const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1444 : uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1445 50 : RI->getEncodingValue(getReg()));
1446 :
1447 : Inst.addOperand(MCOperand::createReg(Reg));
1448 : }
1449 :
1450 : void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1451 994 : assert(N == 1 && "Invalid number of operands!");
1452 994 : assert(
1453 279 : AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1454 :
1455 : const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1456 : uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1457 715 : RI->getEncodingValue(getReg()));
1458 126 :
1459 1262 : Inst.addOperand(MCOperand::createReg(Reg));
1460 589 : }
1461 533 :
1462 : template <int Width>
1463 182 : void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1464 : unsigned Base;
1465 186 : switch (Width) {
1466 186 : case 8: Base = AArch64::B0; break;
1467 60 : case 16: Base = AArch64::H0; break;
1468 : case 32: Base = AArch64::S0; break;
1469 : case 64: Base = AArch64::D0; break;
1470 : case 128: Base = AArch64::Q0; break;
1471 126 : default:
1472 126 : llvm_unreachable("Unsupported width");
1473 84 : }
1474 : Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1475 70 : }
1476 :
1477 56 : void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1478 : assert(N == 1 && "Invalid number of operands!");
1479 295 : assert(
1480 295 : AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1481 70 : Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1482 : }
1483 :
1484 : void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1485 225 : assert(N == 1 && "Invalid number of operands!");
1486 : assert(
1487 450 : AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1488 225 : Inst.addOperand(MCOperand::createReg(getReg()));
1489 183 : }
1490 :
1491 42 : void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1492 : assert(N == 1 && "Invalid number of operands!");
1493 247 : Inst.addOperand(MCOperand::createReg(getReg()));
1494 247 : }
1495 65 :
1496 : enum VecListIndexType {
1497 : VecListIdx_DReg = 0,
1498 : VecListIdx_QReg = 1,
1499 182 : VecListIdx_ZReg = 2,
1500 : };
1501 364 :
1502 182 : template <VecListIndexType RegTy, unsigned NumRegs>
1503 140 : void addVectorListOperands(MCInst &Inst, unsigned N) const {
1504 : assert(N == 1 && "Invalid number of operands!");
1505 42 : static const unsigned FirstRegs[][5] = {
1506 : /* DReg */ { AArch64::Q0,
1507 266 : AArch64::D0, AArch64::D0_D1,
1508 266 : AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1509 84 : /* QReg */ { AArch64::Q0,
1510 : AArch64::Q0, AArch64::Q0_Q1,
1511 : AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1512 : /* ZReg */ { AArch64::Z0,
1513 182 : AArch64::Z0, AArch64::Z0_Z1,
1514 : AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1515 364 : };
1516 182 :
1517 140 : assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1518 : " NumRegs must be <= 4 for ZRegs");
1519 42 :
1520 : unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1521 : Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1522 478 : FirstRegs[(unsigned)RegTy][0]));
1523 478 : }
1524 27 :
1525 451 : void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1526 : assert(N == 1 && "Invalid number of operands!");
1527 192 : Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1528 192 : }
1529 17 :
1530 175 : template <unsigned ImmIs0, unsigned ImmIs1>
1531 : void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1532 146 : assert(N == 1 && "Invalid number of operands!");
1533 146 : assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1534 10 : Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1535 136 : }
1536 :
1537 140 : void addImmOperands(MCInst &Inst, unsigned N) const {
1538 140 : assert(N == 1 && "Invalid number of operands!");
1539 0 : // If this is a pageoff symrefexpr with an addend, adjust the addend
1540 140 : // to be only the page-offset portion. Otherwise, just add the expr
1541 : // as-is.
1542 : addExpr(Inst, getImm());
1543 0 : }
1544 :
1545 : template <int Shift>
1546 8 : void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1547 : assert(N == 2 && "Invalid number of operands!");
1548 8 : if (auto ShiftedVal = getShiftedVal<Shift>()) {
1549 : Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1550 : Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1551 8 : } else if (isShiftedImm()) {
1552 : addExpr(Inst, getShiftedImmVal());
1553 : Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1554 : } else {
1555 : addExpr(Inst, getImm());
1556 1100 : Inst.addOperand(MCOperand::createImm(0));
1557 : }
1558 330 : }
1559 :
1560 : template <int Shift>
1561 51 : void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1562 51 : assert(N == 2 && "Invalid number of operands!");
1563 : if (auto ShiftedVal = getShiftedVal<Shift>()) {
1564 : Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1565 43 : Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1566 : } else
1567 : llvm_unreachable("Not a shifted negative immediate");
1568 : }
1569 649 :
1570 649 : void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1571 : assert(N == 1 && "Invalid number of operands!");
1572 : Inst.addOperand(MCOperand::createImm(getCondCode()));
1573 : }
1574 :
1575 : void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1576 649 : assert(N == 1 && "Invalid number of operands!");
1577 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1578 : if (!MCE)
1579 : addExpr(Inst, getImm());
1580 535 : else
1581 : Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1582 : }
1583 2124 :
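// Editorial sketch: ADRP materialises the 4 KiB page of its target, so the
// constant form above keeps only Value >> 12. A minimal illustration with a
// made-up address:
static int64_t sketchAdrpPage(int64_t Addr) {
  return Addr >> 12; // e.g. 0x12345 -> 0x12
}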
1584 3582 : void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1585 : addImmOperands(Inst, N);
1586 : }
1587 :
1588 : template<int Scale>
1589 : void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1590 : assert(N == 1 && "Invalid number of operands!");
1591 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1592 82 :
1593 : if (!MCE) {
1594 : Inst.addOperand(MCOperand::createExpr(getImm()));
1595 : return;
1596 75 : }
1597 : Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1598 : }
1599 :
1600 : void addUImm6Operands(MCInst &Inst, unsigned N) const {
1601 : assert(N == 1 && "Invalid number of operands!");
1602 149 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1603 : Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1604 : }
1605 :
1606 : template <int Scale>
1607 : void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1608 : assert(N == 1 && "Invalid number of operands!");
1609 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1610 188 : Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1611 : }
1612 :
1613 : template <typename T>
1614 : void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1615 : assert(N == 1 && "Invalid number of operands!");
1616 28 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1617 : typename std::make_unsigned<T>::type Val = MCE->getValue();
1618 : uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1619 : Inst.addOperand(MCOperand::createImm(encoding));
1620 44 : }
1621 :
1622 : template <typename T>
1623 : void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1624 65 : assert(N == 1 && "Invalid number of operands!");
1625 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1626 : typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1627 : uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1628 : Inst.addOperand(MCOperand::createImm(encoding));
1629 : }
1630 95 :
1631 : void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1632 : assert(N == 1 && "Invalid number of operands!");
1633 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1634 : uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1635 : Inst.addOperand(MCOperand::createImm(encoding));
1636 : }
1637 94 :
1638 : void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1639 42 : // Branch operands don't encode the low bits, so shift them off
1640 : // here. If it's a label, however, just put it on directly as there's
1641 34 : // not enough information now to do anything.
1642 : assert(N == 1 && "Invalid number of operands!");
1643 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1644 : if (!MCE) {
1645 : addExpr(Inst, getImm());
1646 : return;
1647 71 : }
1648 71 : assert(MCE && "Invalid constant immediate operand!");
1649 : Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1650 19 : }
1651 :
1652 11 : void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1653 : // Branch operands don't encode the low bits, so shift them off
1654 : // here. If it's a label, however, just put it on directly as there's
1655 : // not enough information now to do anything.
1656 12 : assert(N == 1 && "Invalid number of operands!");
1657 12 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1658 : if (!MCE) {
1659 3 : addExpr(Inst, getImm());
1660 : return;
1661 1 : }
1662 : assert(MCE && "Invalid constant immediate operand!");
1663 : Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1664 : }
1665 12 :
1666 12 : void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1667 : // Branch operands don't encode the low bits, so shift them off
1668 3 : // here. If it's a label, however, just put it on directly as there's
1669 : // not enough information now to do anything.
1670 1 : assert(N == 1 && "Invalid number of operands!");
1671 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1672 : if (!MCE) {
1673 : addExpr(Inst, getImm());
1674 12 : return;
1675 12 : }
1676 : assert(MCE && "Invalid constant immediate operand!");
1677 3 : Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1678 : }
1679 1 :
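// Editorial sketch: branch destinations are word aligned, so the constant
// forms above store the byte offset scaled down by four.
static int64_t sketchBranchWordOffset(int64_t ByteOffset) {
  return ByteOffset >> 2; // e.g. a #16 byte offset is encoded as 4
}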
1680 : void addFPImmOperands(MCInst &Inst, unsigned N) const {
1681 : assert(N == 1 && "Invalid number of operands!");
1682 : Inst.addOperand(MCOperand::createImm(
1683 13 : AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1684 13 : }
1685 :
1686 4 : void addBarrierOperands(MCInst &Inst, unsigned N) const {
1687 : assert(N == 1 && "Invalid number of operands!");
1688 2 : Inst.addOperand(MCOperand::createImm(getBarrier()));
1689 : }
1690 :
1691 : void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1692 9 : assert(N == 1 && "Invalid number of operands!");
1693 9 :
1694 : Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1695 1 : }
1696 :
1697 1 : void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1698 : assert(N == 1 && "Invalid number of operands!");
1699 :
1700 : Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1701 13 : }
1702 13 :
1703 : void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1704 5 : assert(N == 1 && "Invalid number of operands!");
1705 :
1706 5 : Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1707 : }
1708 :
1709 : void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1710 : assert(N == 1 && "Invalid number of operands!");
1711 5422 :
1712 9483 : Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1713 13544 : }
1714 :
1715 : void addSysCROperands(MCInst &Inst, unsigned N) const {
1716 0 : assert(N == 1 && "Invalid number of operands!");
1717 0 : Inst.addOperand(MCOperand::createImm(getSysCR()));
1718 : }
1719 :
1720 1468 : void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1721 : assert(N == 1 && "Invalid number of operands!");
1722 1468 : Inst.addOperand(MCOperand::createImm(getPrefetch()));
1723 : }
1724 :
1725 : void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1726 1461 : assert(N == 1 && "Invalid number of operands!");
1727 1461 : Inst.addOperand(MCOperand::createImm(getPSBHint()));
1728 : }
1729 :
1730 : void addBTIHintOperands(MCInst &Inst, unsigned N) const {
1731 593 : assert(N == 1 && "Invalid number of operands!");
1732 1192 : Inst.addOperand(MCOperand::createImm(getBTIHint()));
1733 1166 : }
1734 2350 :
1735 : void addShifterOperands(MCInst &Inst, unsigned N) const {
1736 : assert(N == 1 && "Invalid number of operands!");
1737 : unsigned Imm =
1738 : AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1739 599 : Inst.addOperand(MCOperand::createImm(Imm));
1740 576 : }
1741 :
1742 : void addExtendOperands(MCInst &Inst, unsigned N) const {
1743 4476 : assert(N == 1 && "Invalid number of operands!");
1744 572791 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1745 : if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1746 : unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1747 0 : Inst.addOperand(MCOperand::createImm(Imm));
1748 268 : }
1749 :
1750 : void addExtend64Operands(MCInst &Inst, unsigned N) const {
1751 0 : assert(N == 1 && "Invalid number of operands!");
1752 48562 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1753 : if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1754 : unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1755 237 : Inst.addOperand(MCOperand::createImm(Imm));
1756 466 : }
1757 231 :
1758 231 : void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1759 : assert(N == 2 && "Invalid number of operands!");
1760 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1761 220537 : bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1762 : Inst.addOperand(MCOperand::createImm(IsSigned));
1763 : Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1764 : }
1765 :
1766 : // For 8-bit load/store instructions with a register offset, both the
1767 : // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1768 : // they're disambiguated by whether the shift was explicit or implicit rather
1769 : // than its size.
1770 : void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1771 : assert(N == 2 && "Invalid number of operands!");
1772 : AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1773 : bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1774 : Inst.addOperand(MCOperand::createImm(IsSigned));
1775 : Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1776 : }
1777 440854 :
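// Editorial sketch, not used by the parser: the MemExtend helpers add just two
// flags, "is the extend signed?" and "is the offset shifted?"; the 8-bit form
// records whether a shift was written at all rather than its size.
static std::pair<bool, bool> sketchMemExtend8Bits(AArch64_AM::ShiftExtendType ET,
                                                  bool HasExplicitAmount) {
  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
  return {IsSigned, HasExplicitAmount}; // e.g. {false, true} for "uxtw #0"
}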
1778 220537 : template<int Shift>
1779 : void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1780 116 : assert(N == 1 && "Invalid number of operands!");
1781 :
1782 : const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1783 : uint64_t Value = CE->getValue();
1784 : Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1785 : }
1786 :
1787 : template<int Shift>
1788 : void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1789 : assert(N == 1 && "Invalid number of operands!");
1790 :
1791 : const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1792 : uint64_t Value = CE->getValue();
1793 : Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1794 : }
1795 :
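// Editorial sketch: the MOV aliases above carry a single 16-bit chunk of the
// constant, selected by the shift; MOVZ slices the value itself and MOVN its
// bitwise complement.
static uint64_t sketchMovzChunk(uint64_t Value, unsigned Shift) {
  return (Value >> Shift) & 0xffff; // e.g. (0xdead0000, 16) -> 0xdead
}
static uint64_t sketchMovnChunk(uint64_t Value, unsigned Shift) {
  return (~Value >> Shift) & 0xffff; // complement first, then slice
}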
1796 222 : void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1797 116 : assert(N == 1 && "Invalid number of operands!");
1798 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1799 122 : Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1800 : }
1801 :
1802 : void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1803 : assert(N == 1 && "Invalid number of operands!");
1804 : const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1805 : Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1806 : }
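// Editorial sketch: complex-arithmetic rotations arrive as degrees and are
// encoded as small indices; the even set {0, 90, 180, 270} maps to {0..3} and
// the odd set {90, 270} maps to {0, 1}, matching the divisions above.
static int64_t sketchRotationEven(int64_t Degrees) { return Degrees / 90; }
static int64_t sketchRotationOdd(int64_t Degrees) { return (Degrees - 90) / 180; }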
1807 :
1808 : void print(raw_ostream &OS) const override;
1809 :
1810 : static std::unique_ptr<AArch64Operand>
1811 : CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1812 : auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1813 : Op->Tok.Data = Str.data();
1814 : Op->Tok.Length = Str.size();
1815 236 : Op->Tok.IsSuffix = IsSuffix;
1816 122 : Op->StartLoc = S;
1817 : Op->EndLoc = S;
1818 153809 : return Op;
1819 : }
1820 :
1821 : static std::unique_ptr<AArch64Operand>
1822 : CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1823 : RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1824 : AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1825 : unsigned ShiftAmount = 0,
1826 : unsigned HasExplicitAmount = false) {
1827 : auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1828 : Op->Reg.RegNum = RegNum;
1829 : Op->Reg.Kind = Kind;
1830 : Op->Reg.ElementWidth = 0;
1831 : Op->Reg.EqualityTy = EqTy;
1832 : Op->Reg.ShiftExtend.Type = ExtTy;
1833 : Op->Reg.ShiftExtend.Amount = ShiftAmount;
1834 307618 : Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1835 153809 : Op->StartLoc = S;
1836 : Op->EndLoc = E;
1837 43218 : return Op;
1838 : }
1839 :
1840 : static std::unique_ptr<AArch64Operand>
1841 : CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1842 : SMLoc S, SMLoc E, MCContext &Ctx,
1843 : AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1844 : unsigned ShiftAmount = 0,
1845 : unsigned HasExplicitAmount = false) {
1846 : assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1847 : Kind == RegKind::SVEPredicateVector) &&
1848 : "Invalid vector kind");
1849 : auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1850 : HasExplicitAmount);
1851 : Op->Reg.ElementWidth = ElementWidth;
1852 : return Op;
1853 86234 : }
1854 43218 :
1855 : static std::unique_ptr<AArch64Operand>
1856 23272 : CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1857 : unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1858 : MCContext &Ctx) {
1859 : auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1860 : Op->VectorList.RegNum = RegNum;
1861 : Op->VectorList.Count = Count;
1862 : Op->VectorList.NumElements = NumElements;
1863 : Op->VectorList.ElementWidth = ElementWidth;
1864 : Op->VectorList.RegisterKind = RegisterKind;
1865 : Op->StartLoc = S;
1866 : Op->EndLoc = E;
1867 : return Op;
1868 : }
1869 :
1870 : static std::unique_ptr<AArch64Operand>
1871 : CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1872 46544 : auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1873 23272 : Op->VectorIndex.Val = Idx;
1874 : Op->StartLoc = S;
1875 : Op->EndLoc = E;
1876 586 : return Op;
1877 627 : }
1878 51 :
1879 : static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1880 19 : SMLoc E, MCContext &Ctx) {
1881 24 : auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1882 5 : Op->Imm.Val = Val;
1883 : Op->StartLoc = S;
1884 181 : Op->EndLoc = E;
1885 192 : return Op;
1886 17 : }
1887 :
1888 140 : static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1889 151 : unsigned ShiftAmount,
1890 11 : SMLoc S, SMLoc E,
1891 : MCContext &Ctx) {
1892 140 : auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1893 147 : Op->ShiftedImm.Val = Val;
1894 9 : Op->ShiftedImm.ShiftAmount = ShiftAmount;
1895 : Op->StartLoc = S;
1896 106 : Op->EndLoc = E;
1897 113 : return Op;
1898 9 : }
1899 :
1900 : static std::unique_ptr<AArch64Operand>
1901 : CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1902 76490 : auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1903 76490 : Op->CondCode.Code = Code;
1904 10000 : Op->StartLoc = S;
1905 : Op->EndLoc = E;
1906 66490 : return Op;
1907 17964 : }
1908 53030 :
1909 : static std::unique_ptr<AArch64Operand>
1910 13460 : CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1911 : auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1912 7023 : Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1913 7023 : Op->FPImm.IsExact = IsExact;
1914 1715 : Op->StartLoc = S;
1915 : Op->EndLoc = S;
1916 5308 : return Op;
1917 : }
1918 5308 :
1919 : static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1920 0 : StringRef Str,
1921 : SMLoc S,
1922 7528 : MCContext &Ctx) {
1923 7528 : auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1924 3022 : Op->Barrier.Val = Val;
1925 : Op->Barrier.Data = Str.data();
1926 4506 : Op->Barrier.Length = Str.size();
1927 4506 : Op->StartLoc = S;
1928 2308 : Op->EndLoc = S;
1929 : return Op;
1930 2198 : }
1931 :
1932 4548 : static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1933 4548 : uint32_t MRSReg,
1934 1251 : uint32_t MSRReg,
1935 : uint32_t PStateField,
1936 3297 : MCContext &Ctx) {
1937 3297 : auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1938 824 : Op->SysReg.Data = Str.data();
1939 : Op->SysReg.Length = Str.size();
1940 2473 : Op->SysReg.MRSReg = MRSReg;
1941 : Op->SysReg.MSRReg = MSRReg;
1942 44704 : Op->SysReg.PStateField = PStateField;
1943 44704 : Op->StartLoc = S;
1944 1486 : Op->EndLoc = S;
1945 : return Op;
1946 43218 : }
1947 :
1948 41654 : static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1949 : SMLoc E, MCContext &Ctx) {
1950 1564 : auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1951 : Op->SysCRImm.Val = Val;
1952 0 : Op->StartLoc = S;
1953 0 : Op->EndLoc = E;
1954 0 : return Op;
1955 : }
1956 0 :
1957 0 : static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1958 0 : StringRef Str,
1959 : SMLoc S,
1960 0 : MCContext &Ctx) {
1961 : auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1962 0 : Op->Prefetch.Val = Val;
1963 0 : Op->Barrier.Data = Str.data();
1964 0 : Op->Barrier.Length = Str.size();
1965 : Op->StartLoc = S;
1966 0 : Op->EndLoc = S;
1967 0 : return Op;
1968 0 : }
1969 :
1970 0 : static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1971 : StringRef Str,
1972 0 : SMLoc S,
1973 0 : MCContext &Ctx) {
1974 0 : auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1975 : Op->PSBHint.Val = Val;
1976 0 : Op->PSBHint.Data = Str.data();
1977 0 : Op->PSBHint.Length = Str.size();
1978 0 : Op->StartLoc = S;
1979 : Op->EndLoc = S;
1980 0 : return Op;
1981 : }
1982 0 :
1983 0 : static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
1984 0 : StringRef Str,
1985 : SMLoc S,
1986 0 : MCContext &Ctx) {
1987 0 : auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1988 0 : Op->BTIHint.Val = Val << 1 | 32;
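// The BTI hints live in the HINT #32..#38 space: doubling the lookup encoding
// (0..3) and OR-ing in 32 yields HINT #32, #34, #36 or #38.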
1989 : Op->BTIHint.Data = Str.data();
1990 0 : Op->BTIHint.Length = Str.size();
1991 : Op->StartLoc = S;
1992 6085 : Op->EndLoc = S;
1993 6085 : return Op;
1994 1275 : }
1995 :
1996 4810 : static std::unique_ptr<AArch64Operand>
1997 4810 : CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1998 1908 : bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1999 : auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2000 2902 : Op->ShiftExtend.Type = ShOp;
2001 : Op->ShiftExtend.Amount = Val;
2002 6602 : Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2003 6602 : Op->StartLoc = S;
2004 1251 : Op->EndLoc = E;
2005 : return Op;
2006 5351 : }
2007 5351 : };
2008 1028 :
2009 : } // end anonymous namespace.
2010 4323 :
2011 : void AArch64Operand::print(raw_ostream &OS) const {
2012 : switch (Kind) {
2013 : case k_FPImm:
2014 232296 : OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2015 232296 : if (!getFPImmIsExact())
2016 78249 : OS << " (inexact)";
2017 : OS << ">";
2018 154047 : break;
2019 150786 : case k_Barrier: {
2020 66190 : StringRef Name = getBarrierName();
2021 : if (!Name.empty())
2022 87857 : OS << "<barrier " << Name << ">";
2023 : else
2024 5133 : OS << "<barrier invalid #" << getBarrier() << ">";
2025 5133 : break;
2026 1918 : }
2027 : case k_Immediate:
2028 3215 : OS << *getImm();
2029 : break;
2030 3215 : case k_ShiftedImm: {
2031 : unsigned Shift = getShiftedImmShift();
2032 0 : OS << "<shiftedimm ";
2033 : OS << *getShiftedImmVal();
2034 22071 : OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2035 22071 : break;
2036 5459 : }
2037 : case k_CondCode:
2038 16612 : OS << "<condcode " << getCondCode() << ">";
2039 16612 : break;
2040 6144 : case k_VectorList: {
2041 : OS << "<vectorlist ";
2042 10468 : unsigned Reg = getVectorListStart();
2043 : for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2044 84190 : OS << Reg + i << " ";
2045 84190 : OS << ">";
2046 32948 : break;
2047 : }
2048 51242 : case k_VectorIndex:
2049 51242 : OS << "<vectorindex " << getVectorIndex() << ">";
2050 32693 : break;
2051 : case k_SysReg:
2052 18549 : OS << "<sysreg: " << getSysReg() << '>';
2053 : break;
2054 54 : case k_Token:
2055 54 : OS << "'" << getToken() << "'";
2056 0 : break;
2057 : case k_SysCR:
2058 54 : OS << "c" << getSysCR();
2059 46 : break;
2060 46 : case k_Prefetch: {
2061 : StringRef Name = getPrefetchName();
2062 8 : if (!Name.empty())
2063 : OS << "<prfop " << Name << ">";
2064 20 : else
2065 20 : OS << "<prfop invalid #" << getPrefetch() << ">";
2066 0 : break;
2067 : }
2068 20 : case k_PSBHint:
2069 20 : OS << getPSBHintName();
2070 20 : break;
2071 : case k_BTIHint:
2072 : OS << getBTIHintName();
2073 : break;
2074 : case k_Register:
2075 : OS << "<register " << getReg() << ">";
2076 : if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2077 : break;
2078 : LLVM_FALLTHROUGH;
2079 30 : case k_ShiftExtend:
2080 30 : OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2081 : << getShiftExtendAmount();
2082 12 : if (!hasShiftExtendAmount())
2083 : OS << "<imp>";
2084 26 : OS << '>';
2085 26 : break;
2086 0 : }
2087 : }
2088 26 :
2089 18 : /// @name Auto-generated Match Functions
2090 18 : /// {
2091 :
2092 8 : static unsigned MatchRegisterName(StringRef Name);
2093 :
2094 40 : /// }
2095 40 :
2096 0 : static unsigned MatchNeonVectorRegName(StringRef Name) {
2097 : return StringSwitch<unsigned>(Name.lower())
2098 40 : .Case("v0", AArch64::Q0)
2099 32 : .Case("v1", AArch64::Q1)
2100 32 : .Case("v2", AArch64::Q2)
2101 : .Case("v3", AArch64::Q3)
2102 8 : .Case("v4", AArch64::Q4)
2103 : .Case("v5", AArch64::Q5)
2104 56 : .Case("v6", AArch64::Q6)
2105 56 : .Case("v7", AArch64::Q7)
2106 0 : .Case("v8", AArch64::Q8)
2107 : .Case("v9", AArch64::Q9)
2108 56 : .Case("v10", AArch64::Q10)
2109 46 : .Case("v11", AArch64::Q11)
2110 46 : .Case("v12", AArch64::Q12)
2111 : .Case("v13", AArch64::Q13)
2112 10 : .Case("v14", AArch64::Q14)
2113 : .Case("v15", AArch64::Q15)
2114 68332 : .Case("v16", AArch64::Q16)
2115 68332 : .Case("v17", AArch64::Q17)
2116 24826 : .Case("v18", AArch64::Q18)
2117 : .Case("v19", AArch64::Q19)
2118 43506 : .Case("v20", AArch64::Q20)
2119 43506 : .Case("v21", AArch64::Q21)
2120 13789 : .Case("v22", AArch64::Q22)
2121 : .Case("v23", AArch64::Q23)
2122 29717 : .Case("v24", AArch64::Q24)
2123 : .Case("v25", AArch64::Q25)
2124 50464 : .Case("v26", AArch64::Q26)
2125 50464 : .Case("v27", AArch64::Q27)
2126 12725 : .Case("v28", AArch64::Q28)
2127 : .Case("v29", AArch64::Q29)
2128 37739 : .Case("v30", AArch64::Q30)
2129 37739 : .Case("v31", AArch64::Q31)
2130 10078 : .Default(0);
2131 : }
2132 27661 :
2133 : /// Returns an optional pair of (#elements, element-width) if Suffix
2134 1868 : /// is a valid vector kind. Where the number of elements in a vector
2135 1868 : /// or the vector width is implicit or explicitly unknown (but still a
2136 373 : /// valid suffix kind), 0 is used.
2137 : static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2138 1495 : RegKind VectorKind) {
2139 1495 : std::pair<int, int> Res = {-1, -1};
2140 79 :
2141 : switch (VectorKind) {
2142 1416 : case RegKind::NeonVector:
2143 : Res =
2144 : StringSwitch<std::pair<int, int>>(Suffix.lower())
2145 : .Case("", {0, 0})
2146 : .Case(".1d", {1, 64})
2147 : .Case(".1q", {1, 128})
2148 9151 : // '.2h' needed for fp16 scalar pairwise reductions
2149 9151 : .Case(".2h", {2, 16})
2150 9151 : .Case(".2s", {2, 32})
2151 4776 : .Case(".2d", {2, 64})
2152 : // '.4b' is another special case for the ARMv8.2a dot product
2153 : // operand
2154 : .Case(".4b", {4, 8})
2155 : .Case(".4h", {4, 16})
2156 : .Case(".4s", {4, 32})
2157 : .Case(".8b", {8, 8})
2158 : .Case(".8h", {8, 16})
2159 3082 : .Case(".16b", {16, 8})
2160 524 : // Accept the width neutral ones, too, for verbose syntax. If those
2161 : // aren't used in the right places, the token operand won't match so
2162 5890 : // all will work out.
2163 887 : .Case(".b", {0, 8})
2164 : .Case(".h", {0, 16})
2165 2964 : .Case(".s", {0, 32})
2166 : .Case(".d", {0, 64})
2167 256 : .Default({-1, -1});
2168 256 : break;
2169 256 : case RegKind::SVEPredicateVector:
2170 173 : case RegKind::SVEDataVector:
2171 : Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2172 : .Case("", {0, 0})
2173 : .Case(".b", {0, 8})
2174 : .Case(".h", {0, 16})
2175 : .Case(".s", {0, 32})
2176 : .Case(".d", {0, 64})
2177 : .Case(".q", {0, 128})
2178 : .Default({-1, -1});
2179 : break;
2180 : default:
2181 154 : llvm_unreachable("Unsupported RegKind");
2182 35 : }
2183 :
2184 48 : if (Res == std::make_pair(-1, -1))
2185 : return Optional<std::pair<int, int>>();
2186 33 :
2187 33 : return Optional<std::pair<int, int>>(Res);
2188 33 : }
2189 2 :
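// Editorial sketch of the suffix mapping above: ".4s" means four 32-bit lanes,
// ".16b" sixteen 8-bit lanes, and a width-neutral ".s" reports zero elements.
// The check below is illustrative only and is not called by the parser.
static bool sketchCheckVectorKind() {
  const auto K = parseVectorKind(".4s", RegKind::NeonVector);
  return K && K->first == 4 && K->second == 32;
}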
2190 : static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2191 : return parseVectorKind(Suffix, VectorKind).hasValue();
2192 : }
2193 :
2194 : static unsigned matchSVEDataVectorRegName(StringRef Name) {
2195 : return StringSwitch<unsigned>(Name.lower())
2196 : .Case("z0", AArch64::Z0)
2197 : .Case("z1", AArch64::Z1)
2198 : .Case("z2", AArch64::Z2)
2199 : .Case("z3", AArch64::Z3)
2200 36 : .Case("z4", AArch64::Z4)
2201 5 : .Case("z5", AArch64::Z5)
2202 : .Case("z6", AArch64::Z6)
2203 26 : .Case("z7", AArch64::Z7)
2204 : .Case("z8", AArch64::Z8)
2205 30 : .Case("z9", AArch64::Z9)
2206 30 : .Case("z10", AArch64::Z10)
2207 30 : .Case("z11", AArch64::Z11)
2208 2 : .Case("z12", AArch64::Z12)
2209 : .Case("z13", AArch64::Z13)
2210 : .Case("z14", AArch64::Z14)
2211 : .Case("z15", AArch64::Z15)
2212 : .Case("z16", AArch64::Z16)
2213 : .Case("z17", AArch64::Z17)
2214 : .Case("z18", AArch64::Z18)
2215 : .Case("z19", AArch64::Z19)
2216 : .Case("z20", AArch64::Z20)
2217 : .Case("z21", AArch64::Z21)
2218 : .Case("z22", AArch64::Z22)
2219 33 : .Case("z23", AArch64::Z23)
2220 5 : .Case("z24", AArch64::Z24)
2221 : .Case("z25", AArch64::Z25)
2222 23 : .Case("z26", AArch64::Z26)
2223 : .Case("z27", AArch64::Z27)
2224 27 : .Case("z28", AArch64::Z28)
2225 27 : .Case("z29", AArch64::Z29)
2226 27 : .Case("z30", AArch64::Z30)
2227 2 : .Case("z31", AArch64::Z31)
2228 : .Default(0);
2229 : }
2230 :
2231 : static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2232 : return StringSwitch<unsigned>(Name.lower())
2233 : .Case("p0", AArch64::P0)
2234 : .Case("p1", AArch64::P1)
2235 : .Case("p2", AArch64::P2)
2236 : .Case("p3", AArch64::P3)
2237 : .Case("p4", AArch64::P4)
2238 30 : .Case("p5", AArch64::P5)
2239 5 : .Case("p6", AArch64::P6)
2240 : .Case("p7", AArch64::P7)
2241 20 : .Case("p8", AArch64::P8)
2242 : .Case("p9", AArch64::P9)
2243 24 : .Case("p10", AArch64::P10)
2244 24 : .Case("p11", AArch64::P11)
2245 24 : .Case("p12", AArch64::P12)
2246 2 : .Case("p13", AArch64::P13)
2247 : .Case("p14", AArch64::P14)
2248 : .Case("p15", AArch64::P15)
2249 : .Default(0);
2250 : }
2251 :
2252 : bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2253 : SMLoc &EndLoc) {
2254 : StartLoc = getLoc();
2255 : auto Res = tryParseScalarRegister(RegNo);
2256 : EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2257 36 : return Res != MatchOperand_Success;
2258 10 : }
2259 :
2260 12 : // Matches a register name or register alias previously defined by '.req'
2261 : unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2262 339 : RegKind Kind) {
2263 339 : unsigned RegNum = 0;
2264 339 : if ((RegNum = matchSVEDataVectorRegName(Name)))
2265 191 : return Kind == RegKind::SVEDataVector ? RegNum : 0;
2266 :
2267 : if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2268 : return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2269 :
2270 : if ((RegNum = MatchNeonVectorRegName(Name)))
2271 : return Kind == RegKind::NeonVector ? RegNum : 0;
2272 :
2273 148 : // The parsed register must be of RegKind Scalar
2274 : if ((RegNum = MatchRegisterName(Name)))
2275 : return Kind == RegKind::Scalar ? RegNum : 0;
2276 220 :
2277 30 : if (!RegNum) {
2278 : // Handle a few common aliases of registers.
2279 118 : if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2280 : .Case("fp", AArch64::FP)
2281 273 : .Case("lr", AArch64::LR)
2282 273 : .Case("x31", AArch64::XZR)
2283 273 : .Case("w31", AArch64::WZR)
2284 177 : .Default(0))
2285 : return Kind == RegKind::Scalar ? RegNum : 0;
2286 :
2287 : // Check for aliases registered via .req. Canonicalize to lower case.
2288 : // That's more consistent since register names are case insensitive, and
2289 : // it's how the original entry was passed in from MC/MCParser/AsmParser.
2290 : auto Entry = RegisterReqs.find(Name.lower());
2291 : if (Entry == RegisterReqs.end())
2292 96 : return 0;
2293 :
2294 : // set RegNum if the match is the right kind of register
2295 144 : if (Kind == Entry->getValue().first)
2296 20 : RegNum = Entry->getValue().second;
2297 : }
2298 76 : return RegNum;
2299 : }
2300 109 :
2301 109 : /// tryParseScalarRegister - Try to parse a register name. The token must be an
2302 109 : /// Identifier when called, and if it is a register name the token is eaten and
2303 97 : /// the register is added to the operand list.
2304 : OperandMatchResultTy
2305 : AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2306 : MCAsmParser &Parser = getParser();
2307 : const AsmToken &Tok = Parser.getTok();
2308 : if (Tok.isNot(AsmToken::Identifier))
2309 : return MatchOperand_NoMatch;
2310 :
2311 12 : std::string lowerCase = Tok.getString().lower();
2312 : unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2313 : if (Reg == 0)
2314 22 : return MatchOperand_NoMatch;
2315 5 :
2316 : RegNum = Reg;
2317 7 : Parser.Lex(); // Eat identifier token.
2318 : return MatchOperand_Success;
2319 352 : }
2320 352 :
2321 352 : /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2322 168 : OperandMatchResultTy
2323 : AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2324 : MCAsmParser &Parser = getParser();
2325 : SMLoc S = getLoc();
2326 :
2327 : if (Parser.getTok().isNot(AsmToken::Identifier)) {
2328 : Error(S, "Expected cN operand where 0 <= N <= 15");
2329 : return MatchOperand_ParseFail;
2330 272 : }
2331 88 :
2332 : StringRef Tok = Parser.getTok().getIdentifier();
2333 192 : if (Tok[0] != 'c' && Tok[0] != 'C') {
2334 40 : Error(S, "Expected cN operand where 0 <= N <= 15");
2335 : return MatchOperand_ParseFail;
2336 56 : }
2337 :
2338 271 : uint32_t CRNum;
2339 271 : bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2340 271 : if (BadNum || CRNum > 15) {
2341 173 : Error(S, "Expected cN operand where 0 <= N <= 15");
2342 : return MatchOperand_ParseFail;
2343 : }
2344 :
2345 : Parser.Lex(); // Eat identifier token.
2346 : Operands.push_back(
2347 : AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2348 : return MatchOperand_Success;
2349 : }
2350 :
2351 : /// tryParsePrefetch - Try to parse a prefetch operand.
2352 184 : template <bool IsSVEPrefetch>
2353 25 : OperandMatchResultTy
2354 : AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2355 73 : MCAsmParser &Parser = getParser();
2356 : SMLoc S = getLoc();
2357 306 : const AsmToken &Tok = Parser.getTok();
2358 306 :
2359 306 : auto LookupByName = [](StringRef N) {
2360 191 : if (IsSVEPrefetch) {
2361 : if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2362 : return Optional<unsigned>(Res->Encoding);
2363 : } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2364 : return Optional<unsigned>(Res->Encoding);
2365 : return Optional<unsigned>();
2366 : };
2367 :
2368 115 : auto LookupByEncoding = [](unsigned E) {
2369 : if (IsSVEPrefetch) {
2370 : if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2371 169 : return Optional<StringRef>(Res->Name);
2372 30 : } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2373 : return Optional<StringRef>(Res->Name);
2374 85 : return Optional<StringRef>();
2375 : };
2376 252 : unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2377 252 :
2378 252 : // Either an identifier for named values or a 5-bit immediate.
2379 177 : // Eat optional hash.
2380 : if (parseOptionalToken(AsmToken::Hash) ||
2381 : Tok.is(AsmToken::Integer)) {
2382 : const MCExpr *ImmVal;
2383 : if (getParser().parseExpression(ImmVal))
2384 : return MatchOperand_ParseFail;
2385 :
2386 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2387 75 : if (!MCE) {
2388 : TokError("immediate value expected for prefetch operand");
2389 : return MatchOperand_ParseFail;
2390 111 : }
2391 20 : unsigned prfop = MCE->getValue();
2392 : if (prfop > MaxVal) {
2393 55 : TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2394 : "] expected");
2395 106 : return MatchOperand_ParseFail;
2396 106 : }
2397 106 :
2398 97 : auto PRFM = LookupByEncoding(MCE->getValue());
2399 : Operands.push_back(AArch64Operand::CreatePrefetch(
2400 : prfop, PRFM.getValueOr(""), S, getContext()));
2401 : return MatchOperand_Success;
2402 : }
2403 :
2404 : if (Tok.isNot(AsmToken::Identifier)) {
2405 : TokError("prefetch hint expected");
2406 9 : return MatchOperand_ParseFail;
2407 : }
2408 :
2409 16 : auto PRFM = LookupByName(Tok.getString());
2410 5 : if (!PRFM) {
2411 : TokError("prefetch hint expected");
2412 4 : return MatchOperand_ParseFail;
2413 : }
2414 304 :
2415 304 : Parser.Lex(); // Eat identifier token.
2416 304 : Operands.push_back(AArch64Operand::CreatePrefetch(
2417 168 : *PRFM, Tok.getString(), S, getContext()));
2418 : return MatchOperand_Success;
2419 : }
2420 :
2421 : /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2422 : OperandMatchResultTy
2423 : AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2424 : MCAsmParser &Parser = getParser();
2425 200 : SMLoc S = getLoc();
2426 64 : const AsmToken &Tok = Parser.getTok();
2427 : if (Tok.isNot(AsmToken::Identifier)) {
2428 144 : TokError("invalid operand for instruction");
2429 40 : return MatchOperand_ParseFail;
2430 : }
2431 32 :
2432 : auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2433 272 : if (!PSB) {
2434 272 : TokError("invalid operand for instruction");
2435 272 : return MatchOperand_ParseFail;
2436 194 : }
2437 :
2438 : Parser.Lex(); // Eat identifier token.
2439 : Operands.push_back(AArch64Operand::CreatePSBHint(
2440 : PSB->Encoding, Tok.getString(), S, getContext()));
2441 : return MatchOperand_Success;
2442 : }
2443 :
2444 : /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2445 : OperandMatchResultTy
2446 : AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2447 132 : MCAsmParser &Parser = getParser();
2448 30 : SMLoc S = getLoc();
2449 : const AsmToken &Tok = Parser.getTok();
2450 48 : if (Tok.isNot(AsmToken::Identifier)) {
2451 : TokError("invalid operand for instruction");
2452 462 : return MatchOperand_ParseFail;
2453 462 : }
2454 462 :
2455 188 : auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2456 : if (!BTI) {
2457 : TokError("invalid operand for instruction");
2458 : return MatchOperand_ParseFail;
2459 : }
2460 :
2461 : Parser.Lex(); // Eat identifier token.
2462 : Operands.push_back(AArch64Operand::CreateBTIHint(
2463 : BTI->Encoding, Tok.getString(), S, getContext()));
2464 : return MatchOperand_Success;
2465 : }
2466 381 :
2467 37 : /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2468 : /// instruction.
2469 237 : OperandMatchResultTy
2470 : AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2471 489 : MCAsmParser &Parser = getParser();
2472 489 : SMLoc S = getLoc();
2473 489 : const MCExpr *Expr;
2474 208 :
2475 : if (Parser.getTok().is(AsmToken::Hash)) {
2476 : Parser.Lex(); // Eat hash token.
2477 : }
2478 :
2479 : if (parseSymbolicImmVal(Expr))
2480 : return MatchOperand_ParseFail;
2481 :
2482 : AArch64MCExpr::VariantKind ELFRefKind;
2483 : MCSymbolRefExpr::VariantKind DarwinRefKind;
2484 : int64_t Addend;
2485 390 : if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2486 37 : if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2487 : ELFRefKind == AArch64MCExpr::VK_INVALID) {
2488 244 : // No modifier was specified at all; this is the syntax for an ELF basic
2489 : // ADRP relocation (unfortunately).
2490 370 : Expr =
2491 370 : AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2492 370 : } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2493 170 : DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2494 : Addend != 0) {
2495 : Error(S, "gotpage label reference not allowed an addend");
2496 : return MatchOperand_ParseFail;
2497 : } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2498 : DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2499 : DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2500 : ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2501 : ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2502 : ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2503 : // The operand must be an @page or @gotpage qualified symbolref.
2504 279 : Error(S, "page or gotpage label reference expected");
2505 27 : return MatchOperand_ParseFail;
2506 : }
2507 173 : }
2508 :
2509 1087 : // We have either a label reference possibly with addend or an immediate. The
2510 1087 : // addend is a raw value here. The linker will adjust it to only reference the
2511 1087 : // page.
2512 486 : SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2513 : Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2514 :
2515 : return MatchOperand_Success;
2516 : }
2517 :
2518 : /// tryParseAdrLabel - Parse and validate a source label for the ADR
2519 : /// instruction.
2520 : OperandMatchResultTy
2521 : AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2522 : SMLoc S = getLoc();
2523 918 : const MCExpr *Expr;
2524 107 :
2525 : // Leave anything with a bracket to the default for SVE
2526 494 : if (getParser().getTok().is(AsmToken::LBrac))
2527 : return MatchOperand_NoMatch;
2528 412 :
2529 412 : if (getParser().getTok().is(AsmToken::Hash))
2530 412 : getParser().Lex(); // Eat hash token.
2531 188 :
2532 : if (parseSymbolicImmVal(Expr))
2533 : return MatchOperand_ParseFail;
2534 :
2535 : AArch64MCExpr::VariantKind ELFRefKind;
2536 : MCSymbolRefExpr::VariantKind DarwinRefKind;
2537 : int64_t Addend;
2538 : if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2539 224 : if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2540 : ELFRefKind == AArch64MCExpr::VK_INVALID) {
2541 : // No modifier was specified at all; this is the syntax for an ELF basic
2542 308 : // ADR relocation (unfortunately).
2543 35 : Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2544 : } else {
2545 189 : Error(S, "unexpected adr label");
2546 : return MatchOperand_ParseFail;
2547 439 : }
2548 439 : }
2549 439 :
2550 208 : SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2551 : Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2552 : return MatchOperand_Success;
2553 : }
2554 :
2555 : /// tryParseFPImm - A floating point immediate expression operand.
2556 : template<bool AddFPZeroAsLiteral>
2557 : OperandMatchResultTy
2558 231 : AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2559 : MCAsmParser &Parser = getParser();
2560 : SMLoc S = getLoc();
2561 317 :
2562 35 : bool Hash = parseOptionalToken(AsmToken::Hash);
2563 :
2564 196 : // Handle negation, as that still comes through as a separate token.
2565 : bool isNegative = parseOptionalToken(AsmToken::Minus);
2566 332 :
2567 332 : const AsmToken &Tok = Parser.getTok();
2568 332 : if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2569 170 : if (!Hash)
2570 : return MatchOperand_NoMatch;
2571 : TokError("invalid floating point immediate");
2572 : return MatchOperand_ParseFail;
2573 : }
2574 :
2575 : // Parse hexadecimal representation.
2576 : if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2577 162 : if (Tok.getIntVal() > 255 || isNegative) {
2578 : TokError("encoded floating point value out of range");
2579 : return MatchOperand_ParseFail;
2580 224 : }
2581 27 :
2582 : APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2583 135 : Operands.push_back(
2584 : AArch64Operand::CreateFPImm(F, true, S, getContext()));
2585 682 : } else {
2586 682 : // Parse FP representation.
2587 682 : APFloat RealVal(APFloat::IEEEdouble());
2588 292 : auto Status =
2589 : RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2590 : if (isNegative)
2591 : RealVal.changeSign();
2592 :
2593 : if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2594 : Operands.push_back(
2595 : AArch64Operand::CreateToken("#0", false, S, getContext()));
2596 600 : Operands.push_back(
2597 210 : AArch64Operand::CreateToken(".0", false, S, getContext()));
2598 : } else
2599 360 : Operands.push_back(AArch64Operand::CreateFPImm(
2600 75 : RealVal, Status == APFloat::opOK, S, getContext()));
2601 : }
2602 105 :
2603 : Parser.Lex(); // Eat the token.
2604 290 :
2605 290 : return MatchOperand_Success;
2606 290 : }
2607 194 :
2608 : /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2609 : /// a shift suffix, for example '#1, lsl #12'.
2610 : OperandMatchResultTy
2611 : AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2612 : MCAsmParser &Parser = getParser();
2613 : SMLoc S = getLoc();
2614 :
2615 : if (Parser.getTok().is(AsmToken::Hash))
2616 : Parser.Lex(); // Eat '#'
2617 : else if (Parser.getTok().isNot(AsmToken::Integer))
2618 168 : // Operand should start from # or should be integer, emit error otherwise.
2619 30 : return MatchOperand_NoMatch;
2620 :
2621 66 : const MCExpr *Imm;
2622 : if (parseSymbolicImmVal(Imm))
2623 362 : return MatchOperand_ParseFail;
2624 362 : else if (Parser.getTok().isNot(AsmToken::Comma)) {
2625 362 : SMLoc E = Parser.getTok().getLoc();
2626 188 : Operands.push_back(
2627 : AArch64Operand::CreateImm(Imm, S, E, getContext()));
2628 : return MatchOperand_Success;
2629 : }
2630 :
2631 : // Eat ','
2632 : Parser.Lex();
2633 :
2634 174 : // The optional operand must be "lsl #N" where N is non-negative.
2635 : if (!Parser.getTok().is(AsmToken::Identifier) ||
2636 : !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2637 237 : Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2638 35 : return MatchOperand_ParseFail;
2639 : }
2640 139 :
2641 : // Eat 'lsl'
2642 389 : Parser.Lex();
2643 389 :
2644 389 : parseOptionalToken(AsmToken::Hash);
2645 208 :
2646 : if (Parser.getTok().isNot(AsmToken::Integer)) {
2647 : Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2648 : return MatchOperand_ParseFail;
2649 : }
2650 :
2651 : int64_t ShiftAmount = Parser.getTok().getIntVal();
2652 :
2653 181 : if (ShiftAmount < 0) {
2654 : Error(Parser.getTok().getLoc(), "positive shift amount required");
2655 : return MatchOperand_ParseFail;
2656 246 : }
2657 37 : Parser.Lex(); // Eat the number
2658 :
2659 144 : // Just in case the optional lsl #0 is used for immediates other than zero.
2660 : if (ShiftAmount == 0 && Imm != 0) {
2661 294 : SMLoc E = Parser.getTok().getLoc();
2662 294 : Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2663 294 : return MatchOperand_Success;
2664 170 : }
2665 :
2666 : SMLoc E = Parser.getTok().getLoc();
2667 : Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2668 : S, E, getContext()));
2669 : return MatchOperand_Success;
2670 : }
2671 :
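// Editorial note: the accepted forms are a plain immediate ("#255") or an
// immediate with an LSL suffix ("#1, lsl #12"); the latter is kept as a
// (value, shift) pair, so "add x0, x1, #1, lsl #12" adds the constant 0x1000.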
2672 124 : /// parseCondCodeString - Parse a Condition Code string.
2673 : AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2674 : AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2675 169 : .Case("eq", AArch64CC::EQ)
2676 25 : .Case("ne", AArch64CC::NE)
2677 : .Case("cs", AArch64CC::HS)
2678 99 : .Case("hs", AArch64CC::HS)
2679 : .Case("cc", AArch64CC::LO)
2680 589 : .Case("lo", AArch64CC::LO)
2681 589 : .Case("mi", AArch64CC::MI)
2682 589 : .Case("pl", AArch64CC::PL)
2683 292 : .Case("vs", AArch64CC::VS)
2684 : .Case("vc", AArch64CC::VC)
2685 : .Case("hi", AArch64CC::HI)
2686 : .Case("ls", AArch64CC::LS)
2687 : .Case("ge", AArch64CC::GE)
2688 : .Case("lt", AArch64CC::LT)
2689 : .Case("gt", AArch64CC::GT)
2690 : .Case("le", AArch64CC::LE)
2691 459 : .Case("al", AArch64CC::AL)
2692 162 : .Case("nv", AArch64CC::NV)
2693 : .Default(AArch64CC::Invalid);
2694 270 :
2695 75 : if (CC == AArch64CC::Invalid &&
2696 : getSTI().getFeatureBits()[AArch64::FeatureSVE])
2697 60 : CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2698 : .Case("none", AArch64CC::EQ)
2699 : .Case("any", AArch64CC::NE)
2700 26 : .Case("nlast", AArch64CC::HS)
2701 52 : .Case("last", AArch64CC::LO)
2702 26 : .Case("first", AArch64CC::MI)
2703 : .Case("nfrst", AArch64CC::PL)
2704 : .Case("pmore", AArch64CC::HI)
2705 3420 : .Case("plast", AArch64CC::LS)
2706 4102 : .Case("tcont", AArch64CC::GE)
2707 732 : .Case("tstop", AArch64CC::LT)
2708 : .Default(AArch64CC::Invalid);
2709 :
2710 77 : return CC;
2711 127 : }
2712 77 :
2713 77 : /// parseCondCode - Parse a Condition Code operand.
2714 : bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2715 : bool invertCondCode) {
2716 54 : MCAsmParser &Parser = getParser();
2717 108 : SMLoc S = getLoc();
2718 54 : const AsmToken &Tok = Parser.getTok();
2719 54 : assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2720 :
2721 : StringRef Cond = Tok.getString();
2722 : AArch64CC::CondCode CC = parseCondCodeString(Cond);
2723 : if (CC == AArch64CC::Invalid)
2724 256 : return TokError("invalid condition code");
2725 : Parser.Lex(); // Eat identifier token.
2726 256 :
2727 : if (invertCondCode) {
2728 256 : if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2729 : return TokError("condition codes AL and NV are invalid for this instruction");
2730 256 : CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2731 : }
2732 :
2733 : Operands.push_back(
2734 : AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2735 27647 : return false;
2736 38772 : }
2737 22614 :
2738 : /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2739 211 : /// them if present.
2740 422 : OperandMatchResultTy
2741 211 : AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2742 : MCAsmParser &Parser = getParser();
2743 879 : const AsmToken &Tok = Parser.getTok();
2744 1758 : std::string LowerID = Tok.getString().lower();
2745 879 : AArch64_AM::ShiftExtendType ShOp =
2746 : StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2747 5206 : .Case("lsl", AArch64_AM::LSL)
2748 7731 : .Case("lsr", AArch64_AM::LSR)
2749 4370 : .Case("asr", AArch64_AM::ASR)
2750 : .Case("ror", AArch64_AM::ROR)
2751 5943 : .Case("msl", AArch64_AM::MSL)
2752 7484 : .Case("uxtb", AArch64_AM::UXTB)
2753 4775 : .Case("uxth", AArch64_AM::UXTH)
2754 : .Case("uxtw", AArch64_AM::UXTW)
2755 6349 : .Case("uxtx", AArch64_AM::UXTX)
2756 10184 : .Case("sxtb", AArch64_AM::SXTB)
2757 5120 : .Case("sxth", AArch64_AM::SXTH)
2758 : .Case("sxtw", AArch64_AM::SXTW)
2759 4639 : .Case("sxtx", AArch64_AM::SXTX)
2760 5970 : .Default(AArch64_AM::InvalidShiftExtend);
2761 3567 :
2762 : if (ShOp == AArch64_AM::InvalidShiftExtend)
2763 4287 : return MatchOperand_NoMatch;
2764 4957 :
2765 3559 : SMLoc S = Tok.getLoc();
2766 : Parser.Lex();
2767 133 :
2768 266 : bool Hash = parseOptionalToken(AsmToken::Hash);
2769 133 :
2770 : if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2771 : if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2772 : ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2773 3735 : ShOp == AArch64_AM::MSL) {
2774 3735 : // We expect a number here.
2775 2645 : TokError("expected #imm after shift specifier");
2776 : return MatchOperand_ParseFail;
2777 1090 : }
2778 :
2779 646 : // "extend" type operations don't need an immediate, #0 is implicit.
2780 444 : SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2781 : Operands.push_back(
2782 177 : AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2783 177 : return MatchOperand_Success;
2784 96 : }
2785 :
2786 81 : // Make sure we do actually have a number, identifier or a parenthesized
2787 : // expression.
2788 65 : SMLoc E = Parser.getTok().getLoc();
2789 16 : if (!Parser.getTok().is(AsmToken::Integer) &&
2790 : !Parser.getTok().is(AsmToken::LParen) &&
2791 56 : !Parser.getTok().is(AsmToken::Identifier)) {
2792 56 : Error(E, "expected integer shift amount");
2793 38 : return MatchOperand_ParseFail;
2794 : }
2795 18 :
2796 : const MCExpr *ImmVal;
2797 10 : if (getParser().parseExpression(ImmVal))
2798 8 : return MatchOperand_ParseFail;
2799 :
2800 158 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2801 158 : if (!MCE) {
2802 112 : Error(E, "expected constant '#imm' after shift specifier");
2803 : return MatchOperand_ParseFail;
2804 46 : }
2805 :
2806 30 : E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2807 16 : Operands.push_back(AArch64Operand::CreateShiftExtend(
2808 : ShOp, MCE->getValue(), true, S, E, getContext()));
2809 204 : return MatchOperand_Success;
2810 204 : }
2811 138 :
2812 : static const struct Extension {
2813 66 : const char *Name;
2814 : const FeatureBitset Features;
2815 50 : } ExtensionMap[] = {
2816 16 : { "crc", {AArch64::FeatureCRC} },
2817 : { "sm4", {AArch64::FeatureSM4} },
2818 873 : { "sha3", {AArch64::FeatureSHA3} },
2819 873 : { "sha2", {AArch64::FeatureSHA2} },
2820 633 : { "aes", {AArch64::FeatureAES} },
2821 : { "crypto", {AArch64::FeatureCrypto} },
2822 240 : { "fp", {AArch64::FeatureFPARMv8} },
2823 : { "simd", {AArch64::FeatureNEON} },
2824 154 : { "ras", {AArch64::FeatureRAS} },
2825 86 : { "lse", {AArch64::FeatureLSE} },
2826 : { "predctrl", {AArch64::FeaturePredCtrl} },
2827 598 : { "ccdp", {AArch64::FeatureCacheDeepPersist} },
2828 598 : { "mte", {AArch64::FeatureMTE} },
2829 410 :
2830 : // FIXME: Unsupported extensions
2831 188 : { "pan", {} },
2832 : { "lor", {} },
2833 94 : { "rdma", {} },
2834 94 : { "profile", {} },
2835 : };
2836 790 :
2837 790 :
2838 572 : static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2839 : if (FBS[AArch64::HasV8_1aOps])
2840 218 : Str += "ARMv8.1a";
2841 : else if (FBS[AArch64::HasV8_2aOps])
2842 114 : Str += "ARMv8.2a";
2843 104 : else if (FBS[AArch64::HasV8_3aOps])
2844 : Str += "ARMv8.3a";
2845 879 : else if (FBS[AArch64::HasV8_4aOps])
2846 879 : Str += "ARMv8.4a";
2847 646 : else if (FBS[AArch64::HasV8_5aOps])
2848 : Str += "ARMv8.5a";
2849 233 : else {
2850 : auto ext = std::find_if(std::begin(ExtensionMap),
2851 129 : std::end(ExtensionMap),
2852 104 : [&](const Extension& e)
2853 : // Use & in case multiple features are enabled
2854 : { return (FBS & e.Features) != FeatureBitset(); }
2855 : );
2856 :
2857 : Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2858 : }
2859 1944 : }
2860 3000 :
2861 1056 : void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2862 : SMLoc S) {
2863 : const uint16_t Op2 = Encoding & 7;
2864 : const uint16_t Cm = (Encoding & 0x78) >> 3;
2865 : const uint16_t Cn = (Encoding & 0x780) >> 7;
2866 : const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2867 105182 :
2868 : const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2869 62286 :
2870 : Operands.push_back(
2871 44772 : AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2872 : Operands.push_back(
2873 44166 : AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2874 : Operands.push_back(
2875 19878 : AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2876 : Expr = MCConstantExpr::create(Op2, getContext());
2877 : Operands.push_back(
2878 : AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2879 0 : }
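// Editorial note: createSysAlias splits the packed 14-bit encoding into the
// SYS fields op1:Cn:Cm:op2 taken from bits [13:11], [10:7], [6:3] and [2:0].
// For a made-up encoding of 0x1ABC that gives op1=3, Cn=c5, Cm=c7, op2=4,
// i.e. the alias expands to "sys #3, c5, c7, #4[, Xt]".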
2880 1825 :
2881 0 : /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2882 1786 : /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2883 0 : bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2884 0 : OperandVector &Operands) {
2885 : if (Name.find('.') != StringRef::npos)
2886 0 : return TokError("invalid operand");
2887 0 :
2888 0 : Mnemonic = Name;
2889 0 : Operands.push_back(
2890 0 : AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2891 0 :
2892 : MCAsmParser &Parser = getParser();
2893 0 : const AsmToken &Tok = Parser.getTok();
2894 0 : StringRef Op = Tok.getString();
2895 0 : SMLoc S = Tok.getLoc();
2896 0 :
2897 0 : if (Mnemonic == "ic") {
2898 0 : const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2899 : if (!IC)
2900 0 : return TokError("invalid operand for IC instruction");
2901 0 : else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2902 0 : std::string Str("IC " + std::string(IC->Name) + " requires ");
2903 0 : setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2904 0 : return TokError(Str.c_str());
2905 0 : }
2906 : createSysAlias(IC->Encoding, Operands, S);
2907 0 : } else if (Mnemonic == "dc") {
2908 0 : const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2909 0 : if (!DC)
2910 0 : return TokError("invalid operand for DC instruction");
2911 0 : else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2912 0 : std::string Str("DC " + std::string(DC->Name) + " requires ");
2913 : setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2914 0 : return TokError(Str.c_str());
2915 0 : }
2916 0 : createSysAlias(DC->Encoding, Operands, S);
2917 0 : } else if (Mnemonic == "at") {
2918 0 : const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2919 0 : if (!AT)
2920 : return TokError("invalid operand for AT instruction");
2921 0 : else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2922 0 : std::string Str("AT " + std::string(AT->Name) + " requires ");
2923 0 : setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2924 0 : return TokError(Str.c_str());
2925 0 : }
2926 0 : createSysAlias(AT->Encoding, Operands, S);
2927 : } else if (Mnemonic == "tlbi") {
2928 0 : const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2929 0 : if (!TLBI)
2930 0 : return TokError("invalid operand for TLBI instruction");
2931 0 : else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2932 0 : std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2933 0 : setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2934 : return TokError(Str.c_str());
2935 : }
2936 1060011 : createSysAlias(TLBI->Encoding, Operands, S);
2937 : } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
2938 : const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
2939 : if (!PRCTX)
2940 : return TokError("invalid operand for prediction restriction instruction");
2941 0 : else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
2942 0 : std::string Str(
2943 0 : Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
2944 0 : setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
2945 0 : return TokError(Str.c_str());
2946 : }
2947 511 : uint16_t PRCTX_Op2 =
2948 : Mnemonic == "cfp" ? 4 :
2949 : Mnemonic == "dvp" ? 5 :
2950 : Mnemonic == "cpp" ? 7 :
2951 : 0;
2952 629 : assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
2953 0 : createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
2954 : }
2955 :
2956 1200 : Parser.Lex(); // Eat operand.
2957 1200 :
2958 192 : bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2959 : bool HasRegister = false;
2960 1008 :
2961 : // Check for the optional register operand.
2962 896 : if (parseOptionalToken(AsmToken::Comma)) {
2963 : if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2964 : return TokError("expected register operand");
2965 : HasRegister = true;
2966 896 : }
2967 1792 :
2968 : if (ExpectRegister && !HasRegister)
2969 0 : return TokError("specified " + Mnemonic + " op requires a register");
2970 : else if (!ExpectRegister && HasRegister)
2971 1792 : return TokError("specified " + Mnemonic + " op does not use a register");
2972 522 :
2973 : if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2974 : return true;
2975 486 :
2976 : return false;
2977 283 : }
2978 283 :
2979 48 : OperandMatchResultTy
2980 : AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2981 235 : MCAsmParser &Parser = getParser();
2982 : const AsmToken &Tok = Parser.getTok();
2983 211 :
2984 : if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2985 : TokError("'csync' operand expected");
2986 : return MatchOperand_ParseFail;
2987 211 : // Can be either a #imm style literal or an option name
2988 422 : } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2989 : // Immediate operand.
2990 0 : const MCExpr *ImmVal;
2991 : SMLoc ExprLoc = getLoc();
2992 422 : if (getParser().parseExpression(ImmVal))
2993 90 : return MatchOperand_ParseFail;
2994 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2995 : if (!MCE) {
2996 145 : Error(ExprLoc, "immediate value expected for barrier operand");
2997 : return MatchOperand_ParseFail;
2998 285 : }
2999 285 : if (MCE->getValue() < 0 || MCE->getValue() > 15) {
3000 48 : Error(ExprLoc, "barrier operand out of range");
3001 : return MatchOperand_ParseFail;
3002 237 : }
3003 : auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
3004 205 : Operands.push_back(AArch64Operand::CreateBarrier(
3005 : MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
3006 : return MatchOperand_Success;
3007 : }
3008 205 :
3009 410 : if (Tok.isNot(AsmToken::Identifier)) {
3010 : TokError("invalid operand for instruction");
3011 0 : return MatchOperand_ParseFail;
3012 : }
3013 410 :
3014 80 : auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
3015 : // The only valid named option for ISB is 'sy'
3016 : auto DB = AArch64DB::lookupDBByName(Tok.getString());
3017 157 : if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3018 : TokError("'sy' or #imm operand expected");
3019 76 : return MatchOperand_ParseFail;
3020 76 : // The only valid named option for TSB is 'csync'
3021 12 : } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3022 : TokError("'csync' operand expected");
3023 64 : return MatchOperand_ParseFail;
3024 : } else if (!DB && !TSB) {
3025 56 : TokError("invalid barrier option name");
3026 : return MatchOperand_ParseFail;
3027 : }
3028 :
3029 56 : Operands.push_back(AArch64Operand::CreateBarrier(
3030 112 : DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
3031 : Parser.Lex(); // Consume the option
3032 0 :
3033 : return MatchOperand_Success;
3034 112 : }
3035 40 :
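     : /// tryParseSysReg - Parse a system register operand, accepting named system
     : /// registers, generically encoded names and PState field names.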
3036 : OperandMatchResultTy
3037 : AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3038 24 : MCAsmParser &Parser = getParser();
3039 : const AsmToken &Tok = Parser.getTok();
3040 556 :
3041 556 : if (Tok.isNot(AsmToken::Identifier))
3042 84 : return MatchOperand_NoMatch;
3043 :
3044 472 : int MRSReg, MSRReg;
3045 : auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
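     :   // Prefer a named system register when its required features are enabled;
     :   // otherwise fall back to a generically encoded register name.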
3046 424 : if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3047 : MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3048 : MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3049 : } else
3050 424 : MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3051 848 :
3052 : auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3053 0 : unsigned PStateImm = -1;
3054 : if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3055 848 : PStateImm = PState->Encoding;
3056 312 :
3057 : Operands.push_back(
3058 : AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3059 160 : PStateImm, getContext()));
3060 : Parser.Lex(); // Eat identifier
3061 :
3062 : return MatchOperand_Success;
3063 568 : }
3064 :
3065 568 : /// tryParseNeonVectorRegister - Parse a vector register operand.
3066 170 : bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3067 398 : MCAsmParser &Parser = getParser();
3068 220 : if (Parser.getTok().isNot(AsmToken::Identifier))
3069 178 : return true;
3070 :
3071 283 : SMLoc S = getLoc();
3072 : // Check for a vector register specifier first.
3073 283 : StringRef Kind;
3074 90 : unsigned Reg;
3075 193 : OperandMatchResultTy Res =
3076 105 : tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3077 88 : if (Res != MatchOperand_Success)
3078 : return true;
3079 69 :
3080 : const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3081 69 : if (!KindRes)
3082 20 : return true;
3083 49 :
3084 25 : unsigned ElementWidth = KindRes->second;
3085 24 : Operands.push_back(
3086 : AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3087 216 : S, getLoc(), getContext()));
3088 :
3089 216 : // If there was an explicit qualifier, that goes on as a literal text
3090 60 : // operand.
3091 156 : if (!Kind.empty())
3092 90 : Operands.push_back(
3093 66 : AArch64Operand::CreateToken(Kind, false, S, getContext()));
3094 :
3095 : return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3096 399 : }
3097 399 :
3098 : OperandMatchResultTy
3099 : AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3100 : SMLoc SIdx = getLoc();
3101 399 : if (parseOptionalToken(AsmToken::LBrac)) {
3102 349 : const MCExpr *ImmVal;
3103 303 : if (getParser().parseExpression(ImmVal))
3104 178 : return MatchOperand_NoMatch;
3105 700 : const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3106 : if (!MCE) {
3107 : TokError("immediate value expected for vector index");
3108 :       return MatchOperand_ParseFail;
3109 34 : }
3110 34 :
3111 : SMLoc E = getLoc();
3112 :
3113 : if (parseToken(AsmToken::RBrac, "']' expected"))
3114 32 :       return MatchOperand_ParseFail;
3115 :
3116 : Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3117 64 : E, getContext()));
3118 64 : return MatchOperand_Success;
3119 : }
3120 :
3121 6 : return MatchOperand_NoMatch;
3122 50 : }
3123 :
3124 : // tryParseVectorRegister - Try to parse a vector register name with
3125 : // optional kind specifier. If it is a register specifier, eat the token
3126 46 : // and return it.
3127 46 : OperandMatchResultTy
3128 : AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3129 : RegKind MatchKind) {
3130 88 : MCAsmParser &Parser = getParser();
3131 17 : const AsmToken &Tok = Parser.getTok();
3132 :
3133 : if (Tok.isNot(AsmToken::Identifier))
3134 6 : return MatchOperand_NoMatch;
3135 6 :
3136 : StringRef Name = Tok.getString();
3137 : // If there is a kind specifier, it's separated from the register name by
3138 12 : // a '.'.
3139 : size_t Start = 0, Next = Name.find('.');
3140 : StringRef Head = Name.slice(Start, Next);
3141 : unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3142 13 :
3143 13 : if (RegNum) {
3144 : if (Next != StringRef::npos) {
3145 : Kind = Name.slice(Next, StringRef::npos);
3146 26 : if (!isValidVectorKind(Kind, MatchKind)) {
3147 6 : TokError("invalid vector kind qualifier");
3148 : return MatchOperand_ParseFail;
3149 : }
3150 15 : }
3151 15 : Parser.Lex(); // Eat the register token.
3152 :
3153 : Reg = RegNum;
3154 26 : return MatchOperand_Success;
3155 5 : }
3156 :
3157 : return MatchOperand_NoMatch;
3158 6 : }
3159 6 :
3160 : /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3161 : OperandMatchResultTy
3162 12 : AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3163 3 : // Check for a SVE predicate register specifier first.
3164 : const SMLoc S = getLoc();
3165 : StringRef Kind;
3166 6 : unsigned RegNum;
3167 6 : auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3168 : if (Res != MatchOperand_Success)
3169 : return Res;
3170 12 :
3171 3 : const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3172 : if (!KindRes)
3173 : return MatchOperand_NoMatch;
3174 :
3175 64 : unsigned ElementWidth = KindRes->second;
3176 64 : Operands.push_back(AArch64Operand::CreateVectorReg(
3177 : RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3178 : getLoc(), getContext()));
3179 118 :
3180 38 : // Not all predicates are followed by a '/m' or '/z'.
3181 : MCAsmParser &Parser = getParser();
3182 : if (Parser.getTok().isNot(AsmToken::Slash))
3183 8 : return MatchOperand_Success;
3184 8 :
3185 : // But when they do they shouldn't have an element type suffix.
3186 : if (!Kind.empty()) {
3187 16 : Error(S, "not expecting size suffix");
3188 : return MatchOperand_ParseFail;
3189 : }
3190 :
3191 15 : // Add a literal slash as operand
3192 15 : Operands.push_back(
3193 : AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3194 :
3195 28 : Parser.Lex(); // Eat the slash.
3196 10 :
3197 : // Zeroing or merging?
3198 : auto Pred = Parser.getTok().getString().lower();
3199 16 : if (Pred != "z" && Pred != "m") {
3200 16 : Error(getLoc(), "expecting 'm' or 'z' predication");
3201 : return MatchOperand_ParseFail;
3202 : }
3203 28 :
3204 10 : // Add zero/merge token.
3205 : const char *ZM = Pred == "z" ? "z" : "m";
3206 : Operands.push_back(
3207 12 : AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3208 12 :
3209 : Parser.Lex(); // Eat zero/merge token.
3210 : return MatchOperand_Success;
3211 24 : }
3212 10 :
3213 : /// parseRegister - Parse a register operand.
3214 : bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3215 13 : // Try for a Neon vector register.
3216 13 : if (!tryParseNeonVectorRegister(Operands))
3217 : return false;
3218 :
3219 22 : // Otherwise try for a scalar register.
3220 8 : if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3221 : return false;
3222 :
3223 : return true;
3224 : }
3225 :
3226 391 : bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3227 : MCAsmParser &Parser = getParser();
3228 : bool HasELFModifier = false;
3229 : AArch64MCExpr::VariantKind RefKind;
3230 :
3231 : if (parseOptionalToken(AsmToken::Colon)) {
3232 283 : HasELFModifier = true;
3233 :
3234 : if (Parser.getTok().isNot(AsmToken::Identifier))
3235 : return TokError("expect relocation specifier in operand after ':'");
3236 :
3237 105 : std::string LowerCase = Parser.getTok().getIdentifier().lower();
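     :     // Relocation specifiers are written ':name:' in front of the expression,
     :     // e.g. ':lo12:symbol'.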
3238 : RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3239 : .Case("lo12", AArch64MCExpr::VK_LO12)
3240 : .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3241 : .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3242 : .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3243 105 : .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3244 : .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3245 : .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3246 : .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3247 : .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3248 15 : .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3249 : .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3250 : .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3251 : .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3252 : .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3253 15 : .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3254 : .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3255 15 : .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3256 15 : .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3257 : .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3258 : .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3259 16 : .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3260 16 : .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3261 : .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3262 : .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3263 : .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3264 : .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3265 16 : .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3266 : .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3267 16 : .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3268 16 : .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3269 : .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3270 : .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3271 102 : .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3272 102 : .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3273 : .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3274 : .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3275 : .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3276 : .Default(AArch64MCExpr::VK_INVALID);
3277 102 :
3278 83 : if (RefKind == AArch64MCExpr::VK_INVALID)
3279 : return TokError("expect relocation specifier in operand after ':'");
3280 :
3281 27 : Parser.Lex(); // Eat identifier
3282 27 :
3283 : if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3284 : return true;
3285 : }
3286 :
3287 25 : if (getParser().parseExpression(ImmVal))
3288 25 : return true;
3289 :
3290 : if (HasELFModifier)
3291 : ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3292 18 :
3293 : return false;
3294 : }
3295 :
3296 : template <RegKind VectorKind>
3297 18 : OperandMatchResultTy
3298 18 : AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3299 : bool ExpectMatch) {
3300 : MCAsmParser &Parser = getParser();
3301 : if (!Parser.getTok().is(AsmToken::LCurly))
3302 : return MatchOperand_NoMatch;
3303 :
3304 : // Wrapper around parse function
3305 : auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3306 : bool NoMatchIsError) {
3307 1098 : auto RegTok = Parser.getTok();
3308 215 : auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3309 : if (ParseRes == MatchOperand_Success) {
3310 216 : if (parseVectorKind(Kind, VectorKind))
3311 33 : return ParseRes;
3312 : llvm_unreachable("Expected a valid vector kind");
3313 328 : }
3314 80 :
3315 : if (RegTok.isNot(AsmToken::Identifier) ||
3316 237 : ParseRes == MatchOperand_ParseFail ||
3317 44 : (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3318 : Error(Loc, "vector register expected");
3319 222 : return MatchOperand_ParseFail;
3320 43 : }
3321 :
3322 95 : return MatchOperand_NoMatch;
3323 15 : };
3324 :
3325 : SMLoc S = getLoc();
3326 0 : auto LCurly = Parser.getTok();
3327 : Parser.Lex(); // Eat left bracket token.
3328 :
3329 204 : StringRef Kind;
3330 0 : unsigned FirstReg;
3331 : auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3332 204 :
3333 16 : // Put back the original left bracket if there was no match, so that
3334 : // different types of list-operands can be matched (e.g. SVE, Neon).
3335 : if (ParseRes == MatchOperand_NoMatch)
3336 16 : Parser.getLexer().UnLex(LCurly);
3337 :
3338 : if (ParseRes != MatchOperand_Success)
3339 : return ParseRes;
3340 :
3341 : int64_t PrevReg = FirstReg;
3342 0 : unsigned Count = 1;
3343 :
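     :   // A '-' after the first register introduces a range (e.g. { v0.4s - v3.4s });
     :   // otherwise the registers are listed individually, separated by commas.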
3344 : if (parseOptionalToken(AsmToken::Minus)) {
3345 44 : SMLoc Loc = getLoc();
3346 0 : StringRef NextKind;
3347 :
3348 44 : unsigned Reg;
3349 15 : ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3350 : if (ParseRes != MatchOperand_Success)
3351 : return ParseRes;
3352 15 :
3353 :     // Any Kind suffixes must match on all regs in the list.
3354 : if (Kind != NextKind) {
3355 : Error(Loc, "mismatched register size suffix");
3356 : return MatchOperand_ParseFail;
3357 : }
3358 0 :
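     :     // The distance is computed modulo 32, so a range may wrap past register 31.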
3359 : unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3360 0 :
3361 0 : if (Space == 0 || Space > 3) {
3362 : Error(Loc, "invalid number of vectors");
3363 0 : return MatchOperand_ParseFail;
3364 : }
3365 0 :
3366 0 : Count += Space;
3367 : }
3368 0 : else {
3369 : while (parseOptionalToken(AsmToken::Comma)) {
3370 0 : SMLoc Loc = getLoc();
3371 0 : StringRef NextKind;
3372 : unsigned Reg;
3373 0 : ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3374 : if (ParseRes != MatchOperand_Success)
3375 : return ParseRes;
3376 :
3377 :       // Any Kind suffixes must match on all regs in the list.
3378 0 : if (Kind != NextKind) {
3379 0 : Error(Loc, "mismatched register size suffix");
3380 0 : return MatchOperand_ParseFail;
3381 : }
3382 0 :
3383 0 : // Registers must be incremental (with wraparound at 31)
3384 : if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3385 0 : (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3386 : Error(Loc, "registers must be sequential");
3387 : return MatchOperand_ParseFail;
3388 : }
3389 :
3390 0 : PrevReg = Reg;
3391 0 : ++Count;
3392 0 : }
3393 : }
3394 0 :
3395 0 : if (parseToken(AsmToken::RCurly, "'}' expected"))
3396 : return MatchOperand_ParseFail;
3397 :
3398 0 : if (Count > 4) {
3399 : Error(S, "invalid number of vectors");
3400 : return MatchOperand_ParseFail;
3401 : }
3402 :
3403 : unsigned NumElements = 0;
3404 : unsigned ElementWidth = 0;
3405 : if (!Kind.empty()) {
3406 : if (const auto &VK = parseVectorKind(Kind, VectorKind))
3407 : std::tie(NumElements, ElementWidth) = *VK;
3408 : }
3409 0 :
3410 0 : Operands.push_back(AArch64Operand::CreateVectorList(
3411 0 : FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3412 : getContext()));
3413 :
3414 : return MatchOperand_Success;
3415 : }
3416 :
3417 : /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3418 : bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3419 : auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3420 : if (ParseRes != MatchOperand_Success)
3421 : return true;
3422 0 :
3423 0 : return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3424 0 : }
3425 :
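     : /// tryParseGPR64sp0Operand - Parse a scalar register that may optionally be
     : /// followed by ", #0"; any other index is rejected.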
3426 : OperandMatchResultTy
3427 : AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3428 : SMLoc StartLoc = getLoc();
3429 :
3430 : unsigned RegNum;
3431 : OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3432 : if (Res != MatchOperand_Success)
3433 : return Res;
3434 :
3435 0 : if (!parseOptionalToken(AsmToken::Comma)) {
3436 0 : Operands.push_back(AArch64Operand::CreateReg(
3437 0 : RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3438 : return MatchOperand_Success;
3439 : }
3440 :
3441 : parseOptionalToken(AsmToken::Hash);
3442 :
3443 : if (getParser().getTok().isNot(AsmToken::Integer)) {
3444 : Error(getLoc(), "index must be absent or #0");
3445 : return MatchOperand_ParseFail;
3446 : }
3447 :
3448 0 : const MCExpr *ImmVal;
3449 0 : if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3450 0 : cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3451 : Error(getLoc(), "index must be absent or #0");
3452 : return MatchOperand_ParseFail;
3453 : }
3454 :
3455 : Operands.push_back(AArch64Operand::CreateReg(
3456 : RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3457 : return MatchOperand_Success;
3458 : }
3459 :
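     : /// tryParseGPROperand - Parse a scalar register, optionally followed by a
     : /// shift or extend specifier when ParseShiftExtend is set.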
3460 : template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3461 0 : OperandMatchResultTy
3462 0 : AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3463 0 : SMLoc StartLoc = getLoc();
3464 :
3465 : unsigned RegNum;
3466 : OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3467 : if (Res != MatchOperand_Success)
3468 : return Res;
3469 :
3470 : // No shift/extend is the default.
3471 : if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3472 : Operands.push_back(AArch64Operand::CreateReg(
3473 : RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3474 0 : return MatchOperand_Success;
3475 0 : }
3476 :
3477 0 : // Eat the comma
3478 : getParser().Lex();
3479 :
3480 : // Match the shift
3481 0 : SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3482 0 : Res = tryParseOptionalShiftExtend(ExtOpnd);
3483 : if (Res != MatchOperand_Success)
3484 0 : return Res;
3485 :
3486 : auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3487 : Operands.push_back(AArch64Operand::CreateReg(
3488 0 : RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3489 0 : Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3490 : Ext->hasShiftExtendAmount()));
3491 0 :
3492 : return MatchOperand_Success;
3493 0 : }
3494 0 :
3495 : bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3496 : MCAsmParser &Parser = getParser();
3497 :
3498 : // Some SVE instructions have a decoration after the immediate, i.e.
3499 : // "mul vl". We parse them here and add tokens, which must be present in the
3500 : // asm string in the tablegen instruction.
3501 : bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3502 : bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3503 0 : if (!Parser.getTok().getString().equals_lower("mul") ||
3504 : !(NextIsVL || NextIsHash))
3505 : return true;
3506 :
3507 : Operands.push_back(
3508 : AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3509 : Parser.Lex(); // Eat the "mul"
3510 :
3511 : if (NextIsVL) {
3512 : Operands.push_back(
3513 : AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3514 : Parser.Lex(); // Eat the "vl"
3515 : return false;
3516 : }
3517 :
3518 : if (NextIsHash) {
3519 : Parser.Lex(); // Eat the #
3520 : SMLoc S = getLoc();
3521 0 :
3522 : // Parse immediate operand.
3523 0 : const MCExpr *ImmVal;
3524 0 : if (!Parser.parseExpression(ImmVal))
3525 : if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3526 : Operands.push_back(AArch64Operand::CreateImm(
3527 : MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3528 : getContext()));
3529 : return MatchOperand_Success;
3530 : }
3531 : }
3532 :
3533 : return Error(getLoc(), "expected 'vl' or '#<imm>'");
3534 : }
3535 :
3536 : /// parseOperand - Parse a arm instruction operand. For now this parses the
3537 : /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
3538 : bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3539 : bool invertCondCode) {
3540 : MCAsmParser &Parser = getParser();
3541 :
3542 0 : OperandMatchResultTy ResTy =
3543 : MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3544 0 :
3545 0 : // Check if the current operand has a custom associated parser, if so, try to
3546 : // custom parse the operand, or fallback to the general approach.
3547 : if (ResTy == MatchOperand_Success)
3548 : return false;
3549 : // If there wasn't a custom match, try the generic matcher below. Otherwise,
3550 : // there was a match, but an error occurred, in which case, just return that
3551 : // the operand parsing failed.
3552 : if (ResTy == MatchOperand_ParseFail)
3553 : return true;
3554 :
3555 : // Nothing custom, so do general case parsing.
3556 : SMLoc S, E;
3557 : switch (getLexer().getKind()) {
3558 : default: {
3559 : SMLoc S = getLoc();
3560 : const MCExpr *Expr;
3561 : if (parseSymbolicImmVal(Expr))
3562 : return Error(S, "invalid operand");
3563 0 :
3564 : SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3565 0 : Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3566 0 : return false;
3567 : }
3568 : case AsmToken::LBrac: {
3569 : SMLoc Loc = Parser.getTok().getLoc();
3570 : Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3571 : getContext()));
3572 : Parser.Lex(); // Eat '['
3573 :
3574 : // There's no comma after a '[', so we can parse the next operand
3575 : // immediately.
3576 : return parseOperand(Operands, false, false);
3577 : }
3578 : case AsmToken::LCurly:
3579 : return parseNeonVectorList(Operands);
3580 : case AsmToken::Identifier: {
3581 : // If we're expecting a Condition Code operand, then just parse that.
3582 : if (isCondCode)
3583 : return parseCondCode(Operands, invertCondCode);
3584 0 :
3585 : // If it's a register name, parse it.
3586 0 : if (!parseRegister(Operands))
3587 0 : return false;
3588 :
3589 : // See if this is a "mul vl" decoration or "mul #<int>" operand used
3590 : // by SVE instructions.
3591 : if (!parseOptionalMulOperand(Operands))
3592 : return false;
3593 :
3594 : // This could be an optional "shift" or "extend" operand.
3595 : OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3596 : // We can only continue if no tokens were eaten.
3597 : if (GotShift != MatchOperand_NoMatch)
3598 : return GotShift;
3599 :
3600 : // This was not a register so parse other operands that start with an
3601 : // identifier (like labels) as expressions and create them as immediates.
3602 : const MCExpr *IdVal;
3603 : S = getLoc();
3604 : if (getParser().parseExpression(IdVal))
3605 0 : return true;
3606 : E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3607 0 : Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3608 0 : return false;
3609 : }
3610 : case AsmToken::Integer:
3611 : case AsmToken::Real:
3612 : case AsmToken::Hash: {
3613 : // #42 -> immediate.
3614 : S = getLoc();
3615 :
3616 : parseOptionalToken(AsmToken::Hash);
3617 :
3618 : // Parse a negative sign
3619 : bool isNegative = false;
3620 : if (Parser.getTok().is(AsmToken::Minus)) {
3621 : isNegative = true;
3622 : // We need to consume this token only when we have a Real, otherwise
3623 : // we let parseSymbolicImmVal take care of it
3624 : if (Parser.getLexer().peekTok().is(AsmToken::Real))
3625 : Parser.Lex();
3626 0 : }
3627 :
3628 0 : // The only Real that should come through here is a literal #0.0 for
3629 0 : // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3630 : // so convert the value.
3631 : const AsmToken &Tok = Parser.getTok();
3632 : if (Tok.is(AsmToken::Real)) {
3633 : APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3634 : uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3635 : if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3636 : Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3637 : Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3638 : return TokError("unexpected floating point literal");
3639 : else if (IntVal != 0 || isNegative)
3640 : return TokError("expected floating-point constant #0.0");
3641 : Parser.Lex(); // Eat the token.
3642 :
3643 : Operands.push_back(
3644 : AArch64Operand::CreateToken("#0", false, S, getContext()));
3645 : Operands.push_back(
3646 : AArch64Operand::CreateToken(".0", false, S, getContext()));
3647 0 : return false;
3648 : }
3649 0 :
3650 0 : const MCExpr *ImmVal;
3651 : if (parseSymbolicImmVal(ImmVal))
3652 : return true;
3653 :
3654 : E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3655 : Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3656 : return false;
3657 : }
3658 : case AsmToken::Equal: {
3659 : SMLoc Loc = getLoc();
3660 : if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3661 : return TokError("unexpected token in operand");
3662 : Parser.Lex(); // Eat '='
3663 : const MCExpr *SubExprVal;
3664 : if (getParser().parseExpression(SubExprVal))
3665 : return true;
3666 :
3667 : if (Operands.size() < 2 ||
3668 0 : !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3669 : return Error(Loc, "Only valid when first operand is register");
3670 0 :
3671 0 : bool IsXReg =
3672 : AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3673 : Operands[1]->getReg());
3674 :
3675 : MCContext& Ctx = getContext();
3676 : E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3677 : // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3678 : if (isa<MCConstantExpr>(SubExprVal)) {
3679 : uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3680 : uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
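     :       // Shift out trailing 16-bit zero chunks so the value fits a 16-bit MOVZ
     :       // immediate with an LSL of ShiftAmt.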
3681 : while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3682 : ShiftAmt += 16;
3683 : Imm >>= 16;
3684 : }
3685 : if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3686 : Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3687 : Operands.push_back(AArch64Operand::CreateImm(
3688 : MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3689 0 : if (ShiftAmt)
3690 : Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3691 0 : ShiftAmt, true, S, E, Ctx));
3692 0 : return false;
3693 : }
3694 : APInt Simm = APInt(64, Imm << ShiftAmt);
3695 : // check if the immediate is an unsigned or signed 32-bit int for W regs
3696 : if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3697 : return Error(Loc, "Immediate too large for register");
3698 : }
3699 : // If it is a label or an imm that cannot fit in a movz, put it into CP.
3700 : const MCExpr *CPLoc =
3701 : getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3702 : Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3703 : return false;
3704 : }
3705 : }
3706 : }
3707 :
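     : /// regsEqual - Check whether two parsed operands name registers that satisfy a
     : /// tied-operand constraint, honouring the EqualsSuperReg/EqualsSubReg (W/X) forms.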
3708 : bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3709 : const MCParsedAsmOperand &Op2) const {
3710 0 : auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3711 : auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3712 0 : if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3713 0 : AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3714 : return MCTargetAsmParser::regsEqual(Op1, Op2);
3715 :
3716 : assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3717 : "Testing equality of non-scalar registers not supported");
3718 :
3719 : // Check if a registers match their sub/super register classes.
3720 : if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3721 : return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3722 : if (AOp1.getRegEqualityTy() == EqualsSubReg)
3723 : return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3724 : if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3725 : return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3726 : if (AOp2.getRegEqualityTy() == EqualsSubReg)
3727 : return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3728 :
3729 : return false;
3730 : }
3731 0 :
3732 : /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3733 0 : /// operands.
3734 0 : bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3735 : StringRef Name, SMLoc NameLoc,
3736 : OperandVector &Operands) {
3737 : MCAsmParser &Parser = getParser();
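     :   // Accept the shorthand 'b<cond>' spellings as aliases for 'b.<cond>'.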
3738 : Name = StringSwitch<StringRef>(Name.lower())
3739 : .Case("beq", "b.eq")
3740 : .Case("bne", "b.ne")
3741 : .Case("bhs", "b.hs")
3742 : .Case("bcs", "b.cs")
3743 : .Case("blo", "b.lo")
3744 : .Case("bcc", "b.cc")
3745 : .Case("bmi", "b.mi")
3746 : .Case("bpl", "b.pl")
3747 : .Case("bvs", "b.vs")
3748 : .Case("bvc", "b.vc")
3749 : .Case("bhi", "b.hi")
3750 : .Case("bls", "b.ls")
3751 : .Case("bge", "b.ge")
3752 0 : .Case("blt", "b.lt")
3753 : .Case("bgt", "b.gt")
3754 0 : .Case("ble", "b.le")
3755 0 : .Case("bal", "b.al")
3756 : .Case("bnv", "b.nv")
3757 : .Default(Name);
3758 :
3759 : // First check for the AArch64-specific .req directive.
3760 : if (Parser.getTok().is(AsmToken::Identifier) &&
3761 : Parser.getTok().getIdentifier() == ".req") {
3762 : parseDirectiveReq(Name, NameLoc);
3763 : // We always return 'error' for this, as we're done with this
3764 :     // statement and don't need to match the instruction.
3765 : return true;
3766 : }
3767 :
3768 : // Create the leading tokens for the mnemonic, split by '.' characters.
3769 : size_t Start = 0, Next = Name.find('.');
3770 : StringRef Head = Name.slice(Start, Next);
3771 :
3772 : // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
3773 0 : // the SYS instruction.
3774 : if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
3775 0 : Head == "cfp" || Head == "dvp" || Head == "cpp")
3776 : return parseSysAlias(Head, NameLoc, Operands);
3777 0 :
3778 : Operands.push_back(
3779 0 : AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3780 0 : Mnemonic = Head;
3781 :
3782 : // Handle condition codes for a branch mnemonic
3783 0 : if (Head == "b" && Next != StringRef::npos) {
3784 : Start = Next;
3785 : Next = Name.find('.', Start + 1);
3786 0 : Head = Name.slice(Start + 1, Next);
3787 0 :
3788 0 : SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3789 : (Head.data() - Name.data()));
3790 : AArch64CC::CondCode CC = parseCondCodeString(Head);
3791 0 : if (CC == AArch64CC::Invalid)
3792 0 : return Error(SuffixLoc, "invalid condition code");
3793 0 : Operands.push_back(
3794 : AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3795 : Operands.push_back(
3796 0 : AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3797 0 : }
3798 0 :
3799 : // Add the remaining tokens in the mnemonic.
3800 : while (Next != StringRef::npos) {
3801 0 : Start = Next;
3802 0 : Next = Name.find('.', Start + 1);
3803 : Head = Name.slice(Start, Next);
3804 0 : SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3805 : (Head.data() - Name.data()) + 1);
3806 : Operands.push_back(
3807 : AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3808 : }
3809 0 :
3810 0 : // Conditional compare instructions have a Condition Code operand, which needs
3811 : // to be parsed and an immediate operand created.
3812 : bool condCodeFourthOperand =
3813 0 : (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3814 : Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3815 0 : Head == "csinc" || Head == "csinv" || Head == "csneg");
3816 0 :
3817 0 : // These instructions are aliases to some of the conditional select
3818 0 : // instructions. However, the condition code is inverted in the aliased
3819 0 : // instruction.
3820 0 : //
3821 : // FIXME: Is this the correct way to handle these? Or should the parser
3822 0 : // generate the aliased instructions directly?
3823 0 : bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3824 : bool condCodeThirdOperand =
3825 0 : (Head == "cinc" || Head == "cinv" || Head == "cneg");
3826 0 :
3827 : // Read the remaining operands.
3828 0 : if (getLexer().isNot(AsmToken::EndOfStatement)) {
3829 0 :
3830 0 : unsigned N = 1;
3831 0 : do {
3832 0 : // Parse and remember the operand.
3833 0 : if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3834 : (N == 3 && condCodeThirdOperand) ||
3835 0 : (N == 2 && condCodeSecondOperand),
3836 0 : condCodeSecondOperand || condCodeThirdOperand)) {
3837 : return true;
3838 0 : }
3839 0 :
3840 : // After successfully parsing some operands there are two special cases to
3841 0 : // consider (i.e. notional operands not separated by commas). Both are due
3842 0 : // to memory specifiers:
3843 0 : // + An RBrac will end an address for load/store/prefetch
3844 0 : // + An '!' will indicate a pre-indexed operation.
3845 0 : //
3846 0 : // It's someone else's responsibility to make sure these tokens are sane
3847 : // in the given context!
3848 0 :
3849 0 : SMLoc RLoc = Parser.getTok().getLoc();
3850 : if (parseOptionalToken(AsmToken::RBrac))
3851 0 : Operands.push_back(
3852 : AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3853 : SMLoc ELoc = Parser.getTok().getLoc();
3854 0 : if (parseOptionalToken(AsmToken::Exclaim))
3855 : Operands.push_back(
3856 0 : AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3857 0 :
3858 0 : ++N;
3859 : } while (parseOptionalToken(AsmToken::Comma));
3860 0 : }
3861 0 :
3862 : if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3863 0 : return true;
3864 :
3865 0 : return false;
3866 0 : }
3867 :
3868 0 : static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
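     : // True when Reg is the B/H/S/D/Q/Z view of the same architectural register as ZReg.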
3869 : assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3870 0 : return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3871 : (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3872 0 : (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3873 : (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3874 0 : (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3875 0 : (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3876 : }
3877 0 :
3878 : // FIXME: This entire function is a giant hack to provide us with decent
3879 0 : // operand range validation/diagnostics until TableGen/MC can be extended
3880 : // to support autogeneration of this kind of validation.
3881 : bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3882 0 : SmallVectorImpl<SMLoc> &Loc) {
3883 : const MCRegisterInfo *RI = getContext().getRegisterInfo();
3884 0 : const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3885 :
3886 : // A prefix only applies to the instruction following it. Here we extract
3887 0 : // prefix information for the next instruction before validating the current
3888 0 : // one so that in the case of failure we don't erronously continue using the
3889 : // one so that in the case of failure we don't erroneously continue using the
3890 0 : PrefixInfo Prefix = NextPrefix;
3891 : NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3892 0 :
3893 : // Before validating the instruction in isolation we run through the rules
3894 0 : // applicable when it follows a prefix instruction.
3895 : // NOTE: brk & hlt can be prefixed but require no additional validation.
3896 : if (Prefix.isActive() &&
3897 0 : (Inst.getOpcode() != AArch64::BRK) &&
3898 0 : (Inst.getOpcode() != AArch64::HLT)) {
3899 :
3900 0 :     // Prefixed instructions must have a destructive operand.
3901 : if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3902 0 : AArch64::NotDestructive)
3903 : return Error(IDLoc, "instruction is unpredictable when following a"
3904 0 : " movprfx, suggest replacing movprfx with mov");
3905 :
3906 : // Destination operands must match.
3907 0 : if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3908 0 : return Error(Loc[0], "instruction is unpredictable when following a"
3909 : " movprfx writing to a different destination");
3910 0 :
3911 : // Destination operand must not be used in any other location.
3912 0 : for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3913 : if (Inst.getOperand(i).isReg() &&
3914 0 : (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3915 : isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3916 : return Error(Loc[0], "instruction is unpredictable when following a"
3917 0 : " movprfx and destination also used as non-destructive"
3918 0 : " source");
3919 : }
3920 0 :
3921 : auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3922 0 : if (Prefix.isPredicated()) {
3923 : int PgIdx = -1;
3924 0 :
3925 : // Find the instructions general predicate.
3926 : for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3927 0 : if (Inst.getOperand(i).isReg() &&
3928 0 : PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3929 : PgIdx = i;
3930 0 : break;
3931 : }
3932 0 :
3933 : // Instruction must be predicated if the movprfx is predicated.
3934 0 : if (PgIdx == -1 ||
3935 : (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3936 : return Error(IDLoc, "instruction is unpredictable when following a"
3937 0 : " predicated movprfx, suggest using unpredicated movprfx");
3938 0 :
3939 : // Instruction must use same general predicate as the movprfx.
3940 0 : if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3941 : return Error(IDLoc, "instruction is unpredictable when following a"
3942 : " predicated movprfx using a different general predicate");
3943 0 :
3944 : // Instruction element type must match the movprfx.
3945 0 : if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3946 0 : return Error(IDLoc, "instruction is unpredictable when following a"
3947 0 : " predicated movprfx with a different element size");
3948 : }
3949 : }
3950 0 :
3951 : // Check for indexed addressing modes w/ the base register being the
3952 0 : // same as a destination/source register or pair load where
3953 0 : // the Rt == Rt2. All of those are undefined behaviour.
3954 0 : switch (Inst.getOpcode()) {
3955 0 : case AArch64::LDPSWpre:
3956 : case AArch64::LDPWpost:
3957 0 : case AArch64::LDPWpre:
3958 0 : case AArch64::LDPXpost:
3959 0 : case AArch64::LDPXpre: {
3960 0 : unsigned Rt = Inst.getOperand(1).getReg();
3961 : unsigned Rt2 = Inst.getOperand(2).getReg();
3962 0 : unsigned Rn = Inst.getOperand(3).getReg();
3963 0 : if (RI->isSubRegisterEq(Rn, Rt))
3964 0 : return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3965 0 : "is also a destination");
3966 : if (RI->isSubRegisterEq(Rn, Rt2))
3967 0 : return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3968 0 : "is also a destination");
3969 0 : LLVM_FALLTHROUGH;
3970 0 : }
3971 : case AArch64::LDPDi:
3972 0 : case AArch64::LDPQi:
3973 0 : case AArch64::LDPSi:
3974 0 : case AArch64::LDPSWi:
3975 0 : case AArch64::LDPWi:
3976 : case AArch64::LDPXi: {
3977 0 : unsigned Rt = Inst.getOperand(0).getReg();
3978 0 : unsigned Rt2 = Inst.getOperand(1).getReg();
3979 0 : if (Rt == Rt2)
3980 0 : return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3981 : break;
3982 0 : }
3983 0 : case AArch64::LDPDpost:
3984 0 : case AArch64::LDPDpre:
3985 : case AArch64::LDPQpost:
3986 : case AArch64::LDPQpre:
3987 0 : case AArch64::LDPSpost:
3988 : case AArch64::LDPSpre:
3989 0 : case AArch64::LDPSWpost: {
3990 0 : unsigned Rt = Inst.getOperand(1).getReg();
3991 0 : unsigned Rt2 = Inst.getOperand(2).getReg();
3992 0 : if (Rt == Rt2)
3993 0 : return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3994 0 : break;
3995 : }
3996 0 : case AArch64::STPDpost:
3997 0 : case AArch64::STPDpre:
3998 0 : case AArch64::STPQpost:
3999 0 : case AArch64::STPQpre:
4000 0 : case AArch64::STPSpost:
4001 0 : case AArch64::STPSpre:
4002 : case AArch64::STPWpost:
4003 0 : case AArch64::STPWpre:
4004 0 : case AArch64::STPXpost:
4005 0 : case AArch64::STPXpre: {
4006 0 : unsigned Rt = Inst.getOperand(1).getReg();
4007 0 : unsigned Rt2 = Inst.getOperand(2).getReg();
4008 0 : unsigned Rn = Inst.getOperand(3).getReg();
4009 : if (RI->isSubRegisterEq(Rn, Rt))
4010 0 : return Error(Loc[0], "unpredictable STP instruction, writeback base "
4011 0 : "is also a source");
4012 : if (RI->isSubRegisterEq(Rn, Rt2))
4013 0 : return Error(Loc[1], "unpredictable STP instruction, writeback base "
4014 0 : "is also a source");
4015 0 : break;
4016 : }
4017 0 : case AArch64::LDRBBpre:
4018 0 : case AArch64::LDRBpre:
4019 0 : case AArch64::LDRHHpre:
4020 0 : case AArch64::LDRHpre:
4021 0 : case AArch64::LDRSBWpre:
4022 : case AArch64::LDRSBXpre:
4023 : case AArch64::LDRSHWpre:
4024 0 : case AArch64::LDRSHXpre:
4025 : case AArch64::LDRSWpre:
4026 0 : case AArch64::LDRWpre:
4027 0 : case AArch64::LDRXpre:
4028 0 : case AArch64::LDRBBpost:
4029 0 : case AArch64::LDRBpost:
4030 0 : case AArch64::LDRHHpost:
4031 0 : case AArch64::LDRHpost:
4032 : case AArch64::LDRSBWpost:
4033 0 : case AArch64::LDRSBXpost:
4034 0 : case AArch64::LDRSHWpost:
4035 0 : case AArch64::LDRSHXpost:
4036 0 : case AArch64::LDRSWpost:
4037 0 : case AArch64::LDRWpost:
4038 0 : case AArch64::LDRXpost: {
4039 : unsigned Rt = Inst.getOperand(1).getReg();
4040 0 : unsigned Rn = Inst.getOperand(2).getReg();
4041 0 : if (RI->isSubRegisterEq(Rn, Rt))
4042 0 : return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4043 0 : "is also a source");
4044 0 : break;
4045 0 : }
4046 : case AArch64::STRBBpost:
4047 0 : case AArch64::STRBpost:
4048 0 : case AArch64::STRHHpost:
4049 : case AArch64::STRHpost:
4050 0 : case AArch64::STRWpost:
4051 0 : case AArch64::STRXpost:
4052 0 : case AArch64::STRBBpre:
4053 : case AArch64::STRBpre:
4054 0 : case AArch64::STRHHpre:
4055 0 : case AArch64::STRHpre:
4056 0 : case AArch64::STRWpre:
4057 0 : case AArch64::STRXpre: {
4058 0 : unsigned Rt = Inst.getOperand(1).getReg();
4059 : unsigned Rn = Inst.getOperand(2).getReg();
4060 0 : if (RI->isSubRegisterEq(Rn, Rt))
4061 : return Error(Loc[0], "unpredictable STR instruction, writeback base "
4062 0 : "is also a source");
4063 0 : break;
4064 0 : }
4065 0 : case AArch64::STXRB:
4066 : case AArch64::STXRH:
4067 0 : case AArch64::STXRW:
4068 : case AArch64::STXRX:
4069 : case AArch64::STLXRB:
4070 : case AArch64::STLXRH:
4071 : case AArch64::STLXRW:
4072 0 : case AArch64::STLXRX: {
4073 : unsigned Rs = Inst.getOperand(0).getReg();
4074 0 : unsigned Rt = Inst.getOperand(1).getReg();
4075 0 : unsigned Rn = Inst.getOperand(2).getReg();
4076 : if (RI->isSubRegisterEq(Rt, Rs) ||
4077 : (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4078 0 : return Error(Loc[0],
4079 : "unpredictable STXR instruction, status is also a source");
4080 : break;
4081 0 : }
4082 : case AArch64::STXPW:
4083 : case AArch64::STXPX:
4084 : case AArch64::STLXPW:
4085 : case AArch64::STLXPX: {
4086 0 : unsigned Rs = Inst.getOperand(0).getReg();
4087 : unsigned Rt1 = Inst.getOperand(1).getReg();
4088 0 : unsigned Rt2 = Inst.getOperand(2).getReg();
4089 0 : unsigned Rn = Inst.getOperand(3).getReg();
4090 : if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4091 : (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4092 0 : return Error(Loc[0],
4093 : "unpredictable STXP instruction, status is also a source");
4094 : break;
4095 0 : }
4096 : case AArch64::LDGV: {
4097 : unsigned Rt = Inst.getOperand(0).getReg();
4098 : unsigned Rn = Inst.getOperand(1).getReg();
4099 : if (RI->isSubRegisterEq(Rt, Rn)) {
4100 0 : return Error(Loc[0],
4101 : "unpredictable LDGV instruction, writeback register is also "
4102 0 : "the target register");
4103 0 : }
4104 : }
4105 : }
4106 0 :
4107 :
4108 : // Now check immediate ranges. Separate from the above as there is overlap
4109 0 : // in the instructions being checked and this keeps the nested conditionals
4110 : // to a minimum.
4111 0 : switch (Inst.getOpcode()) {
4112 0 : case AArch64::ADDSWri:
4113 0 : case AArch64::ADDSXri:
4114 : case AArch64::ADDWri:
4115 0 : case AArch64::ADDXri:
4116 : case AArch64::SUBSWri:
4117 0 : case AArch64::SUBSXri:
4118 0 : case AArch64::SUBWri:
4119 : case AArch64::SUBXri: {
4120 0 : // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4121 : // some slight duplication here.
4122 : if (Inst.getOperand(2).isExpr()) {
4123 0 : const MCExpr *Expr = Inst.getOperand(2).getExpr();
4124 0 : AArch64MCExpr::VariantKind ELFRefKind;
4125 : MCSymbolRefExpr::VariantKind DarwinRefKind;
4126 0 : int64_t Addend;
4127 : if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4128 :
4129 0 : // Only allow these with ADDXri.
4130 0 : if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4131 : DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4132 0 : Inst.getOpcode() == AArch64::ADDXri)
4133 : return false;
4134 :
4135 0 : // Only allow these with ADDXri/ADDWri
4136 0 : if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4137 : ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4138 0 : ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4139 : ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4140 : ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4141 0 : ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4142 0 : ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4143 : ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4144 0 : ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4145 : ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4146 0 : (Inst.getOpcode() == AArch64::ADDXri ||
4147 0 : Inst.getOpcode() == AArch64::ADDWri))
4148 : return false;
4149 0 :
4150 : // Don't allow symbol refs in the immediate field otherwise
4151 0 : // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4152 0 : // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4153 : // 'cmp w0, 'borked')
4154 0 : return Error(Loc.back(), "invalid immediate expression");
4155 : }
4156 0 : // We don't validate more complex expressions here
4157 0 : }
4158 : return false;
4159 0 : }
4160 : default:
4161 0 : return false;
4162 0 : }
4163 : }
4164 0 :
4165 : static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
4166 : unsigned VariantID = 0);
4167 :
4168 0 : bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4169 0 : uint64_t ErrorInfo,
4170 : OperandVector &Operands) {
4171 0 : switch (ErrCode) {
4172 : case Match_InvalidTiedOperand: {
4173 : RegConstraintEqualityTy EqTy =
4174 0 : static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4175 : .getRegEqualityTy();
4176 0 : switch (EqTy) {
4177 0 : case RegConstraintEqualityTy::EqualsSubReg:
4178 : return Error(Loc, "operand must be 64-bit form of destination register");
4179 0 : case RegConstraintEqualityTy::EqualsSuperReg:
4180 : return Error(Loc, "operand must be 32-bit form of destination register");
4181 : case RegConstraintEqualityTy::EqualsReg:
4182 0 : return Error(Loc, "operand must match destination register");
4183 : }
4184 0 : llvm_unreachable("Unknown RegConstraintEqualityTy");
4185 0 : }
4186 : case Match_MissingFeature:
4187 0 : return Error(Loc,
4188 : "instruction requires a CPU feature not currently enabled");
4189 : case Match_InvalidOperand:
4190 0 : return Error(Loc, "invalid operand for instruction");
4191 0 : case Match_InvalidSuffix:
4192 0 : return Error(Loc, "invalid type suffix for instruction");
4193 0 : case Match_InvalidCondCode:
4194 : return Error(Loc, "expected AArch64 condition code");
4195 : case Match_AddSubRegExtendSmall:
4196 : return Error(Loc,
4197 : "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4198 : case Match_AddSubRegExtendLarge:
4199 0 : return Error(Loc,
4200 : "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4201 : case Match_AddSubSecondSource:
4202 0 : return Error(Loc,
4203 0 : "expected compatible register, symbol or integer in range [0, 4095]");
4204 0 : case Match_LogicalSecondSource:
4205 0 : return Error(Loc, "expected compatible register or logical immediate");
4206 : case Match_InvalidMovImm32Shift:
4207 : return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4208 0 : case Match_InvalidMovImm64Shift:
4209 : return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4210 : case Match_AddSubRegShift32:
4211 0 : return Error(Loc,
4212 0 : "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4213 0 : case Match_AddSubRegShift64:
4214 0 : return Error(Loc,
4215 0 : "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4216 : case Match_InvalidFPImm:
4217 : return Error(Loc,
4218 0 : "expected compatible register or floating-point constant");
4219 0 : case Match_InvalidMemoryIndexedSImm6:
4220 0 : return Error(Loc, "index must be an integer in range [-32, 31].");
4221 0 : case Match_InvalidMemoryIndexedSImm5:
4222 0 : return Error(Loc, "index must be an integer in range [-16, 15].");
4223 : case Match_InvalidMemoryIndexed1SImm4:
4224 : return Error(Loc, "index must be an integer in range [-8, 7].");
4225 0 : case Match_InvalidMemoryIndexed2SImm4:
4226 0 : return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4227 0 : case Match_InvalidMemoryIndexed3SImm4:
4228 0 : return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4229 0 : case Match_InvalidMemoryIndexed4SImm4:
4230 : return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4231 : case Match_InvalidMemoryIndexed16SImm4:
4232 0 : return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4233 0 : case Match_InvalidMemoryIndexed1SImm6:
4234 0 : return Error(Loc, "index must be an integer in range [-32, 31].");
4235 0 : case Match_InvalidMemoryIndexedSImm8:
4236 0 : return Error(Loc, "index must be an integer in range [-128, 127].");
4237 : case Match_InvalidMemoryIndexedSImm9:
4238 : return Error(Loc, "index must be an integer in range [-256, 255].");
4239 0 : case Match_InvalidMemoryIndexed16SImm9:
4240 0 : return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
4241 0 : case Match_InvalidMemoryIndexed8SImm10:
4242 0 : return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4243 : case Match_InvalidMemoryIndexed4SImm7:
4244 : return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4245 0 : case Match_InvalidMemoryIndexed8SImm7:
4246 : return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4247 : case Match_InvalidMemoryIndexed16SImm7:
4248 0 : return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4249 0 : case Match_InvalidMemoryIndexed8UImm5:
4250 0 : return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4251 0 : case Match_InvalidMemoryIndexed4UImm5:
4252 0 : return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4253 : case Match_InvalidMemoryIndexed2UImm5:
4254 : return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4255 0 : case Match_InvalidMemoryIndexed8UImm6:
4256 0 : return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4257 0 : case Match_InvalidMemoryIndexed16UImm6:
4258 0 : return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
4259 0 : case Match_InvalidMemoryIndexed4UImm6:
4260 : return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4261 : case Match_InvalidMemoryIndexed2UImm6:
4262 0 : return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4263 0 : case Match_InvalidMemoryIndexed1UImm6:
4264 0 : return Error(Loc, "index must be in range [0, 63].");
4265 0 : case Match_InvalidMemoryWExtend8:
4266 0 : return Error(Loc,
4267 : "expected 'uxtw' or 'sxtw' with optional shift of #0");
4268 : case Match_InvalidMemoryWExtend16:
4269 0 : return Error(Loc,
4270 0 : "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4271 0 : case Match_InvalidMemoryWExtend32:
4272 0 : return Error(Loc,
4273 0 : "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4274 : case Match_InvalidMemoryWExtend64:
4275 : return Error(Loc,
4276 0 : "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4277 0 : case Match_InvalidMemoryWExtend128:
4278 0 : return Error(Loc,
4279 0 : "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4280 : case Match_InvalidMemoryXExtend8:
4281 0 : return Error(Loc,
4282 : "expected 'lsl' or 'sxtx' with optional shift of #0");
4283 0 : case Match_InvalidMemoryXExtend16:
4284 0 : return Error(Loc,
4285 0 : "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4286 : case Match_InvalidMemoryXExtend32:
4287 0 : return Error(Loc,
4288 : "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4289 0 : case Match_InvalidMemoryXExtend64:
4290 0 : return Error(Loc,
4291 0 : "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4292 : case Match_InvalidMemoryXExtend128:
4293 : return Error(Loc,
4294 : "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4295 : case Match_InvalidMemoryIndexed1:
4296 : return Error(Loc, "index must be an integer in range [0, 4095].");
4297 103480 : case Match_InvalidMemoryIndexed2:
4298 103480 : return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4299 103480 : case Match_InvalidMemoryIndexed4:
4300 103480 : return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4301 103480 : case Match_InvalidMemoryIndexed8:
4302 103478 : return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4303 : case Match_InvalidMemoryIndexed16:
4304 : return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4305 : case Match_InvalidImm0_1:
4306 : return Error(Loc, "immediate must be an integer in range [0, 1].");
4307 : case Match_InvalidImm0_7:
4308 : return Error(Loc, "immediate must be an integer in range [0, 7].");
4309 : case Match_InvalidImm0_15:
4310 : return Error(Loc, "immediate must be an integer in range [0, 15].");
4311 : case Match_InvalidImm0_31:
4312 129 : return Error(Loc, "immediate must be an integer in range [0, 31].");
4313 93268 : case Match_InvalidImm0_63:
4314 93268 : return Error(Loc, "immediate must be an integer in range [0, 63].");
4315 38910 : case Match_InvalidImm0_127:
4316 93268 : return Error(Loc, "immediate must be an integer in range [0, 127].");
4317 93268 : case Match_InvalidImm0_255:
4318 93268 : return Error(Loc, "immediate must be an integer in range [0, 255].");
4319 93268 : case Match_InvalidImm0_65535:
4320 93268 : return Error(Loc, "immediate must be an integer in range [0, 65535].");
4321 38910 : case Match_InvalidImm1_8:
4322 : return Error(Loc, "immediate must be an integer in range [1, 8].");
4323 : case Match_InvalidImm1_16:
4324 : return Error(Loc, "immediate must be an integer in range [1, 16].");
4325 : case Match_InvalidImm1_32:
4326 : return Error(Loc, "immediate must be an integer in range [1, 32].");
4327 : case Match_InvalidImm1_64:
4328 : return Error(Loc, "immediate must be an integer in range [1, 64].");
4329 : case Match_InvalidSVEAddSubImm8:
4330 : return Error(Loc, "immediate must be an integer in range [0, 255]"
4331 : " with a shift amount of 0");
4332 : case Match_InvalidSVEAddSubImm16:
4333 : case Match_InvalidSVEAddSubImm32:
4334 : case Match_InvalidSVEAddSubImm64:
4335 : return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4336 54358 : "multiple of 256 in range [256, 65280]");
4337 : case Match_InvalidSVECpyImm8:
4338 : return Error(Loc, "immediate must be an integer in range [-128, 255]"
4339 : " with a shift amount of 0");
4340 : case Match_InvalidSVECpyImm16:
4341 : return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4342 : "multiple of 256 in range [-32768, 65280]");
4343 : case Match_InvalidSVECpyImm32:
4344 4460 : case Match_InvalidSVECpyImm64:
4345 4460 : return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4346 4460 : "multiple of 256 in range [-32768, 32512]");
4347 4460 : case Match_InvalidIndexRange1_1:
4348 4460 : return Error(Loc, "expected lane specifier '[1]'");
4349 4460 : case Match_InvalidIndexRange0_15:
4350 4460 : return Error(Loc, "vector lane must be an integer in range [0, 15].");
4351 4460 : case Match_InvalidIndexRange0_7:
4352 : return Error(Loc, "vector lane must be an integer in range [0, 7].");
4353 : case Match_InvalidIndexRange0_3:
4354 : return Error(Loc, "vector lane must be an integer in range [0, 3].");
4355 : case Match_InvalidIndexRange0_1:
4356 : return Error(Loc, "vector lane must be an integer in range [0, 1].");
4357 1508 : case Match_InvalidSVEIndexRange0_63:
4358 1508 : return Error(Loc, "vector lane must be an integer in range [0, 63].");
4359 1508 : case Match_InvalidSVEIndexRange0_31:
4360 1508 : return Error(Loc, "vector lane must be an integer in range [0, 31].");
4361 : case Match_InvalidSVEIndexRange0_15:
4362 : return Error(Loc, "vector lane must be an integer in range [0, 15].");
4363 : case Match_InvalidSVEIndexRange0_7:
4364 : return Error(Loc, "vector lane must be an integer in range [0, 7].");
4365 : case Match_InvalidSVEIndexRange0_3:
4366 5420 : return Error(Loc, "vector lane must be an integer in range [0, 3].");
4367 15463 : case Match_InvalidLabel:
4368 15463 : return Error(Loc, "expected label or encodable integer pc offset");
4369 15457 : case Match_MRS:
4370 : return Error(Loc, "expected readable system register");
4371 : case Match_MSR:
4372 : return Error(Loc, "expected writable system register or pstate");
4373 : case Match_InvalidComplexRotationEven:
4374 : return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4375 : case Match_InvalidComplexRotationOdd:
4376 : return Error(Loc, "complex rotation must be 90 or 270.");
4377 453 : case Match_MnemonicFail: {
4378 453 : std::string Suggestion = AArch64MnemonicSpellCheck(
4379 453 : ((AArch64Operand &)*Operands[0]).getToken(),
4380 453 : ComputeAvailableFeatures(STI->getFeatureBits()));
4381 453 : return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4382 : }
4383 : case Match_InvalidGPR64shifted8:
4384 : return Error(Loc, "register must be x0..x30 or xzr, without shift");
4385 : case Match_InvalidGPR64shifted16:
4386 : return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4387 331 : case Match_InvalidGPR64shifted32:
4388 331 : return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4389 331 : case Match_InvalidGPR64shifted64:
4390 331 : return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4391 : case Match_InvalidGPR64NoXZRshifted8:
4392 : return Error(Loc, "register must be x0..x30 without shift");
4393 : case Match_InvalidGPR64NoXZRshifted16:
4394 : return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4395 3575 : case Match_InvalidGPR64NoXZRshifted32:
4396 3575 : return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4397 7150 : case Match_InvalidGPR64NoXZRshifted64:
4398 3575 : return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4399 3575 : case Match_InvalidZPR32UXTW8:
4400 3575 : case Match_InvalidZPR32SXTW8:
4401 3575 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4402 : case Match_InvalidZPR32UXTW16:
4403 : case Match_InvalidZPR32SXTW16:
4404 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4405 : case Match_InvalidZPR32UXTW32:
4406 : case Match_InvalidZPR32SXTW32:
4407 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4408 33 : case Match_InvalidZPR32UXTW64:
4409 48 : case Match_InvalidZPR32SXTW64:
4410 48 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4411 48 : case Match_InvalidZPR64UXTW8:
4412 48 : case Match_InvalidZPR64SXTW8:
4413 48 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4414 : case Match_InvalidZPR64UXTW16:
4415 : case Match_InvalidZPR64SXTW16:
4416 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4417 : case Match_InvalidZPR64UXTW32:
4418 : case Match_InvalidZPR64SXTW32:
4419 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4420 : case Match_InvalidZPR64UXTW64:
4421 : case Match_InvalidZPR64SXTW64:
4422 2491 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4423 2491 : case Match_InvalidZPR32LSL8:
4424 2491 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4425 2491 : case Match_InvalidZPR32LSL16:
4426 2491 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4427 2491 : case Match_InvalidZPR32LSL32:
4428 2491 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4429 2491 : case Match_InvalidZPR32LSL64:
4430 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4431 : case Match_InvalidZPR64LSL8:
4432 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4433 : case Match_InvalidZPR64LSL16:
4434 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4435 140 : case Match_InvalidZPR64LSL32:
4436 688 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4437 688 : case Match_InvalidZPR64LSL64:
4438 688 : return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4439 : case Match_InvalidZPR0:
4440 : return Error(Loc, "expected register without element width suffix");
4441 : case Match_InvalidZPR8:
4442 : case Match_InvalidZPR16:
4443 : case Match_InvalidZPR32:
4444 : case Match_InvalidZPR64:
4445 : case Match_InvalidZPR128:
4446 760 : return Error(Loc, "invalid element width");
4447 760 : case Match_InvalidZPR_3b8:
4448 760 : return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4449 760 : case Match_InvalidZPR_3b16:
4450 760 : return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4451 760 : case Match_InvalidZPR_3b32:
4452 : return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4453 : case Match_InvalidZPR_4b16:
4454 : return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4455 : case Match_InvalidZPR_4b32:
4456 : return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4457 : case Match_InvalidZPR_4b64:
4458 : return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4459 2 : case Match_InvalidSVEPattern:
4460 2 : return Error(Loc, "invalid predicate pattern");
4461 2 : case Match_InvalidSVEPredicateAnyReg:
4462 2 : case Match_InvalidSVEPredicateBReg:
4463 2 : case Match_InvalidSVEPredicateHReg:
4464 2 : case Match_InvalidSVEPredicateSReg:
4465 : case Match_InvalidSVEPredicateDReg:
4466 : return Error(Loc, "invalid predicate register.");
4467 : case Match_InvalidSVEPredicate3bAnyReg:
4468 9 : case Match_InvalidSVEPredicate3bBReg:
4469 : case Match_InvalidSVEPredicate3bHReg:
4470 : case Match_InvalidSVEPredicate3bSReg:
4471 : case Match_InvalidSVEPredicate3bDReg:
4472 9 : return Error(Loc, "restricted predicate has range [0, 7].");
4473 18 : case Match_InvalidSVEExactFPImmOperandHalfOne:
4474 9 : return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4475 9 : case Match_InvalidSVEExactFPImmOperandHalfTwo:
4476 9 : return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4477 9 : case Match_InvalidSVEExactFPImmOperandZeroOne:
4478 9 : return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4479 : default:
4480 : llvm_unreachable("unexpected error code!");
4481 : }
4482 : }
4483 :
4484 499 : static const char *getSubtargetFeatureName(uint64_t Val);
4485 1972 :
4486 1972 : bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4487 1972 : OperandVector &Operands,
4488 1972 : MCStreamer &Out,
4489 1972 : uint64_t &ErrorInfo,
4490 : bool MatchingInlineAsm) {
4491 : assert(!Operands.empty() && "Unexpected empty operand list!");
4492 : AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4493 : assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4494 :
4495 : StringRef Tok = Op.getToken();
4496 0 : unsigned NumOperands = Operands.size();
4497 0 :
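// Illustrative sketch of the rewrite below (example operands assumed, not
// taken from any particular test): the LSL-immediate alias is converted to
// UBFM, e.g. for a 32-bit destination
//   lsl w0, w1, #3   is re-encoded as   ubfm w0, w1, #29, #28
// since (32 - 3) & 0x1f == 29 and 31 - 3 == 28.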
4498 0 : if (NumOperands == 4 && Tok == "lsl") {
4499 0 : AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4500 0 : AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4501 0 : if (Op2.isScalarReg() && Op3.isImm()) {
4502 0 : const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4503 0 : if (Op3CE) {
4504 0 : uint64_t Op3Val = Op3CE->getValue();
4505 0 : uint64_t NewOp3Val = 0;
4506 0 : uint64_t NewOp4Val = 0;
4507 0 : if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4508 : Op2.getReg())) {
4509 0 : NewOp3Val = (32 - Op3Val) & 0x1f;
4510 : NewOp4Val = 31 - Op3Val;
4511 : } else {
4512 0 : NewOp3Val = (64 - Op3Val) & 0x3f;
4513 0 : NewOp4Val = 63 - Op3Val;
4514 : }
4515 0 :
4516 0 : const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4517 0 : const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4518 0 :
4519 0 : Operands[0] = AArch64Operand::CreateToken(
4520 0 : "ubfm", false, Op.getStartLoc(), getContext());
4521 : Operands.push_back(AArch64Operand::CreateImm(
4522 0 : NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4523 0 : Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4524 0 : Op3.getEndLoc(), getContext());
4525 0 : }
4526 0 : }
4527 0 : } else if (NumOperands == 4 && Tok == "bfc") {
4528 0 : // FIXME: Horrible hack to handle BFC->BFM alias.
4529 0 : AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
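// Illustrative sketch (example operands assumed): BFC clears Width bits at
// LSB by inserting from the zero register, e.g.
//   bfc w0, #4, #8   becomes   bfm w0, wzr, #28, #7
// with ImmR = (32 - 4) & 0x1f == 28 and ImmS = 8 - 1 == 7.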
4530 0 : AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4531 0 : AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4532 :
4533 0 : if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4534 0 : const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4535 0 : const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4536 0 :
4537 0 : if (LSBCE && WidthCE) {
4538 : uint64_t LSB = LSBCE->getValue();
4539 0 : uint64_t Width = WidthCE->getValue();
4540 0 :
4541 0 : uint64_t RegWidth = 0;
4542 0 : if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4543 0 : Op1.getReg()))
4544 : RegWidth = 64;
4545 0 : else
4546 0 : RegWidth = 32;
4547 0 :
4548 0 : if (LSB >= RegWidth)
4549 : return Error(LSBOp.getStartLoc(),
4550 0 : "expected integer in range [0, 31]");
4551 : if (Width < 1 || Width > RegWidth)
4552 : return Error(WidthOp.getStartLoc(),
4553 0 : "expected integer in range [1, 32]");
4554 0 :
4555 0 : uint64_t ImmR = 0;
4556 0 : if (RegWidth == 32)
4557 0 : ImmR = (32 - LSB) & 0x1f;
4558 0 : else
4559 : ImmR = (64 - LSB) & 0x3f;
4560 :
4561 : uint64_t ImmS = Width - 1;
4562 0 :
4563 0 : if (ImmR != 0 && ImmS >= ImmR)
4564 0 : return Error(WidthOp.getStartLoc(),
4565 0 : "requested insert overflows register");
4566 :
4567 0 : const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4568 0 : const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4569 : Operands[0] = AArch64Operand::CreateToken(
4570 : "bfm", false, Op.getStartLoc(), getContext());
4571 : Operands[2] = AArch64Operand::CreateReg(
4572 0 : RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4573 : SMLoc(), SMLoc(), getContext());
4574 : Operands[3] = AArch64Operand::CreateImm(
4575 : ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4576 : Operands.emplace_back(
4577 : AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4578 : WidthOp.getEndLoc(), getContext()));
4579 : }
4580 : }
4581 271449 : } else if (NumOperands == 5) {
4582 542898 : // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4583 : // UBFIZ -> UBFM aliases.
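// Illustrative sketch (example operands assumed):
//   ubfiz w0, w1, #4, #8   becomes   ubfm w0, w1, #28, #7
// i.e. the LSB is rotated into immr ((32 - 4) & 0x1f) and the width becomes
// imms (8 - 1); bfi and sbfiz map onto bfm/sbfm in the same way.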
4584 : if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4585 : AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4586 : AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4587 : AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4588 :
4589 : if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4590 : const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4591 : const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4592 :
4593 : if (Op3CE && Op4CE) {
4594 : uint64_t Op3Val = Op3CE->getValue();
4595 : uint64_t Op4Val = Op4CE->getValue();
4596 :
4597 : uint64_t RegWidth = 0;
4598 : if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4599 : Op1.getReg()))
4600 : RegWidth = 64;
4601 : else
4602 : RegWidth = 32;
4603 :
4604 : if (Op3Val >= RegWidth)
4605 : return Error(Op3.getStartLoc(),
4606 : "expected integer in range [0, 31]");
4607 : if (Op4Val < 1 || Op4Val > RegWidth)
4608 : return Error(Op4.getStartLoc(),
4609 : "expected integer in range [1, 32]");
4610 :
4611 : uint64_t NewOp3Val = 0;
4612 : if (RegWidth == 32)
4613 : NewOp3Val = (32 - Op3Val) & 0x1f;
4614 : else
4615 271449 : NewOp3Val = (64 - Op3Val) & 0x3f;
4616 :
4617 : uint64_t NewOp4Val = Op4Val - 1;
4618 :
4619 : if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4620 : return Error(Op4.getStartLoc(),
4621 : "requested insert overflows register");
4622 112367 :
4623 : const MCExpr *NewOp3 =
4624 : MCConstantExpr::create(NewOp3Val, getContext());
4625 : const MCExpr *NewOp4 =
4626 112367 : MCConstantExpr::create(NewOp4Val, getContext());
4627 30866 : Operands[3] = AArch64Operand::CreateImm(
4628 : NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4629 61732 : Operands[4] = AArch64Operand::CreateImm(
4630 : NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4631 : if (Tok == "bfi")
4632 : Operands[0] = AArch64Operand::CreateToken(
4633 : "bfm", false, Op.getStartLoc(), getContext());
4634 : else if (Tok == "sbfiz")
4635 : Operands[0] = AArch64Operand::CreateToken(
4636 : "sbfm", false, Op.getStartLoc(), getContext());
4637 : else if (Tok == "ubfiz")
4638 : Operands[0] = AArch64Operand::CreateToken(
4639 : "ubfm", false, Op.getStartLoc(), getContext());
4640 : else
4641 : llvm_unreachable("No valid mnemonic for alias?");
4642 : }
4643 : }
4644 :
4645 : // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4646 : // UBFX -> UBFM aliases.
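// Illustrative sketch (example operands assumed):
//   ubfx w0, w1, #4, #8   becomes   ubfm w0, w1, #4, #11
// i.e. immr keeps the LSB and imms becomes lsb + width - 1; bfxil and sbfx
// map onto bfm/sbfm in the same way.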
4647 : } else if (NumOperands == 5 &&
4648 : (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4649 : AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4650 : AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4651 : AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4652 :
4653 30866 : if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4654 81501 : const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4655 : const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4656 163002 :
4657 : if (Op3CE && Op4CE) {
4658 : uint64_t Op3Val = Op3CE->getValue();
4659 : uint64_t Op4Val = Op4CE->getValue();
4660 :
4661 : uint64_t RegWidth = 0;
4662 : if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4663 : Op1.getReg()))
4664 81501 : RegWidth = 64;
4665 0 : else
4666 0 : RegWidth = 32;
4667 :
4668 : if (Op3Val >= RegWidth)
4669 : return Error(Op3.getStartLoc(),
4670 : "expected integer in range [0, 31]");
4671 : if (Op4Val < 1 || Op4Val > RegWidth)
4672 : return Error(Op4.getStartLoc(),
4673 : "expected integer in range [1, 32]");
4674 :
4675 : uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4676 90448 :
4677 : if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4678 : return Error(Op4.getStartLoc(),
4679 338545 : "requested extract overflows register");
4680 677090 :
4681 : const MCExpr *NewOp4 =
4682 : MCConstantExpr::create(NewOp4Val, getContext());
4683 : Operands[4] = AArch64Operand::CreateImm(
4684 : NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4685 : if (Tok == "bfxil")
4686 : Operands[0] = AArch64Operand::CreateToken(
4687 : "bfm", false, Op.getStartLoc(), getContext());
4688 : else if (Tok == "sbfx")
4689 : Operands[0] = AArch64Operand::CreateToken(
4690 : "sbfm", false, Op.getStartLoc(), getContext());
4691 : else if (Tok == "ubfx")
4692 : Operands[0] = AArch64Operand::CreateToken(
4693 : "ubfm", false, Op.getStartLoc(), getContext());
4694 : else
4695 : llvm_unreachable("No valid mnemonic for alias?");
4696 : }
4697 : }
4698 : }
4699 : }
4700 :
4701 : // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4702 : // instruction for FP registers correctly in some rare circumstances. Convert
4703 : // it to a safe instruction and warn (because silently changing someone's
4704 : // assembly is rude).
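// Illustrative sketch (assumed example): on a CPU with this workaround
// enabled, "movi v0.2d, #0" is emitted as "movi v0.16b, #0" (the same
// all-zero result) and a warning is printed.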
4705 : if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4706 : NumOperands == 4 && Tok == "movi") {
4707 : AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4708 : AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4709 : AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4710 : if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4711 : (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4712 : StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4713 338545 : if (Suffix.lower() == ".2d" &&
4714 : cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4715 : Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4716 301637 : " correctly on this CPU, converting to equivalent movi.16b");
4717 603274 : // Switch the suffix to .16b.
4718 : unsigned Idx = Op1.isToken() ? 1 : 2;
4719 : Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4720 : getContext());
4721 : }
4722 : }
4723 : }
4724 :
4725 : // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4726 : // InstAlias can't quite handle this since the reg classes aren't
4727 : // subclasses.
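// Illustrative sketch (assumed example): for "sxtw x0, w1" the w1 source is
// re-created below as its x1 super-register so the GPR64 matcher accepts
// it; the encoding is unchanged.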
4728 : if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4729 : // The source register can be Wn here, but the matcher expects a
4730 : // GPR64. Twiddle it here if necessary.
4731 : AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4732 : if (Op.isScalarReg()) {
4733 : unsigned Reg = getXRegFromWReg(Op.getReg());
4734 301637 : Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4735 : Op.getStartLoc(), Op.getEndLoc(),
4736 : getContext());
4737 24 : }
4738 : }
4739 24 : // FIXME: Likewise for sxt[bh] with a Xd dst operand
4740 24 : else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4741 24 : AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4742 24 : if (Op.isScalarReg() &&
4743 : AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4744 : Op.getReg())) {
4745 : // The source register can be Wn here, but the matcher expects a
4746 338545 : // GPR64. Twiddle it here if necessary.
4747 : AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4748 : if (Op.isScalarReg()) {
4749 338545 : unsigned Reg = getXRegFromWReg(Op.getReg());
4750 43235 : Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4751 : Op.getStartLoc(),
4752 301637 : Op.getEndLoc(), getContext());
4753 44163 : }
4754 : }
4755 271449 : }
4756 85306 : // FIXME: Likewise for uxt[bh] with a Xd dst operand
4757 : else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4758 : AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4759 220518 : if (Op.isScalarReg() &&
4760 378183 : AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4761 : Op.getReg())) {
4762 : // The source register can be Wn here, but the matcher expects a
4763 : // GPR32. Twiddle it here if necessary.
4764 23828 : AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4765 : if (Op.isScalarReg()) {
4766 : unsigned Reg = getWRegFromXReg(Op.getReg());
4767 : Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4768 : Op.getStartLoc(),
4769 : Op.getEndLoc(), getContext());
4770 380 : }
4771 : }
4772 : }
4773 :
4774 : MCInst Inst;
4775 11700 : // First try to match against the secondary set of tables containing the
4776 23400 : // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4777 : unsigned MatchResult =
4778 : MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4779 :
4780 805 : // If that fails, try against the alternate table containing long-form NEON:
4781 20 : // "fadd v0.2s, v1.2s, v2.2s"
4782 : if (MatchResult != Match_Success) {
4783 : // But first, save the short-form match result: we can use it in case the
4784 : // long-form match also fails.
4785 : auto ShortFormNEONErrorInfo = ErrorInfo;
4786 : auto ShortFormNEONMatchResult = MatchResult;
4787 :
4788 : MatchResult =
4789 : MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4790 54665 :
4791 54665 : // Now, both matches failed, and the long-form match failed on the mnemonic
4792 54665 : // suffix token operand. The short-form match failure is probably more
4793 54665 : // relevant: use it instead.
4794 : if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4795 : Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4796 46023 : ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4797 46023 : MatchResult = ShortFormNEONMatchResult;
4798 46023 : ErrorInfo = ShortFormNEONErrorInfo;
4799 : }
4800 : }
4801 39091 :
4802 39091 : switch (MatchResult) {
4803 39091 : case Match_Success: {
4804 : // Perform range checking and other semantic validations
4805 : SmallVector<SMLoc, 8> OperandLocs;
4806 : NumOperands = Operands.size();
4807 : for (unsigned i = 1; i < NumOperands; ++i)
4808 146 : OperandLocs.push_back(Operands[i]->getStartLoc());
4809 146 : if (validateInstruction(Inst, IDLoc, OperandLocs))
4810 : return true;
4811 :
4812 146 : Inst.setLoc(IDLoc);
4813 0 : Out.EmitInstruction(Inst, getSTI());
4814 0 : return false;
4815 : }
4816 : case Match_MissingFeature: {
4817 292 : assert(ErrorInfo && "Unknown missing feature!");
4818 146 : // Special case the error message for the very common case where only
4819 0 : // a single subtarget feature is missing (neon, e.g.).
4820 0 : std::string Msg = "instruction requires:";
4821 : uint64_t Mask = 1;
4822 : for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4823 : if (ErrorInfo & Mask) {
4824 0 : Msg += " ";
4825 146 : Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4826 6 : }
4827 6 : Mask <<= 1;
4828 : }
4829 : return Error(IDLoc, Msg);
4830 140 : }
4831 420 : case Match_MnemonicFail:
4832 140 : return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4833 140 : case Match_InvalidOperand: {
4834 : SMLoc ErrorLoc = IDLoc;
4835 :
4836 : if (ErrorInfo != ~0ULL) {
4837 : if (ErrorInfo >= Operands.size())
4838 : return Error(IDLoc, "too few operands for instruction",
4839 779 : SMRange(IDLoc, getTok().getLoc()));
4840 779 :
4841 : ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4842 779 : if (ErrorLoc == SMLoc())
4843 : ErrorLoc = IDLoc;
4844 : }
4845 : // If the match failed on a suffix token operand, tweak the diagnostic
4846 316 : // accordingly.
4847 308 : if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4848 41 : ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4849 40 : MatchResult = Match_InvalidSuffix;
4850 :
4851 : return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4852 : }
4853 : case Match_InvalidTiedOperand:
4854 : case Match_InvalidMemoryIndexed1:
4855 : case Match_InvalidMemoryIndexed2:
4856 : case Match_InvalidMemoryIndexed4:
4857 : case Match_InvalidMemoryIndexed8:
4858 : case Match_InvalidMemoryIndexed16:
4859 : case Match_InvalidCondCode:
4860 : case Match_AddSubRegExtendSmall:
4861 : case Match_AddSubRegExtendLarge:
4862 : case Match_AddSubSecondSource:
4863 : case Match_LogicalSecondSource:
4864 : case Match_AddSubRegShift32:
4865 779 : case Match_AddSubRegShift64:
4866 357 : case Match_InvalidMovImm32Shift:
4867 : case Match_InvalidMovImm64Shift:
4868 422 : case Match_InvalidFPImm:
4869 : case Match_InvalidMemoryWExtend8:
4870 : case Match_InvalidMemoryWExtend16:
4871 422 : case Match_InvalidMemoryWExtend32:
4872 : case Match_InvalidMemoryWExtend64:
4873 4 : case Match_InvalidMemoryWExtend128:
4874 4 : case Match_InvalidMemoryXExtend8:
4875 : case Match_InvalidMemoryXExtend16:
4876 418 : case Match_InvalidMemoryXExtend32:
4877 418 : case Match_InvalidMemoryXExtend64:
4878 18 : case Match_InvalidMemoryXExtend128:
4879 : case Match_InvalidMemoryIndexed1SImm4:
4880 6 : case Match_InvalidMemoryIndexed2SImm4:
4881 : case Match_InvalidMemoryIndexed3SImm4:
4882 : case Match_InvalidMemoryIndexed4SImm4:
4883 412 : case Match_InvalidMemoryIndexed1SImm6:
4884 1236 : case Match_InvalidMemoryIndexed16SImm4:
4885 : case Match_InvalidMemoryIndexed4SImm7:
4886 : case Match_InvalidMemoryIndexed8SImm7:
4887 : case Match_InvalidMemoryIndexed16SImm7:
4888 : case Match_InvalidMemoryIndexed8UImm5:
4889 357 : case Match_InvalidMemoryIndexed4UImm5:
4890 0 : case Match_InvalidMemoryIndexed2UImm5:
4891 0 : case Match_InvalidMemoryIndexed1UImm6:
4892 : case Match_InvalidMemoryIndexed2UImm6:
4893 : case Match_InvalidMemoryIndexed4UImm6:
4894 357 : case Match_InvalidMemoryIndexed8UImm6:
4895 357 : case Match_InvalidMemoryIndexed16UImm6:
4896 9 : case Match_InvalidMemoryIndexedSImm6:
4897 9 : case Match_InvalidMemoryIndexedSImm5:
4898 : case Match_InvalidMemoryIndexedSImm8:
4899 : case Match_InvalidMemoryIndexedSImm9:
4900 348 : case Match_InvalidMemoryIndexed16SImm9:
4901 1044 : case Match_InvalidMemoryIndexed8SImm10:
4902 : case Match_InvalidImm0_1:
4903 348 : case Match_InvalidImm0_7:
4904 : case Match_InvalidImm0_15:
4905 729 : case Match_InvalidImm0_31:
4906 729 : case Match_InvalidImm0_63:
4907 : case Match_InvalidImm0_127:
4908 729 : case Match_InvalidImm0_255:
4909 : case Match_InvalidImm0_65535:
4910 : case Match_InvalidImm1_8:
4911 : case Match_InvalidImm1_16:
4912 316 : case Match_InvalidImm1_32:
4913 308 : case Match_InvalidImm1_64:
4914 : case Match_InvalidSVEAddSubImm8:
4915 : case Match_InvalidSVEAddSubImm16:
4916 : case Match_InvalidSVEAddSubImm32:
4917 : case Match_InvalidSVEAddSubImm64:
4918 : case Match_InvalidSVECpyImm8:
4919 : case Match_InvalidSVECpyImm16:
4920 : case Match_InvalidSVECpyImm32:
4921 : case Match_InvalidSVECpyImm64:
4922 : case Match_InvalidIndexRange1_1:
4923 : case Match_InvalidIndexRange0_15:
4924 : case Match_InvalidIndexRange0_7:
4925 : case Match_InvalidIndexRange0_3:
4926 : case Match_InvalidIndexRange0_1:
4927 : case Match_InvalidSVEIndexRange0_63:
4928 : case Match_InvalidSVEIndexRange0_31:
4929 : case Match_InvalidSVEIndexRange0_15:
4930 : case Match_InvalidSVEIndexRange0_7:
4931 729 : case Match_InvalidSVEIndexRange0_3:
4932 316 : case Match_InvalidLabel:
4933 : case Match_InvalidComplexRotationEven:
4934 413 : case Match_InvalidComplexRotationOdd:
4935 : case Match_InvalidGPR64shifted8:
4936 : case Match_InvalidGPR64shifted16:
4937 413 : case Match_InvalidGPR64shifted32:
4938 : case Match_InvalidGPR64shifted64:
4939 4 : case Match_InvalidGPR64NoXZRshifted8:
4940 4 : case Match_InvalidGPR64NoXZRshifted16:
4941 : case Match_InvalidGPR64NoXZRshifted32:
4942 409 : case Match_InvalidGPR64NoXZRshifted64:
4943 409 : case Match_InvalidZPR32UXTW8:
4944 12 : case Match_InvalidZPR32UXTW16:
4945 : case Match_InvalidZPR32UXTW32:
4946 4 : case Match_InvalidZPR32UXTW64:
4947 : case Match_InvalidZPR32SXTW8:
4948 : case Match_InvalidZPR32SXTW16:
4949 405 : case Match_InvalidZPR32SXTW32:
4950 1215 : case Match_InvalidZPR32SXTW64:
4951 : case Match_InvalidZPR64UXTW8:
4952 : case Match_InvalidZPR64SXTW8:
4953 : case Match_InvalidZPR64UXTW16:
4954 : case Match_InvalidZPR64SXTW16:
4955 316 : case Match_InvalidZPR64UXTW32:
4956 0 : case Match_InvalidZPR64SXTW32:
4957 0 : case Match_InvalidZPR64UXTW64:
4958 : case Match_InvalidZPR64SXTW64:
4959 : case Match_InvalidZPR32LSL8:
4960 316 : case Match_InvalidZPR32LSL16:
4961 316 : case Match_InvalidZPR32LSL32:
4962 8 : case Match_InvalidZPR32LSL64:
4963 8 : case Match_InvalidZPR64LSL8:
4964 : case Match_InvalidZPR64LSL16:
4965 : case Match_InvalidZPR64LSL32:
4966 308 : case Match_InvalidZPR64LSL64:
4967 924 : case Match_InvalidZPR0:
4968 : case Match_InvalidZPR8:
4969 308 : case Match_InvalidZPR16:
4970 : case Match_InvalidZPR32:
4971 50 : case Match_InvalidZPR64:
4972 50 : case Match_InvalidZPR128:
4973 : case Match_InvalidZPR_3b8:
4974 50 : case Match_InvalidZPR_3b16:
4975 : case Match_InvalidZPR_3b32:
4976 : case Match_InvalidZPR_4b16:
4977 : case Match_InvalidZPR_4b32:
4978 : case Match_InvalidZPR_4b64:
4979 : case Match_InvalidSVEPredicateAnyReg:
4980 41 : case Match_InvalidSVEPattern:
4981 40 : case Match_InvalidSVEPredicateBReg:
4982 : case Match_InvalidSVEPredicateHReg:
4983 : case Match_InvalidSVEPredicateSReg:
4984 : case Match_InvalidSVEPredicateDReg:
4985 : case Match_InvalidSVEPredicate3bAnyReg:
4986 : case Match_InvalidSVEPredicate3bBReg:
4987 : case Match_InvalidSVEPredicate3bHReg:
4988 : case Match_InvalidSVEPredicate3bSReg:
4989 : case Match_InvalidSVEPredicate3bDReg:
4990 : case Match_InvalidSVEExactFPImmOperandHalfOne:
4991 : case Match_InvalidSVEExactFPImmOperandHalfTwo:
4992 : case Match_InvalidSVEExactFPImmOperandZeroOne:
4993 : case Match_MSR:
4994 : case Match_MRS: {
4995 : if (ErrorInfo >= Operands.size())
4996 : return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4997 50 : // Any time we get here, there's nothing fancy to do. Just get the
4998 41 : // operand SMLoc and display the diagnostic.
4999 : SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5000 9 : if (ErrorLoc == SMLoc())
5001 : ErrorLoc = IDLoc;
5002 : return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5003 9 : }
5004 : }
5005 0 :
5006 0 : llvm_unreachable("Implement any new match types added!");
5007 : }
5008 9 :
5009 : /// ParseDirective parses the AArch64-specific directives
5010 6 : bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5011 : const MCObjectFileInfo::Environment Format =
5012 2 : getContext().getObjectFileInfo()->getObjectFileType();
5013 : bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5014 :
5015 7 : StringRef IDVal = DirectiveID.getIdentifier();
5016 21 : SMLoc Loc = DirectiveID.getLoc();
5017 : if (IDVal == ".arch")
5018 : parseDirectiveArch(Loc);
5019 : else if (IDVal == ".cpu")
5020 : parseDirectiveCPU(Loc);
5021 41 : else if (IDVal == ".tlsdesccall")
5022 0 : parseDirectiveTLSDescCall(Loc);
5023 0 : else if (IDVal == ".ltorg" || IDVal == ".pool")
5024 : parseDirectiveLtorg(Loc);
5025 : else if (IDVal == ".unreq")
5026 41 : parseDirectiveUnreq(Loc);
5027 41 : else if (IDVal == ".inst")
5028 1 : parseDirectiveInst(Loc);
5029 1 : else if (IsMachO) {
5030 : if (IDVal == MCLOHDirectiveName())
5031 : parseDirectiveLOH(IDVal, Loc);
5032 40 : else
5033 120 : return true;
5034 : } else
5035 40 : return true;
5036 : return false;
5037 : }
5038 :
5039 : static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5040 2 : SmallVector<StringRef, 4> &RequestedExtensions) {
5041 2 : const bool NoCrypto =
5042 : (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5043 2 : "nocrypto") != std::end(RequestedExtensions));
5044 2 : const bool Crypto =
5045 0 : (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5046 0 : "crypto") != std::end(RequestedExtensions));
5047 :
5048 : if (!NoCrypto && Crypto) {
5049 2 : switch (ArchKind) {
5050 2 : default:
5051 0 : // Map 'generic' (and others) to sha2 and aes, because
5052 0 : // that was the traditional meaning of crypto.
5053 : case AArch64::ArchKind::ARMV8_1A:
5054 : case AArch64::ArchKind::ARMV8_2A:
5055 2 : case AArch64::ArchKind::ARMV8_3A:
5056 6 : RequestedExtensions.push_back("sha2");
5057 2 : RequestedExtensions.push_back("aes");
5058 2 : break;
5059 : case AArch64::ArchKind::ARMV8_4A:
5060 : case AArch64::ArchKind::ARMV8_5A:
5061 : RequestedExtensions.push_back("sm4");
5062 : RequestedExtensions.push_back("sha3");
5063 12 : RequestedExtensions.push_back("sha2");
5064 12 : RequestedExtensions.push_back("aes");
5065 12 : break;
5066 12 : }
5067 12 : } else if (NoCrypto) {
5068 0 : switch (ArchKind) {
5069 0 : default:
5070 : // Map 'generic' (and others) to sha2 and aes, because
5071 : // that was the traditional meaning of crypto.
5072 12 : case AArch64::ArchKind::ARMV8_1A:
5073 12 : case AArch64::ArchKind::ARMV8_2A:
5074 3 : case AArch64::ArchKind::ARMV8_3A:
5075 3 : RequestedExtensions.push_back("nosha2");
5076 : RequestedExtensions.push_back("noaes");
5077 : break;
5078 9 : case AArch64::ArchKind::ARMV8_4A:
5079 27 : case AArch64::ArchKind::ARMV8_5A:
5080 9 : RequestedExtensions.push_back("nosm4");
5081 9 : RequestedExtensions.push_back("nosha3");
5082 : RequestedExtensions.push_back("nosha2");
5083 : RequestedExtensions.push_back("noaes");
5084 : break;
5085 : }
5086 : }
5087 201 : }
5088 201 :
5089 : /// parseDirectiveArch
5090 : /// ::= .arch token
5091 : bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
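/// A hedged usage sketch (extension names are examples, not an exhaustive
/// list):
///   .arch armv8.2-a+crypto    // "crypto" is expanded via ExpandCryptoAEK
///   .arch armv8-a+nofp        // "no"-prefixed names disable an extension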
5092 201 : SMLoc ArchLoc = getLoc();
5093 12 :
5094 : StringRef Arch, ExtensionString;
5095 : std::tie(Arch, ExtensionString) =
5096 201 : getParser().parseStringToEndOfStatement().trim().split('+');
5097 :
5098 : AArch64::ArchKind ID = AArch64::parseArch(Arch);
5099 : if (ID == AArch64::ArchKind::INVALID)
5100 : return Error(ArchLoc, "unknown arch name");
5101 :
5102 201 : if (parseToken(AsmToken::EndOfStatement))
5103 185 : return true;
5104 174 :
5105 : // Get the architecture and extension features.
5106 : std::vector<StringRef> AArch64Features;
5107 112 : AArch64::getArchFeatures(ID, AArch64Features);
5108 112 : AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5109 73 : AArch64Features);
5110 5 :
5111 5 : MCSubtargetInfo &STI = copySTI();
5112 0 : std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5113 0 : STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5114 73 :
5115 64 : SmallVector<StringRef, 4> RequestedExtensions;
5116 62 : if (!ExtensionString.empty())
5117 62 : ExtensionString.split(RequestedExtensions, '+');
5118 21 :
5119 : ExpandCryptoAEK(ID, RequestedExtensions);
5120 :
5121 1 : FeatureBitset Features = STI.getFeatureBits();
5122 1 : for (auto Name : RequestedExtensions) {
5123 : bool EnableFeature = true;
5124 :
5125 : if (Name.startswith_lower("no")) {
5126 : EnableFeature = false;
5127 : Name = Name.substr(2);
5128 : }
5129 200 :
5130 400 : for (const auto &Extension : ExtensionMap) {
5131 : if (Extension.Name != Name)
5132 200 : continue;
5133 :
5134 : if (Extension.Features.none())
5135 : report_fatal_error("unsupported architectural extension: " + Name);
5136 :
5137 : FeatureBitset ToggleFeatures = EnableFeature
5138 230 : ? (~Features & Extension.Features)
5139 : : ( Features & Extension.Features);
5140 : uint64_t Features =
5141 : ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5142 : setAvailableFeatures(Features);
5143 230 : break;
5144 : }
5145 : }
5146 44 : return false;
5147 12 : }
5148 :
5149 44 : static SMLoc incrementLoc(SMLoc L, int Offset) {
5150 : return SMLoc::getFromPointer(L.getPointer() + Offset);
5151 : }
5152 :
5153 : /// parseDirectiveCPU
5154 : /// ::= .cpu id
5155 44 : bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
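/// A hedged usage sketch (CPU and extension names are examples):
///   .cpu cortex-a57+crc       // select CPU features, then toggle extensions
///   .cpu cortex-a53+nocrypto  // unknown names produce the errors below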
5156 26 : SMLoc CurLoc = getLoc();
5157 26 :
5158 : StringRef CPU, ExtensionString;
5159 : std::tie(CPU, ExtensionString) =
5160 24 : getParser().parseStringToEndOfStatement().trim().split('+');
5161 :
5162 2 : if (parseToken(AsmToken::EndOfStatement))
5163 2 : return true;
5164 :
5165 : SmallVector<StringRef, 4> RequestedExtensions;
5166 : if (!ExtensionString.empty())
5167 42 : ExtensionString.split(RequestedExtensions, '+');
5168 84 :
5169 42 : // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5170 : // once that is tablegen'ed
5171 : if (!getSTI().isCPUStringValid(CPU)) {
5172 : Error(CurLoc, "unknown CPU name");
5173 : return false;
5174 : }
5175 16786 :
5176 16786 : MCSubtargetInfo &STI = copySTI();
5177 353 : STI.setDefaultFeatures(CPU, "");
5178 : CurLoc = incrementLoc(CurLoc, CPU.size());
5179 :
5180 : ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5181 :
5182 : FeatureBitset Features = STI.getFeatureBits();
5183 : for (auto Name : RequestedExtensions) {
5184 16786 : // Advance source location past '+'.
5185 16786 : CurLoc = incrementLoc(CurLoc, 1);
5186 13184 :
5187 : bool EnableFeature = true;
5188 0 :
5189 0 : if (Name.startswith_lower("no")) {
5190 : EnableFeature = false;
5191 : Name = Name.substr(2);
5192 : }
5193 3602 :
5194 8 : bool FoundExtension = false;
5195 2 : for (const auto &Extension : ExtensionMap) {
5196 2 : if (Extension.Name != Name)
5197 : continue;
5198 :
5199 6 : if (Extension.Features.none())
5200 18 : report_fatal_error("unsupported architectural extension: " + Name);
5201 :
5202 : FeatureBitset ToggleFeatures = EnableFeature
5203 : ? (~Features & Extension.Features)
5204 3594 : : ( Features & Extension.Features);
5205 : uint64_t Features =
5206 3594 : ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5207 3594 : setAvailableFeatures(Features);
5208 1606 : FoundExtension = true;
5209 :
5210 3241 : break;
5211 75 : }
5212 :
5213 50 : if (!FoundExtension)
5214 : Error(CurLoc, "unsupported architectural extension");
5215 :
5216 7138 : CurLoc = incrementLoc(CurLoc, Name.size());
5217 : }
5218 : return false;
5219 : }
5220 3600 :
5221 : /// parseDirectiveInst
5222 3600 : /// ::= .inst opcode [, ...]
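/// A hedged usage sketch (the constants are just example encodings):
///   .inst 0xd503201f                // emit a raw 32-bit instruction word (NOP)
///   .inst 0xd503201f, 0xd65f03c0    // several words may follow, comma-separated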
5223 : bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5224 353 : if (getLexer().is(AsmToken::EndOfStatement))
5225 353 : return Error(Loc, "expected expression following '.inst' directive");
5226 353 :
5227 : auto parseOp = [&]() -> bool {
5228 : SMLoc L = getLoc();
5229 : const MCExpr *Expr;
5230 : if (check(getParser().parseExpression(Expr), L, "expected expression"))
5231 : return true;
5232 : const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5233 353 : if (check(!Value, L, "expected constant expression"))
5234 353 : return true;
5235 0 : getTargetStreamer().emitInst(Value->getValue());
5236 : return false;
5237 0 : };
5238 0 :
5239 : if (parseMany(parseOp))
5240 : return addErrorSuffix(" in '.inst' directive");
5241 : return false;
5242 353 : }
5243 0 :
5244 0 : // parseDirectiveTLSDescCall:
5245 0 : // ::= .tlsdesccall symbol
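// A hedged usage sketch (symbol name assumed): ".tlsdesccall var" emits no
// bytes itself; it marks the following "blr" of a TLS descriptor sequence so
// the linker-relaxable TLSDESC relocation is attached to that call.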
5246 : bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5247 : StringRef Name;
5248 0 : if (check(getParser().parseIdentifier(Name), L,
5249 0 : "expected symbol after directive") ||
5250 : parseToken(AsmToken::EndOfStatement))
5251 : return true;
5252 :
5253 353 : MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5254 : const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5255 353 : Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5256 353 :
5257 4 : MCInst Inst;
5258 : Inst.setOpcode(AArch64::TLSDESCCALL);
5259 : Inst.addOperand(MCOperand::createExpr(Expr));
5260 :
5261 : getParser().getStreamer().EmitInstruction(Inst, getSTI());
5262 : return false;
5263 : }
5264 :
5265 706 : /// ::= .loh <lohName | lohId> label1, ..., labelN
5266 : /// The number of arguments depends on the loh identifier.
5267 : bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
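/// A hedged usage sketch (LOH kind and labels are examples):
///   .loh AdrpAdd Lpage, Loff
/// where AdrpAdd takes two labels; other kinds take two or three labels.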
5268 : MCLOHType Kind;
5269 353 : if (getParser().getTok().isNot(AsmToken::Identifier)) {
5270 : if (getParser().getTok().isNot(AsmToken::Integer))
5271 353 : return TokError("expected an identifier or a number in directive");
5272 : // We successfully get a numeric value for the identifier.
5273 16433 : // Check if it is valid.
5274 16433 : int64_t Id = getParser().getTok().getIntVal();
5275 : if (Id <= -1U && !isValidMCLOHType(Id))
5276 : return TokError("invalid numeric identifier in directive");
5277 : Kind = (MCLOHType)Id;
5278 : } else {
5279 : StringRef Name = getTok().getIdentifier();
5280 : // We successfully parse an identifier.
5281 : // Check if it is a recognized one.
5282 16433 : int Id = MCLOHNameToId(Name);
5283 16433 :
5284 13184 : if (Id == -1)
5285 : return TokError("invalid identifier in directive");
5286 0 : Kind = (MCLOHType)Id;
5287 0 : }
5288 : // Consume the identifier.
5289 : Lex();
5290 : // Get the number of arguments of this LOH.
5291 3249 : int NbArgs = MCLOHIdToNbArgs(Kind);
5292 8 :
5293 2 : assert(NbArgs != -1 && "Invalid number of arguments");
5294 2 :
5295 : SmallVector<MCSymbol *, 3> Args;
5296 : for (int Idx = 0; Idx < NbArgs; ++Idx) {
5297 6 : StringRef Name;
5298 18 : if (getParser().parseIdentifier(Name))
5299 : return TokError("expected identifier in directive");
5300 : Args.push_back(getContext().getOrCreateSymbol(Name));
5301 :
5302 3241 : if (Idx + 1 == NbArgs)
5303 : break;
5304 3241 : if (parseToken(AsmToken::Comma,
5305 3241 : "unexpected token in '" + Twine(IDVal) + "' directive"))
5306 1602 : return true;
5307 : }
5308 3241 : if (parseToken(AsmToken::EndOfStatement,
5309 75 : "unexpected token in '" + Twine(IDVal) + "' directive"))
5310 : return true;
5311 50 :
5312 : getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5313 : return false;
5314 6432 : }
5315 :
5316 : /// parseDirectiveLtorg
5317 : /// ::= .ltorg | .pool
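/// A hedged usage sketch: ".ltorg" (or ".pool") dumps the constants
/// accumulated for "ldr rX, =value" pseudo-loads at this point, typically
/// placed after an unconditional branch so they stay within load range.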
5318 3247 : bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5319 : if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5320 3247 : return true;
5321 : getTargetStreamer().emitCurrentConstantPool();
5322 : return false;
5323 : }
5324 :
5325 : /// parseDirectiveReq
5326 17307 : /// ::= name .req registername
5327 17307 : bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
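/// A hedged usage sketch (alias and register names are examples):
///   foo .req w5
///   mov foo, w0        // "foo" now parses as w5
///   .unreq foo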
5328 : MCAsmParser &Parser = getParser();
5329 : Parser.Lex(); // Eat the '.req' token.
5330 17307 : SMLoc SRegLoc = getLoc();
5331 1562 : RegKind RegisterKind = RegKind::Scalar;
5332 15745 : unsigned RegNum;
5333 : OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5334 :
5335 : if (ParseRes != MatchOperand_Success) {
5336 : StringRef Kind;
5337 1567 : RegisterKind = RegKind::NeonVector;
5338 : ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5339 1567 :
5340 1084 : if (ParseRes == MatchOperand_ParseFail)
5341 3252 : return true;
5342 2168 :
5343 : if (ParseRes == MatchOperand_Success && !Kind.empty())
5344 : return Error(SRegLoc, "vector register without type specifier expected");
5345 : }
5346 :
5347 483 : if (ParseRes != MatchOperand_Success) {
5348 : StringRef Kind;
5349 : RegisterKind = RegKind::SVEDataVector;
5350 483 : ParseRes =
5351 966 : tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5352 0 :
5353 0 : if (ParseRes == MatchOperand_ParseFail)
5354 : return true;
5355 :
5356 : if (ParseRes == MatchOperand_Success && !Kind.empty())
5357 483 : return Error(SRegLoc,
5358 : "sve vector register without type specifier expected");
5359 : }
5360 :
5361 483 : if (ParseRes != MatchOperand_Success) {
5362 1 : StringRef Kind;
5363 1 : RegisterKind = RegKind::SVEPredicateVector;
5364 : ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5365 :
5366 482 : if (ParseRes == MatchOperand_ParseFail)
5367 : return true;
5368 482 :
5369 0 : if (ParseRes == MatchOperand_Success && !Kind.empty())
5370 0 : return Error(SRegLoc,
5371 : "sve predicate register without type specifier expected");
5372 482 : }
5373 :
5374 : if (ParseRes != MatchOperand_Success)
5375 482 : return Error(SRegLoc, "register name or alias expected");
5376 29 :
5377 58 : // Shouldn't be anything else.
5378 : if (parseToken(AsmToken::EndOfStatement,
5379 : "unexpected input in .req directive"))
5380 : return true;
5381 453 :
5382 906 : auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5383 : if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5384 453 : Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5385 :
5386 : return false;
5387 : }
5388 348 :
5389 : /// parseDirectiveUnreq
5390 : /// ::= .unreq registername
5391 : bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5392 : MCAsmParser &Parser = getParser();
5393 : if (getTok().isNot(AsmToken::Identifier))
5394 : return TokError("unexpected input in .unreq directive.");
5395 : RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5396 : Parser.Lex(); // Eat the identifier.
5397 : if (parseToken(AsmToken::EndOfStatement))
5398 : return addErrorSuffix("in '.unreq' directive");
5399 : return false;
5400 : }
5401 :
5402 : bool
5403 : AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5404 : AArch64MCExpr::VariantKind &ELFRefKind,
5405 : MCSymbolRefExpr::VariantKind &DarwinRefKind,
5406 : int64_t &Addend) {
5407 : ELFRefKind = AArch64MCExpr::VK_INVALID;
5408 : DarwinRefKind = MCSymbolRefExpr::VK_None;
5409 : Addend = 0;
5410 370 :
5411 22 : if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
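// Hedged examples of what this classifies (symbol names assumed):
//   ":lo12:sym"        -> an ELF reference kind, no addend
//   "sym@PAGEOFF + 4"  -> a Darwin reference kind with Addend = 4
// An operand mixing ELF and Darwin modifiers is not accepted.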
5412 20 : ELFRefKind = AE->getKind();
5413 : Expr = AE->getSubExpr();
5414 : }
5415 :
5416 : const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5417 : if (SE) {
5418 : // It's a simple symbol reference with no addend.
5419 : DarwinRefKind = SE->getKind();
5420 : return true;
5421 : }
5422 :
5423 : // Check that it looks like a symbol + an addend
5424 : MCValue Res;
5425 348 : bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5426 : if (!Relocatable || !Res.getSymA() || Res.getSymB())
5427 : return false;
5428 :
5429 226 : DarwinRefKind = Res.getSymA()->getKind();
5430 : Addend = Res.getConstant();
5431 226 :
5432 : // It's some symbol reference + a constant addend, but really
5433 226 : // shouldn't use both Darwin and ELF syntax.
5434 : return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5435 : DarwinRefKind == MCSymbolRefExpr::VK_None;
5436 226 : }
5437 226 :
5438 226 : /// Force static initialization.
5439 0 : extern "C" void LLVMInitializeAArch64AsmParser() {
5440 226 : RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5441 : RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5442 226 : RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5443 43 : }
5444 5 :
5445 : #define GET_REGISTER_MATCHER
5446 : #define GET_SUBTARGET_FEATURE_NAME
5447 : #define GET_MATCHER_IMPLEMENTATION
5448 663 : #define GET_MNEMONIC_SPELL_CHECKER
5449 221 : #include "AArch64GenAsmMatcher.inc"
5450 221 :
5451 : // Define this matcher function after the auto-generated include so we
5452 : // have the match class enum definitions.
5453 : unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5454 : unsigned Kind) {
5455 : AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5456 2598 : // If the kind is a token for a literal immediate, check if our asm
5457 2598 : // operand matches. This is for InstAliases which have a fixed-value
5458 2598 : // immediate in the syntax.
5459 2598 : int64_t ExpectedVal;
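// For example (hedged): match class MCK__35_8 stands for the literal token
// "#8", so an alias operand written "#8" matches only when the parsed
// immediate evaluates to exactly 8.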
5460 : switch (Kind) {
5461 563 : default:
5462 : return Match_InvalidOperand;
5463 : case MCK__35_0:
5464 : ExpectedVal = 0;
5465 : break;
5466 : case MCK__35_1:
5467 : ExpectedVal = 1;
5468 : break;
5469 : case MCK__35_12:
5470 : ExpectedVal = 12;
5471 : break;
5472 : case MCK__35_16:
5473 : ExpectedVal = 16;
5474 : break;
5475 : case MCK__35_2:
5476 : ExpectedVal = 2;
5477 2035 : break;
5478 563 : case MCK__35_24:
5479 : ExpectedVal = 24;
5480 2035 : break;
5481 2035 : case MCK__35_3:
5482 : ExpectedVal = 3;
5483 : break;
5484 : case MCK__35_32:
5485 2542 : ExpectedVal = 32;
5486 505 : break;
5487 505 : case MCK__35_4:
5488 : ExpectedVal = 4;
5489 : break;
5490 10 : case MCK__35_48:
5491 10 : ExpectedVal = 48;
5492 : break;
5493 : case MCK__35_6:
5494 : ExpectedVal = 6;
5495 495 : break;
5496 1485 : case MCK__35_64:
5497 495 : ExpectedVal = 64;
5498 495 : break;
5499 : case MCK__35_8:
5500 : ExpectedVal = 8;
5501 : break;
5502 : }
5503 1530 : if (!Op.isImm())
5504 1530 : return Match_InvalidOperand;
5505 1530 : const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5506 67 : if (!CE)
5507 56 : return Match_InvalidOperand;
5508 56 : if (CE->getValue() == ExpectedVal)
5509 : return Match_Success;
5510 : return Match_InvalidOperand;
5511 : }
5512 1474 :
5513 : OperandMatchResultTy
5514 : AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5515 1474 :
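  // Hedged example: register pairs such as "x0, x1" (or "w4, w5"), as used by
  // the CASP family, must start at an even register and name two consecutive
  // registers of the same size; anything else is rejected below.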
5516 : SMLoc S = getLoc();
5517 1 :
5518 1 : if (getParser().getTok().isNot(AsmToken::Identifier)) {
5519 : Error(S, "expected register");
5520 : return MatchOperand_ParseFail;
5521 1473 : }
5522 4419 :
5523 1473 : unsigned FirstReg;
5524 1473 : OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5525 : if (Res != MatchOperand_Success)
5526 : return MatchOperand_ParseFail;
5527 :
5528 : const MCRegisterClass &WRegClass =
5529 : AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5530 : const MCRegisterClass &XRegClass =
5531 : AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5532 :
5533 : bool isXReg = XRegClass.contains(FirstReg),
5534 : isWReg = WRegClass.contains(FirstReg);
5535 : if (!isXReg && !isWReg) {
5536 : Error(S, "expected first even register of a "
5537 : "consecutive same-size even/odd register pair");
5538 : return MatchOperand_ParseFail;
5539 : }
5540 :
5541 : const MCRegisterInfo *RI = getContext().getRegisterInfo();
5542 : unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5543 :
5544 : if (FirstEncoding & 0x1) {
5545 : Error(S, "expected first even register of a "
5546 : "consecutive same-size even/odd register pair");
5547 : return MatchOperand_ParseFail;
5548 : }
5549 :
5550 : if (getParser().getTok().isNot(AsmToken::Comma)) {
5551 : Error(getLoc(), "expected comma");
5552 : return MatchOperand_ParseFail;
5553 93 : }
5554 93 : // Eat the comma
5555 : getParser().Lex();
5556 93 :
5557 : SMLoc E = getLoc();
5558 90 : unsigned SecondReg;
5559 : Res = tryParseScalarRegister(SecondReg);
5560 90 : if (Res != MatchOperand_Success)
5561 : return MatchOperand_ParseFail;
5562 40 :
5563 : if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5564 : (isXReg && !XRegClass.contains(SecondReg)) ||
5565 : (isWReg && !WRegClass.contains(SecondReg))) {
5566 : Error(E,"expected second odd register of a "
5567 : "consecutive same-size even/odd register pair");
5568 : return MatchOperand_ParseFail;
5569 : }
5570 :
5571 : unsigned Pair = 0;
5572 40 : if (isXReg) {
5573 : Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5574 93 : &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5575 : } else {
5576 274 : Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5577 : &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5578 274 : }
5579 274 :
5580 274 : Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5581 274 : getLoc(), getContext()));
5582 :
5583 274 : return MatchOperand_Success;
5584 : }
5585 822 :
5586 274 : template <bool ParseShiftExtend, bool ParseSuffix>
5587 548 : OperandMatchResultTy
5588 548 : AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5589 548 : const SMLoc S = getLoc();
5590 548 : // Check for a SVE vector register specifier first.
5591 274 : unsigned RegNum;
5592 548 : StringRef Kind;
5593 274 :
5594 274 : OperandMatchResultTy Res =
5595 : tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5596 :
5597 : if (Res != MatchOperand_Success)
5598 371 : return Res;
5599 :
5600 371 : if (ParseSuffix && Kind.empty())
5601 0 : return MatchOperand_NoMatch;
5602 :
5603 371 : const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5604 1113 : if (!KindRes)
5605 371 : return MatchOperand_NoMatch;
5606 :
5607 371 : unsigned ElementWidth = KindRes->second;
5608 371 :
5609 371 : // No shift/extend is the default.
5610 371 : if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5611 : Operands.push_back(AArch64Operand::CreateVectorReg(
5612 : RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5613 12 :
5614 12 : OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5615 1 : if (Res == MatchOperand_ParseFail)
5616 22 : return MatchOperand_ParseFail;
5617 0 : return MatchOperand_Success;
5618 0 : }
5619 0 :
5620 : // Eat the comma
5621 11 : getParser().Lex();
5622 :
5623 83 : // Match the shift
5624 83 : SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5625 0 : Res = tryParseOptionalShiftExtend(ExtOpnd);
5626 166 : if (Res != MatchOperand_Success)
5627 76 : return Res;
5628 38 :
5629 38 : auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5630 : Operands.push_back(AArch64Operand::CreateVectorReg(
5631 45 : RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5632 : getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5633 40 : Ext->hasShiftExtendAmount()));
5634 40 :
5635 0 : return MatchOperand_Success;
5636 80 : }
5637 4 :
5638 2 : OperandMatchResultTy
5639 2 : AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5640 : MCAsmParser &Parser = getParser();
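  // Hedged examples: a predicate pattern is either a named pattern such as
  // "all", "pow2", "vl64" or "mul3", or an immediate like "#31"
  // (e.g. "cntd x0, vl64").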
5641 38 :
5642 : SMLoc SS = getLoc();
5643 221 : const AsmToken &TokE = Parser.getTok();
5644 221 : bool IsHash = TokE.is(AsmToken::Hash);
5645 0 :
5646 442 : if (!IsHash && TokE.isNot(AsmToken::Identifier))
5647 100 : return MatchOperand_NoMatch;
5648 50 :
5649 50 : int64_t Pattern;
5650 : if (IsHash) {
5651 171 : Parser.Lex(); // Eat hash
5652 :
5653 15 : // Parse the immediate operand.
5654 15 : const MCExpr *ImmVal;
5655 3 : SS = getLoc();
5656 24 : if (Parser.parseExpression(ImmVal))
5657 : return MatchOperand_ParseFail;
5658 6 :
5659 3 : auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5660 3 : if (!MCE)
5661 : return MatchOperand_ParseFail;
5662 :
5663 : Pattern = MCE->getValue();
5664 : } else {
5665 : // Parse the pattern
5666 : auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5667 : if (!Pat)
5668 9 : return MatchOperand_NoMatch;
5669 :
5670 : Parser.Lex();
5671 274 : Pattern = Pat->Encoding;
5672 : assert(Pattern >= 0 && Pattern < 32);
5673 548 : }
5674 :
5675 : Operands.push_back(
5676 : AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5677 274 : SS, getLoc(), getContext()));
5678 181 :
5679 0 : return MatchOperand_Success;
5680 : }
|