File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
Warning: line 6348, column 34: Called C++ object pointer is null
1 | //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #include "AArch64InstrInfo.h" |
10 | #include "MCTargetDesc/AArch64AddressingModes.h" |
11 | #include "MCTargetDesc/AArch64InstPrinter.h" |
12 | #include "MCTargetDesc/AArch64MCExpr.h" |
13 | #include "MCTargetDesc/AArch64MCTargetDesc.h" |
14 | #include "MCTargetDesc/AArch64TargetStreamer.h" |
15 | #include "TargetInfo/AArch64TargetInfo.h" |
16 | #include "Utils/AArch64BaseInfo.h" |
17 | #include "llvm/ADT/APFloat.h" |
18 | #include "llvm/ADT/APInt.h" |
19 | #include "llvm/ADT/ArrayRef.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/SmallSet.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/StringExtras.h" |
24 | #include "llvm/ADT/StringMap.h" |
25 | #include "llvm/ADT/StringRef.h" |
26 | #include "llvm/ADT/StringSwitch.h" |
27 | #include "llvm/ADT/Twine.h" |
28 | #include "llvm/MC/MCContext.h" |
29 | #include "llvm/MC/MCExpr.h" |
30 | #include "llvm/MC/MCInst.h" |
31 | #include "llvm/MC/MCLinkerOptimizationHint.h" |
32 | #include "llvm/MC/MCObjectFileInfo.h" |
33 | #include "llvm/MC/MCParser/MCAsmLexer.h" |
34 | #include "llvm/MC/MCParser/MCAsmParser.h" |
35 | #include "llvm/MC/MCParser/MCAsmParserExtension.h" |
36 | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
37 | #include "llvm/MC/MCParser/MCTargetAsmParser.h" |
38 | #include "llvm/MC/MCRegisterInfo.h" |
39 | #include "llvm/MC/MCStreamer.h" |
40 | #include "llvm/MC/MCSubtargetInfo.h" |
41 | #include "llvm/MC/MCSymbol.h" |
42 | #include "llvm/MC/MCTargetOptions.h" |
43 | #include "llvm/MC/MCValue.h" |
44 | #include "llvm/MC/SubtargetFeature.h" |
45 | #include "llvm/MC/TargetRegistry.h" |
46 | #include "llvm/Support/Casting.h" |
47 | #include "llvm/Support/Compiler.h" |
48 | #include "llvm/Support/ErrorHandling.h" |
49 | #include "llvm/Support/MathExtras.h" |
50 | #include "llvm/Support/SMLoc.h" |
51 | #include "llvm/Support/AArch64TargetParser.h" |
52 | #include "llvm/Support/TargetParser.h" |
53 | #include "llvm/Support/raw_ostream.h" |
54 | #include <cassert> |
55 | #include <cctype> |
56 | #include <cstdint> |
57 | #include <cstdio> |
58 | #include <string> |
59 | #include <tuple> |
60 | #include <utility> |
61 | #include <vector> |
62 | |
63 | using namespace llvm; |
64 | |
65 | namespace { |
66 | |
67 | enum class RegKind { |
68 | Scalar, |
69 | NeonVector, |
70 | SVEDataVector, |
71 | SVEPredicateVector, |
72 | Matrix |
73 | }; |
74 | |
75 | enum class MatrixKind { Array, Tile, Row, Col }; |
76 | |
77 | enum RegConstraintEqualityTy { |
78 | EqualsReg, |
79 | EqualsSuperReg, |
80 | EqualsSubReg |
81 | }; |
82 | |
83 | class AArch64AsmParser : public MCTargetAsmParser { |
84 | private: |
85 | StringRef Mnemonic; ///< Instruction mnemonic. |
86 | |
87 | // Map of register aliases registered via the .req directive. |
88 | StringMap<std::pair<RegKind, unsigned>> RegisterReqs; |
89 | |
90 | class PrefixInfo { |
91 | public: |
92 | static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) { |
93 | PrefixInfo Prefix; |
94 | switch (Inst.getOpcode()) { |
95 | case AArch64::MOVPRFX_ZZ: |
96 | Prefix.Active = true; |
97 | Prefix.Dst = Inst.getOperand(0).getReg(); |
98 | break; |
99 | case AArch64::MOVPRFX_ZPmZ_B: |
100 | case AArch64::MOVPRFX_ZPmZ_H: |
101 | case AArch64::MOVPRFX_ZPmZ_S: |
102 | case AArch64::MOVPRFX_ZPmZ_D: |
103 | Prefix.Active = true; |
104 | Prefix.Predicated = true; |
105 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; |
106 | assert(Prefix.ElementSize != AArch64::ElementSizeNone && |
107 | "No destructive element size set for movprfx"); |
108 | Prefix.Dst = Inst.getOperand(0).getReg(); |
109 | Prefix.Pg = Inst.getOperand(2).getReg(); |
110 | break; |
111 | case AArch64::MOVPRFX_ZPzZ_B: |
112 | case AArch64::MOVPRFX_ZPzZ_H: |
113 | case AArch64::MOVPRFX_ZPzZ_S: |
114 | case AArch64::MOVPRFX_ZPzZ_D: |
115 | Prefix.Active = true; |
116 | Prefix.Predicated = true; |
117 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; |
118 | assert(Prefix.ElementSize != AArch64::ElementSizeNone && |
119 | "No destructive element size set for movprfx"); |
120 | Prefix.Dst = Inst.getOperand(0).getReg(); |
121 | Prefix.Pg = Inst.getOperand(1).getReg(); |
122 | break; |
123 | default: |
124 | break; |
125 | } |
126 | |
127 | return Prefix; |
128 | } |
129 | |
130 | PrefixInfo() : Active(false), Predicated(false) {} |
131 | bool isActive() const { return Active; } |
132 | bool isPredicated() const { return Predicated; } |
133 | unsigned getElementSize() const { |
134 | assert(Predicated); |
135 | return ElementSize; |
136 | } |
137 | unsigned getDstReg() const { return Dst; } |
138 | unsigned getPgReg() const { |
139 | assert(Predicated); |
140 | return Pg; |
141 | } |
142 | |
143 | private: |
144 | bool Active; |
145 | bool Predicated; |
146 | unsigned ElementSize; |
147 | unsigned Dst; |
148 | unsigned Pg; |
149 | } NextPrefix; |
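The PrefixInfo just defined is kept in the NextPrefix member so that validateInstruction (declared further down) can check each instruction against the MOVPRFX that preceded it. A minimal standalone sketch of that consume-and-reset pattern, using hypothetical names (Prefix, Checker) that are not part of this file:

    #include <cassert>

    struct Prefix { bool Active = false; };

    struct Checker {
      Prefix Next;                        // state recorded by the last movprfx
      void onMovprfx() { Next.Active = true; }
      bool onInstruction(bool IsDestructive) {
        Prefix Cur = Next;                // consume the recorded prefix...
        Next = Prefix{};                  // ...and reset it for the next pair
        // A movprfx must be followed by a compatible destructive instruction.
        return !Cur.Active || IsDestructive;
      }
    };

    int main() {
      Checker C;
      C.onMovprfx();
      assert(!C.onInstruction(/*IsDestructive=*/false)); // rejected
      assert(C.onInstruction(false));                    // nothing pending now
    }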
150 | |
151 | AArch64TargetStreamer &getTargetStreamer() { |
152 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); |
153 | return static_cast<AArch64TargetStreamer &>(TS); |
154 | } |
155 | |
156 | SMLoc getLoc() const { return getParser().getTok().getLoc(); } |
157 | |
158 | bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); |
159 | void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S); |
160 | AArch64CC::CondCode parseCondCodeString(StringRef Cond, |
161 | std::string &Suggestion); |
162 | bool parseCondCode(OperandVector &Operands, bool invertCondCode); |
163 | unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind); |
164 | bool parseRegister(OperandVector &Operands); |
165 | bool parseSymbolicImmVal(const MCExpr *&ImmVal); |
166 | bool parseNeonVectorList(OperandVector &Operands); |
167 | bool parseOptionalMulOperand(OperandVector &Operands); |
168 | bool parseKeywordOperand(OperandVector &Operands); |
169 | bool parseOperand(OperandVector &Operands, bool isCondCode, |
170 | bool invertCondCode); |
171 | bool parseImmExpr(int64_t &Out); |
172 | bool parseComma(); |
173 | bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First, |
174 | unsigned Last); |
175 | |
176 | bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, |
177 | OperandVector &Operands); |
178 | |
179 | bool parseDirectiveArch(SMLoc L); |
180 | bool parseDirectiveArchExtension(SMLoc L); |
181 | bool parseDirectiveCPU(SMLoc L); |
182 | bool parseDirectiveInst(SMLoc L); |
183 | |
184 | bool parseDirectiveTLSDescCall(SMLoc L); |
185 | |
186 | bool parseDirectiveLOH(StringRef LOH, SMLoc L); |
187 | bool parseDirectiveLtorg(SMLoc L); |
188 | |
189 | bool parseDirectiveReq(StringRef Name, SMLoc L); |
190 | bool parseDirectiveUnreq(SMLoc L); |
191 | bool parseDirectiveCFINegateRAState(); |
192 | bool parseDirectiveCFIBKeyFrame(); |
193 | |
194 | bool parseDirectiveVariantPCS(SMLoc L); |
195 | |
196 | bool parseDirectiveSEHAllocStack(SMLoc L); |
197 | bool parseDirectiveSEHPrologEnd(SMLoc L); |
198 | bool parseDirectiveSEHSaveR19R20X(SMLoc L); |
199 | bool parseDirectiveSEHSaveFPLR(SMLoc L); |
200 | bool parseDirectiveSEHSaveFPLRX(SMLoc L); |
201 | bool parseDirectiveSEHSaveReg(SMLoc L); |
202 | bool parseDirectiveSEHSaveRegX(SMLoc L); |
203 | bool parseDirectiveSEHSaveRegP(SMLoc L); |
204 | bool parseDirectiveSEHSaveRegPX(SMLoc L); |
205 | bool parseDirectiveSEHSaveLRPair(SMLoc L); |
206 | bool parseDirectiveSEHSaveFReg(SMLoc L); |
207 | bool parseDirectiveSEHSaveFRegX(SMLoc L); |
208 | bool parseDirectiveSEHSaveFRegP(SMLoc L); |
209 | bool parseDirectiveSEHSaveFRegPX(SMLoc L); |
210 | bool parseDirectiveSEHSetFP(SMLoc L); |
211 | bool parseDirectiveSEHAddFP(SMLoc L); |
212 | bool parseDirectiveSEHNop(SMLoc L); |
213 | bool parseDirectiveSEHSaveNext(SMLoc L); |
214 | bool parseDirectiveSEHEpilogStart(SMLoc L); |
215 | bool parseDirectiveSEHEpilogEnd(SMLoc L); |
216 | bool parseDirectiveSEHTrapFrame(SMLoc L); |
217 | bool parseDirectiveSEHMachineFrame(SMLoc L); |
218 | bool parseDirectiveSEHContext(SMLoc L); |
219 | bool parseDirectiveSEHClearUnwoundToCall(SMLoc L); |
220 | |
221 | bool validateInstruction(MCInst &Inst, SMLoc &IDLoc, |
222 | SmallVectorImpl<SMLoc> &Loc); |
223 | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
224 | OperandVector &Operands, MCStreamer &Out, |
225 | uint64_t &ErrorInfo, |
226 | bool MatchingInlineAsm) override; |
227 | /// @name Auto-generated Match Functions |
228 | /// { |
229 | |
230 | #define GET_ASSEMBLER_HEADER |
231 | #include "AArch64GenAsmMatcher.inc" |
232 | |
233 | /// } |
234 | |
235 | OperandMatchResultTy tryParseScalarRegister(unsigned &Reg); |
236 | OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind, |
237 | RegKind MatchKind); |
238 | OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands); |
239 | OperandMatchResultTy tryParseSVCR(OperandVector &Operands); |
240 | OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands); |
241 | OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands); |
242 | OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands); |
243 | OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands); |
244 | OperandMatchResultTy tryParseSysReg(OperandVector &Operands); |
245 | OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands); |
246 | template <bool IsSVEPrefetch = false> |
247 | OperandMatchResultTy tryParsePrefetch(OperandVector &Operands); |
248 | OperandMatchResultTy tryParsePSBHint(OperandVector &Operands); |
249 | OperandMatchResultTy tryParseBTIHint(OperandVector &Operands); |
250 | OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands); |
251 | OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands); |
252 | template<bool AddFPZeroAsLiteral> |
253 | OperandMatchResultTy tryParseFPImm(OperandVector &Operands); |
254 | OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands); |
255 | OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands); |
256 | bool tryParseNeonVectorRegister(OperandVector &Operands); |
257 | OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands); |
258 | OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands); |
259 | template <bool ParseShiftExtend, |
260 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg> |
261 | OperandMatchResultTy tryParseGPROperand(OperandVector &Operands); |
262 | template <bool ParseShiftExtend, bool ParseSuffix> |
263 | OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands); |
264 | OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands); |
265 | template <RegKind VectorKind> |
266 | OperandMatchResultTy tryParseVectorList(OperandVector &Operands, |
267 | bool ExpectMatch = false); |
268 | OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands); |
269 | OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands); |
270 | OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands); |
271 | |
272 | public: |
273 | enum AArch64MatchResultTy { |
274 | Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, |
275 | #define GET_OPERAND_DIAGNOSTIC_TYPES |
276 | #include "AArch64GenAsmMatcher.inc" |
277 | }; |
278 | bool IsILP32; |
279 | |
280 | AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, |
281 | const MCInstrInfo &MII, const MCTargetOptions &Options) |
282 | : MCTargetAsmParser(Options, STI, MII) { |
283 | IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; |
284 | MCAsmParserExtension::Initialize(Parser); |
285 | MCStreamer &S = getParser().getStreamer(); |
286 | if (S.getTargetStreamer() == nullptr) |
287 | new AArch64TargetStreamer(S); |
288 | |
289 | // Alias .hword/.word/.[dx]word to the target-independent |
290 | // .2byte/.4byte/.8byte directives as they have the same form and |
291 | // semantics: |
292 | /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ] |
293 | Parser.addAliasForDirective(".hword", ".2byte"); |
294 | Parser.addAliasForDirective(".word", ".4byte"); |
295 | Parser.addAliasForDirective(".dword", ".8byte"); |
296 | Parser.addAliasForDirective(".xword", ".8byte"); |
297 | |
298 | // Initialize the set of available features. |
299 | setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits())); |
300 | } |
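The four addAliasForDirective calls above make the data directives interchangeable: once registered, ".xword 1" parses exactly like ".8byte 1". A trivial sketch of the lookup those aliases imply (std::map stands in for the parser's internal alias table, whose real type is not shown in this file):

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      const std::map<std::string, std::string> Aliases = {
          {".hword", ".2byte"}, {".word", ".4byte"},
          {".dword", ".8byte"}, {".xword", ".8byte"}};
      // Both 64-bit spellings resolve to the same target-independent form.
      assert(Aliases.at(".dword") == Aliases.at(".xword"));
    }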
301 | |
302 | bool regsEqual(const MCParsedAsmOperand &Op1, |
303 | const MCParsedAsmOperand &Op2) const override; |
304 | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, |
305 | SMLoc NameLoc, OperandVector &Operands) override; |
306 | bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; |
307 | OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc, |
308 | SMLoc &EndLoc) override; |
309 | bool ParseDirective(AsmToken DirectiveID) override; |
310 | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, |
311 | unsigned Kind) override; |
312 | |
313 | static bool classifySymbolRef(const MCExpr *Expr, |
314 | AArch64MCExpr::VariantKind &ELFRefKind, |
315 | MCSymbolRefExpr::VariantKind &DarwinRefKind, |
316 | int64_t &Addend); |
317 | }; |
318 | |
319 | /// AArch64Operand - Instances of this class represent a parsed AArch64 machine |
320 | /// instruction. |
321 | class AArch64Operand : public MCParsedAsmOperand { |
322 | private: |
323 | enum KindTy { |
324 | k_Immediate, |
325 | k_ShiftedImm, |
326 | k_CondCode, |
327 | k_Register, |
328 | k_MatrixRegister, |
329 | k_MatrixTileList, |
330 | k_SVCR, |
331 | k_VectorList, |
332 | k_VectorIndex, |
333 | k_Token, |
334 | k_SysReg, |
335 | k_SysCR, |
336 | k_Prefetch, |
337 | k_ShiftExtend, |
338 | k_FPImm, |
339 | k_Barrier, |
340 | k_PSBHint, |
341 | k_BTIHint, |
342 | } Kind; |
343 | |
344 | SMLoc StartLoc, EndLoc; |
345 | |
346 | struct TokOp { |
347 | const char *Data; |
348 | unsigned Length; |
349 | bool IsSuffix; // Is the operand actually a suffix on the mnemonic. |
350 | }; |
351 | |
352 | // Separate shift/extend operand. |
353 | struct ShiftExtendOp { |
354 | AArch64_AM::ShiftExtendType Type; |
355 | unsigned Amount; |
356 | bool HasExplicitAmount; |
357 | }; |
358 | |
359 | struct RegOp { |
360 | unsigned RegNum; |
361 | RegKind Kind; |
362 | int ElementWidth; |
363 | |
364 | // The register may be allowed as a different register class, |
365 | // e.g. for GPR64as32 or GPR32as64. |
366 | RegConstraintEqualityTy EqualityTy; |
367 | |
368 | // In some cases the shift/extend needs to be explicitly parsed together |
369 | // with the register, rather than as a separate operand. This is needed |
370 | // for addressing modes where the instruction as a whole dictates the |
371 | // scaling/extend, rather than specific bits in the instruction. |
372 | // By parsing them as a single operand, we avoid the need to pass an |
373 | // extra operand in all CodeGen patterns (because all operands need to |
374 | // have an associated value), and we avoid the need to update TableGen to |
375 | // accept operands that have no associated bits in the instruction. |
376 | // |
377 | // An added benefit of parsing them together is that the assembler |
378 | // can give a sensible diagnostic if the scaling is not correct. |
379 | // |
380 | // The default is 'lsl #0' (HasExplicitAmount = false) if no |
381 | // ShiftExtend is specified. |
382 | ShiftExtendOp ShiftExtend; |
383 | }; |
384 | |
385 | struct MatrixRegOp { |
386 | unsigned RegNum; |
387 | unsigned ElementWidth; |
388 | MatrixKind Kind; |
389 | }; |
390 | |
391 | struct MatrixTileListOp { |
392 | unsigned RegMask = 0; |
393 | }; |
394 | |
395 | struct VectorListOp { |
396 | unsigned RegNum; |
397 | unsigned Count; |
398 | unsigned NumElements; |
399 | unsigned ElementWidth; |
400 | RegKind RegisterKind; |
401 | }; |
402 | |
403 | struct VectorIndexOp { |
404 | int Val; |
405 | }; |
406 | |
407 | struct ImmOp { |
408 | const MCExpr *Val; |
409 | }; |
410 | |
411 | struct ShiftedImmOp { |
412 | const MCExpr *Val; |
413 | unsigned ShiftAmount; |
414 | }; |
415 | |
416 | struct CondCodeOp { |
417 | AArch64CC::CondCode Code; |
418 | }; |
419 | |
420 | struct FPImmOp { |
421 | uint64_t Val; // APFloat value bitcasted to uint64_t. |
422 | bool IsExact; // describes whether parsed value was exact. |
423 | }; |
424 | |
425 | struct BarrierOp { |
426 | const char *Data; |
427 | unsigned Length; |
428 | unsigned Val; // Not the enum since not all values have names. |
429 | bool HasnXSModifier; |
430 | }; |
431 | |
432 | struct SysRegOp { |
433 | const char *Data; |
434 | unsigned Length; |
435 | uint32_t MRSReg; |
436 | uint32_t MSRReg; |
437 | uint32_t PStateField; |
438 | }; |
439 | |
440 | struct SysCRImmOp { |
441 | unsigned Val; |
442 | }; |
443 | |
444 | struct PrefetchOp { |
445 | const char *Data; |
446 | unsigned Length; |
447 | unsigned Val; |
448 | }; |
449 | |
450 | struct PSBHintOp { |
451 | const char *Data; |
452 | unsigned Length; |
453 | unsigned Val; |
454 | }; |
455 | |
456 | struct BTIHintOp { |
457 | const char *Data; |
458 | unsigned Length; |
459 | unsigned Val; |
460 | }; |
461 | |
462 | struct SVCROp { |
463 | const char *Data; |
464 | unsigned Length; |
465 | unsigned PStateField; |
466 | }; |
467 | |
468 | union { |
469 | struct TokOp Tok; |
470 | struct RegOp Reg; |
471 | struct MatrixRegOp MatrixReg; |
472 | struct MatrixTileListOp MatrixTileList; |
473 | struct VectorListOp VectorList; |
474 | struct VectorIndexOp VectorIndex; |
475 | struct ImmOp Imm; |
476 | struct ShiftedImmOp ShiftedImm; |
477 | struct CondCodeOp CondCode; |
478 | struct FPImmOp FPImm; |
479 | struct BarrierOp Barrier; |
480 | struct SysRegOp SysReg; |
481 | struct SysCRImmOp SysCRImm; |
482 | struct PrefetchOp Prefetch; |
483 | struct PSBHintOp PSBHint; |
484 | struct BTIHintOp BTIHint; |
485 | struct ShiftExtendOp ShiftExtend; |
486 | struct SVCROp SVCR; |
487 | }; |
488 | |
489 | // Keep the MCContext around as the MCExprs may need manipulated during |
490 | // the add<>Operands() calls. |
491 | MCContext &Ctx; |
492 | |
493 | public: |
494 | AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {} |
495 | |
496 | AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) { |
497 | Kind = o.Kind; |
498 | StartLoc = o.StartLoc; |
499 | EndLoc = o.EndLoc; |
500 | switch (Kind) { |
501 | case k_Token: |
502 | Tok = o.Tok; |
503 | break; |
504 | case k_Immediate: |
505 | Imm = o.Imm; |
506 | break; |
507 | case k_ShiftedImm: |
508 | ShiftedImm = o.ShiftedImm; |
509 | break; |
510 | case k_CondCode: |
511 | CondCode = o.CondCode; |
512 | break; |
513 | case k_FPImm: |
514 | FPImm = o.FPImm; |
515 | break; |
516 | case k_Barrier: |
517 | Barrier = o.Barrier; |
518 | break; |
519 | case k_Register: |
520 | Reg = o.Reg; |
521 | break; |
522 | case k_MatrixRegister: |
523 | MatrixReg = o.MatrixReg; |
524 | break; |
525 | case k_MatrixTileList: |
526 | MatrixTileList = o.MatrixTileList; |
527 | break; |
528 | case k_VectorList: |
529 | VectorList = o.VectorList; |
530 | break; |
531 | case k_VectorIndex: |
532 | VectorIndex = o.VectorIndex; |
533 | break; |
534 | case k_SysReg: |
535 | SysReg = o.SysReg; |
536 | break; |
537 | case k_SysCR: |
538 | SysCRImm = o.SysCRImm; |
539 | break; |
540 | case k_Prefetch: |
541 | Prefetch = o.Prefetch; |
542 | break; |
543 | case k_PSBHint: |
544 | PSBHint = o.PSBHint; |
545 | break; |
546 | case k_BTIHint: |
547 | BTIHint = o.BTIHint; |
548 | break; |
549 | case k_ShiftExtend: |
550 | ShiftExtend = o.ShiftExtend; |
551 | break; |
552 | case k_SVCR: |
553 | SVCR = o.SVCR; |
554 | break; |
555 | } |
556 | } |
557 | |
558 | /// getStartLoc - Get the location of the first token of this operand. |
559 | SMLoc getStartLoc() const override { return StartLoc; } |
560 | /// getEndLoc - Get the location of the last token of this operand. |
561 | SMLoc getEndLoc() const override { return EndLoc; } |
562 | |
563 | StringRef getToken() const { |
564 | assert(Kind == k_Token && "Invalid access!"); |
565 | return StringRef(Tok.Data, Tok.Length); |
566 | } |
567 | |
568 | bool isTokenSuffix() const { |
569 | assert(Kind == k_Token && "Invalid access!"); |
570 | return Tok.IsSuffix; |
571 | } |
572 | |
573 | const MCExpr *getImm() const { |
574 | assert(Kind == k_Immediate && "Invalid access!"); |
575 | return Imm.Val; |
576 | } |
577 | |
578 | const MCExpr *getShiftedImmVal() const { |
579 | assert(Kind == k_ShiftedImm && "Invalid access!"); |
580 | return ShiftedImm.Val; |
581 | } |
582 | |
583 | unsigned getShiftedImmShift() const { |
584 | assert(Kind == k_ShiftedImm && "Invalid access!"); |
585 | return ShiftedImm.ShiftAmount; |
586 | } |
587 | |
588 | AArch64CC::CondCode getCondCode() const { |
589 | assert(Kind == k_CondCode && "Invalid access!"); |
590 | return CondCode.Code; |
591 | } |
592 | |
593 | APFloat getFPImm() const { |
594 | assert (Kind == k_FPImm && "Invalid access!"); |
595 | return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true)); |
596 | } |
597 | |
598 | bool getFPImmIsExact() const { |
599 | assert (Kind == k_FPImm && "Invalid access!"); |
600 | return FPImm.IsExact; |
601 | } |
602 | |
603 | unsigned getBarrier() const { |
604 | assert(Kind == k_Barrier && "Invalid access!"); |
605 | return Barrier.Val; |
606 | } |
607 | |
608 | StringRef getBarrierName() const { |
609 | assert(Kind == k_Barrier && "Invalid access!"); |
610 | return StringRef(Barrier.Data, Barrier.Length); |
611 | } |
612 | |
613 | bool getBarriernXSModifier() const { |
614 | assert(Kind == k_Barrier && "Invalid access!"); |
615 | return Barrier.HasnXSModifier; |
616 | } |
617 | |
618 | unsigned getReg() const override { |
619 | assert(Kind == k_Register && "Invalid access!"); |
620 | return Reg.RegNum; |
621 | } |
622 | |
623 | unsigned getMatrixReg() const { |
624 | assert(Kind == k_MatrixRegister && "Invalid access!"); |
625 | return MatrixReg.RegNum; |
626 | } |
627 | |
628 | unsigned getMatrixElementWidth() const { |
629 | assert(Kind == k_MatrixRegister && "Invalid access!"); |
630 | return MatrixReg.ElementWidth; |
631 | } |
632 | |
633 | MatrixKind getMatrixKind() const { |
634 | assert(Kind == k_MatrixRegister && "Invalid access!"); |
635 | return MatrixReg.Kind; |
636 | } |
637 | |
638 | unsigned getMatrixTileListRegMask() const { |
639 | assert(isMatrixTileList() && "Invalid access!"); |
640 | return MatrixTileList.RegMask; |
641 | } |
642 | |
643 | RegConstraintEqualityTy getRegEqualityTy() const { |
644 | assert(Kind == k_Register && "Invalid access!"); |
645 | return Reg.EqualityTy; |
646 | } |
647 | |
648 | unsigned getVectorListStart() const { |
649 | assert(Kind == k_VectorList && "Invalid access!"); |
650 | return VectorList.RegNum; |
651 | } |
652 | |
653 | unsigned getVectorListCount() const { |
654 | assert(Kind == k_VectorList && "Invalid access!"); |
655 | return VectorList.Count; |
656 | } |
657 | |
658 | int getVectorIndex() const { |
659 | assert(Kind == k_VectorIndex && "Invalid access!"); |
660 | return VectorIndex.Val; |
661 | } |
662 | |
663 | StringRef getSysReg() const { |
664 | assert(Kind == k_SysReg && "Invalid access!"); |
665 | return StringRef(SysReg.Data, SysReg.Length); |
666 | } |
667 | |
668 | unsigned getSysCR() const { |
669 | assert(Kind == k_SysCR && "Invalid access!"); |
670 | return SysCRImm.Val; |
671 | } |
672 | |
673 | unsigned getPrefetch() const { |
674 | assert(Kind == k_Prefetch && "Invalid access!"); |
675 | return Prefetch.Val; |
676 | } |
677 | |
678 | unsigned getPSBHint() const { |
679 | assert(Kind == k_PSBHint && "Invalid access!"); |
680 | return PSBHint.Val; |
681 | } |
682 | |
683 | StringRef getPSBHintName() const { |
684 | assert(Kind == k_PSBHint && "Invalid access!"); |
685 | return StringRef(PSBHint.Data, PSBHint.Length); |
686 | } |
687 | |
688 | unsigned getBTIHint() const { |
689 | assert(Kind == k_BTIHint && "Invalid access!"); |
690 | return BTIHint.Val; |
691 | } |
692 | |
693 | StringRef getBTIHintName() const { |
694 | assert(Kind == k_BTIHint && "Invalid access!"); |
695 | return StringRef(BTIHint.Data, BTIHint.Length); |
696 | } |
697 | |
698 | StringRef getSVCR() const { |
699 | assert(Kind == k_SVCR && "Invalid access!"); |
700 | return StringRef(SVCR.Data, SVCR.Length); |
701 | } |
702 | |
703 | StringRef getPrefetchName() const { |
704 | assert(Kind == k_Prefetch && "Invalid access!"); |
705 | return StringRef(Prefetch.Data, Prefetch.Length); |
706 | } |
707 | |
708 | AArch64_AM::ShiftExtendType getShiftExtendType() const { |
709 | if (Kind == k_ShiftExtend) |
710 | return ShiftExtend.Type; |
711 | if (Kind == k_Register) |
712 | return Reg.ShiftExtend.Type; |
713 | llvm_unreachable("Invalid access!"); |
714 | } |
715 | |
716 | unsigned getShiftExtendAmount() const { |
717 | if (Kind == k_ShiftExtend) |
718 | return ShiftExtend.Amount; |
719 | if (Kind == k_Register) |
720 | return Reg.ShiftExtend.Amount; |
721 | llvm_unreachable("Invalid access!"); |
722 | } |
723 | |
724 | bool hasShiftExtendAmount() const { |
725 | if (Kind == k_ShiftExtend) |
726 | return ShiftExtend.HasExplicitAmount; |
727 | if (Kind == k_Register) |
728 | return Reg.ShiftExtend.HasExplicitAmount; |
729 | llvm_unreachable("Invalid access!"); |
730 | } |
731 | |
732 | bool isImm() const override { return Kind == k_Immediate; } |
733 | bool isMem() const override { return false; } |
734 | |
735 | bool isUImm6() const { |
736 | if (!isImm()) |
737 | return false; |
738 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
739 | if (!MCE) |
740 | return false; |
741 | int64_t Val = MCE->getValue(); |
742 | return (Val >= 0 && Val < 64); |
743 | } |
744 | |
745 | template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); } |
746 | |
747 | template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const { |
748 | return isImmScaled<Bits, Scale>(true); |
749 | } |
750 | |
751 | template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const { |
752 | return isImmScaled<Bits, Scale>(false); |
753 | } |
754 | |
755 | template <int Bits, int Scale> |
756 | DiagnosticPredicate isImmScaled(bool Signed) const { |
757 | if (!isImm()) |
758 | return DiagnosticPredicateTy::NoMatch; |
759 | |
760 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
761 | if (!MCE) |
762 | return DiagnosticPredicateTy::NoMatch; |
763 | |
764 | int64_t MinVal, MaxVal; |
765 | if (Signed) { |
766 | int64_t Shift = Bits - 1; |
767 | MinVal = (int64_t(1) << Shift) * -Scale; |
768 | MaxVal = ((int64_t(1) << Shift) - 1) * Scale; |
769 | } else { |
770 | MinVal = 0; |
771 | MaxVal = ((int64_t(1) << Bits) - 1) * Scale; |
772 | } |
773 | |
774 | int64_t Val = MCE->getValue(); |
775 | if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0) |
776 | return DiagnosticPredicateTy::Match; |
777 | |
778 | return DiagnosticPredicateTy::NearMatch; |
779 | } |
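Worked numbers for the bounds above, assuming Bits = 4 and Scale = 16 with Signed = true (an arbitrary instantiation chosen for illustration): only multiples of 16 in [-128, 112] match.

    #include <cassert>
    #include <cstdint>

    int main() {
      const int Bits = 4, Scale = 16;
      int64_t MinVal = (int64_t(1) << (Bits - 1)) * -Scale;        // -128
      int64_t MaxVal = ((int64_t(1) << (Bits - 1)) - 1) * Scale;   //  112
      assert(MinVal == -128 && MaxVal == 112);
      int64_t Val = 96;
      assert(Val >= MinVal && Val <= MaxVal && Val % Scale == 0);  // Match
      assert(100 % Scale != 0);                                    // NearMatch
    }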
780 | |
781 | DiagnosticPredicate isSVEPattern() const { |
782 | if (!isImm()) |
783 | return DiagnosticPredicateTy::NoMatch; |
784 | auto *MCE = dyn_cast<MCConstantExpr>(getImm()); |
785 | if (!MCE) |
786 | return DiagnosticPredicateTy::NoMatch; |
787 | int64_t Val = MCE->getValue(); |
788 | if (Val >= 0 && Val < 32) |
789 | return DiagnosticPredicateTy::Match; |
790 | return DiagnosticPredicateTy::NearMatch; |
791 | } |
792 | |
793 | bool isSymbolicUImm12Offset(const MCExpr *Expr) const { |
794 | AArch64MCExpr::VariantKind ELFRefKind; |
795 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
796 | int64_t Addend; |
797 | if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, |
798 | Addend)) { |
799 | // If we don't understand the expression, assume the best and |
800 | // let the fixup and relocation code deal with it. |
801 | return true; |
802 | } |
803 | |
804 | if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || |
805 | ELFRefKind == AArch64MCExpr::VK_LO12 || |
806 | ELFRefKind == AArch64MCExpr::VK_GOT_LO12 || |
807 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || |
808 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || |
809 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || |
810 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || |
811 | ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC || |
812 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || |
813 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || |
814 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 || |
815 | ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) { |
816 | // Note that we don't range-check the addend. It's adjusted modulo page |
817 | // size when converted, so there is no "out of range" condition when using |
818 | // @pageoff. |
819 | return true; |
820 | } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF || |
821 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) { |
822 | // @gotpageoff/@tlvppageoff can only be used directly, not with an addend. |
823 | return Addend == 0; |
824 | } |
825 | |
826 | return false; |
827 | } |
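The "no range check" note above relies on the @pageoff semantics: the encoded value is reduced modulo the 4 KiB page when the fixup is applied, so any addend still lands in 0..4095. A one-line illustration of that reduction (the 0x12345 addend is an arbitrary example):

    #include <cassert>

    int main() {
      long Addend = 0x12345;
      long PageOff = Addend & 0xfff;   // value the fixup actually encodes
      assert(PageOff == 0x345 && PageOff < 0x1000);
    }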
828 | |
829 | template <int Scale> bool isUImm12Offset() const { |
830 | if (!isImm()) |
831 | return false; |
832 | |
833 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
834 | if (!MCE) |
835 | return isSymbolicUImm12Offset(getImm()); |
836 | |
837 | int64_t Val = MCE->getValue(); |
838 | return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000; |
839 | } |
840 | |
841 | template <int N, int M> |
842 | bool isImmInRange() const { |
843 | if (!isImm()) |
844 | return false; |
845 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
846 | if (!MCE) |
847 | return false; |
848 | int64_t Val = MCE->getValue(); |
849 | return (Val >= N && Val <= M); |
850 | } |
851 | |
852 | // NOTE: Also used for isLogicalImmNot as anything that can be represented as |
853 | // a logical immediate can always be represented when inverted. |
854 | template <typename T> |
855 | bool isLogicalImm() const { |
856 | if (!isImm()) |
857 | return false; |
858 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
859 | if (!MCE) |
860 | return false; |
861 | |
862 | int64_t Val = MCE->getValue(); |
863 | // Avoid left shift by 64 directly. |
864 | uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4); |
865 | // Allow all-0 or all-1 in top bits to permit bitwise NOT. |
866 | if ((Val & Upper) && (Val & Upper) != Upper) |
867 | return false; |
868 | |
869 | return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8); |
870 | } |
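The two half-width shifts that build Upper sidestep undefined behaviour: for T = int64_t a single shift by 64 would be UB, while shifting by 32 twice yields the intended all-zero mask. A quick check of both cases:

    #include <cassert>
    #include <cstdint>

    int main() {
      // T = int32_t: sizeof(T) * 4 == 16, so Upper masks the top 32 bits.
      uint64_t Upper32 = UINT64_C(-1) << 16 << 16;
      assert(Upper32 == 0xFFFFFFFF00000000ULL);
      // T = int64_t: sizeof(T) * 4 == 32, so two shifts leave no upper mask.
      uint64_t Upper64 = UINT64_C(-1) << 32 << 32;
      assert(Upper64 == 0);
    }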
871 | |
872 | bool isShiftedImm() const { return Kind == k_ShiftedImm; } |
873 | |
874 | /// Returns the immediate value as a pair of (imm, shift) if the immediate is |
875 | /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted |
876 | /// immediate that can be shifted by 'Shift'. |
877 | template <unsigned Width> |
878 | Optional<std::pair<int64_t, unsigned> > getShiftedVal() const { |
879 | if (isShiftedImm() && Width == getShiftedImmShift()) |
880 | if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal())) |
881 | return std::make_pair(CE->getValue(), Width); |
882 | |
883 | if (isImm()) |
884 | if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) { |
885 | int64_t Val = CE->getValue(); |
886 | if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val)) |
887 | return std::make_pair(Val >> Width, Width); |
888 | else |
889 | return std::make_pair(Val, 0u); |
890 | } |
891 | |
892 | return {}; |
893 | } |
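A reduced reimplementation of the constant path above (editor's sketch covering only the plain-immediate branch): 0x5000 decomposes cleanly as (0x5, lsl #12), while 0x5001 is reported unshifted.

    #include <cassert>
    #include <cstdint>
    #include <optional>
    #include <utility>

    std::optional<std::pair<int64_t, unsigned>> shiftedVal(int64_t Val,
                                                           unsigned Width) {
      if (Val != 0 && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
        return std::make_pair(Val >> Width, Width);
      return std::make_pair(Val, 0u);
    }

    int main() {
      assert((shiftedVal(0x5000, 12) == std::make_pair(int64_t(5), 12u)));
      assert((shiftedVal(0x5001, 12) == std::make_pair(int64_t(0x5001), 0u)));
    }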
894 | |
895 | bool isAddSubImm() const { |
896 | if (!isShiftedImm() && !isImm()) |
897 | return false; |
898 | |
899 | const MCExpr *Expr; |
900 | |
901 | // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'. |
902 | if (isShiftedImm()) { |
903 | unsigned Shift = ShiftedImm.ShiftAmount; |
904 | Expr = ShiftedImm.Val; |
905 | if (Shift != 0 && Shift != 12) |
906 | return false; |
907 | } else { |
908 | Expr = getImm(); |
909 | } |
910 | |
911 | AArch64MCExpr::VariantKind ELFRefKind; |
912 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
913 | int64_t Addend; |
914 | if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, |
915 | DarwinRefKind, Addend)) { |
916 | return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF |
917 | || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF |
918 | || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) |
919 | || ELFRefKind == AArch64MCExpr::VK_LO12 |
920 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 |
921 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 |
922 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC |
923 | || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 |
924 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 |
925 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC |
926 | || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 |
927 | || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 |
928 | || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12; |
929 | } |
930 | |
931 | // If it's a constant, it should be a real immediate in range. |
932 | if (auto ShiftedVal = getShiftedVal<12>()) |
933 | return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff; |
934 | |
935 | // If it's an expression, we hope for the best and let the fixup/relocation |
936 | // code deal with it. |
937 | return true; |
938 | } |
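Concretely, the getShiftedVal<12>() check at the end admits any constant expressible as a 12-bit immediate optionally shifted left by 12; for instance #0xfff000 is accepted as (0xfff, lsl #12):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t Imm = 0xfff000;
      int64_t Unshifted = Imm >> 12;                 // 0xfff
      assert((Unshifted << 12) == Imm);              // cleanly shiftable by 12
      assert(Unshifted >= 0 && Unshifted <= 0xfff);  // fits in imm12
    }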
939 | |
940 | bool isAddSubImmNeg() const { |
941 | if (!isShiftedImm() && !isImm()) |
942 | return false; |
943 | |
944 | // Otherwise it should be a real negative immediate in range. |
945 | if (auto ShiftedVal = getShiftedVal<12>()) |
946 | return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff; |
947 | |
948 | return false; |
949 | } |
950 | |
951 | // Signed value in the range -128 to +127. For element widths of |
952 | // 16 bits or higher it may also be a signed multiple of 256 in the |
953 | // range -32768 to +32512. |
954 | // For element-width of 8 bits a range of -128 to 255 is accepted, |
955 | // since a copy of a byte can be either signed/unsigned. |
956 | template <typename T> |
957 | DiagnosticPredicate isSVECpyImm() const { |
958 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) |
959 | return DiagnosticPredicateTy::NoMatch; |
960 | |
961 | bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value || |
962 | std::is_same<int8_t, T>::value; |
963 | if (auto ShiftedImm = getShiftedVal<8>()) |
964 | if (!(IsByte && ShiftedImm->second) && |
965 | AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first) |
966 | << ShiftedImm->second)) |
967 | return DiagnosticPredicateTy::Match; |
968 | |
969 | return DiagnosticPredicateTy::NearMatch; |
970 | } |
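A simplified predicate for the ranges described in the comment above, assuming a 16-bit element width (editor's sketch; the real check defers to AArch64_AM::isSVECpyImm): the immediate is either a plain signed byte, or a signed byte shifted left by 8. The unsigned variant used by isSVEAddSubImm just below follows the same shape with 0..255 and 0..65280.

    #include <cassert>
    #include <cstdint>

    bool sveCpyImm16(int64_t V) {
      bool Byte = V >= -128 && V <= 127;
      bool Shifted = V % 256 == 0 && V >= -32768 && V <= 32512;
      return Byte || Shifted;
    }

    int main() {
      assert(sveCpyImm16(127) && sveCpyImm16(-32768) && sveCpyImm16(256));
      assert(!sveCpyImm16(129) && !sveCpyImm16(32513));
    }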
971 | |
972 | // Unsigned value in the range 0 to 255. For element widths of |
973 | // 16 bits or higher it may also be a signed multiple of 256 in the |
974 | // range 0 to 65280. |
975 | template <typename T> DiagnosticPredicate isSVEAddSubImm() const { |
976 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) |
977 | return DiagnosticPredicateTy::NoMatch; |
978 | |
979 | bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value || |
980 | std::is_same<int8_t, T>::value; |
981 | if (auto ShiftedImm = getShiftedVal<8>()) |
982 | if (!(IsByte && ShiftedImm->second) && |
983 | AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first |
984 | << ShiftedImm->second)) |
985 | return DiagnosticPredicateTy::Match; |
986 | |
987 | return DiagnosticPredicateTy::NearMatch; |
988 | } |
989 | |
990 | template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const { |
991 | if (isLogicalImm<T>() && !isSVECpyImm<T>()) |
992 | return DiagnosticPredicateTy::Match; |
993 | return DiagnosticPredicateTy::NoMatch; |
994 | } |
995 | |
996 | bool isCondCode() const { return Kind == k_CondCode; } |
997 | |
998 | bool isSIMDImmType10() const { |
999 | if (!isImm()) |
1000 | return false; |
1001 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1002 | if (!MCE) |
1003 | return false; |
1004 | return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue()); |
1005 | } |
1006 | |
1007 | template<int N> |
1008 | bool isBranchTarget() const { |
1009 | if (!isImm()) |
1010 | return false; |
1011 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1012 | if (!MCE) |
1013 | return true; |
1014 | int64_t Val = MCE->getValue(); |
1015 | if (Val & 0x3) |
1016 | return false; |
1017 | assert(N > 0 && "Branch target immediate cannot be 0 bits!"); |
1018 | return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2)); |
1019 | } |
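Worked range for the final comparison, assuming N = 19 (the immediate width of AArch64 conditional branches): the value is a 4-byte-aligned offset in [-1048576, 1048572].

    #include <cassert>

    int main() {
      const int N = 19;
      long Min = -((1L << (N - 1)) << 2);     // -2^20     == -1048576
      long Max = ((1L << (N - 1)) - 1) << 2;  //  2^20 - 4 ==  1048572
      assert(Min == -1048576 && Max == 1048572);
      assert((Max & 0x3) == 0);               // targets stay word-aligned
    }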
1020 | |
1021 | bool |
1022 | isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const { |
1023 | if (!isImm()) |
1024 | return false; |
1025 | |
1026 | AArch64MCExpr::VariantKind ELFRefKind; |
1027 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
1028 | int64_t Addend; |
1029 | if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind, |
1030 | DarwinRefKind, Addend)) { |
1031 | return false; |
1032 | } |
1033 | if (DarwinRefKind != MCSymbolRefExpr::VK_None) |
1034 | return false; |
1035 | |
1036 | return llvm::is_contained(AllowedModifiers, ELFRefKind); |
1037 | } |
1038 | |
1039 | bool isMovWSymbolG3() const { |
1040 | return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3}); |
1041 | } |
1042 | |
1043 | bool isMovWSymbolG2() const { |
1044 | return isMovWSymbol( |
1045 | {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S, |
1046 | AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2, |
1047 | AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2, |
1048 | AArch64MCExpr::VK_DTPREL_G2}); |
1049 | } |
1050 | |
1051 | bool isMovWSymbolG1() const { |
1052 | return isMovWSymbol( |
1053 | {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S, |
1054 | AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1, |
1055 | AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1, |
1056 | AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC, |
1057 | AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC}); |
1058 | } |
1059 | |
1060 | bool isMovWSymbolG0() const { |
1061 | return isMovWSymbol( |
1062 | {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S, |
1063 | AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0, |
1064 | AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC, |
1065 | AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC, |
1066 | AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC}); |
1067 | } |
1068 | |
1069 | template<int RegWidth, int Shift> |
1070 | bool isMOVZMovAlias() const { |
1071 | if (!isImm()) return false; |
1072 | |
1073 | const MCExpr *E = getImm(); |
1074 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) { |
1075 | uint64_t Value = CE->getValue(); |
1076 | |
1077 | return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth); |
1078 | } |
1079 | // Only supports the case of Shift being 0 if an expression is used as an |
1080 | // operand |
1081 | return !Shift && E; |
1082 | } |
1083 | |
1084 | template<int RegWidth, int Shift> |
1085 | bool isMOVNMovAlias() const { |
1086 | if (!isImm()) return false; |
1087 | |
1088 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1089 | if (!CE) return false; |
1090 | uint64_t Value = CE->getValue(); |
1091 | |
1092 | return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth); |
1093 | } |
1094 | |
1095 | bool isFPImm() const { |
1096 | return Kind == k_FPImm && |
1097 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1; |
1098 | } |
1099 | |
1100 | bool isBarrier() const { |
1101 | return Kind == k_Barrier && !getBarriernXSModifier(); |
1102 | } |
1103 | bool isBarriernXS() const { |
1104 | return Kind == k_Barrier && getBarriernXSModifier(); |
1105 | } |
1106 | bool isSysReg() const { return Kind == k_SysReg; } |
1107 | |
1108 | bool isMRSSystemRegister() const { |
1109 | if (!isSysReg()) return false; |
1110 | |
1111 | return SysReg.MRSReg != -1U; |
1112 | } |
1113 | |
1114 | bool isMSRSystemRegister() const { |
1115 | if (!isSysReg()) return false; |
1116 | return SysReg.MSRReg != -1U; |
1117 | } |
1118 | |
1119 | bool isSystemPStateFieldWithImm0_1() const { |
1120 | if (!isSysReg()) return false; |
1121 | return (SysReg.PStateField == AArch64PState::PAN || |
1122 | SysReg.PStateField == AArch64PState::DIT || |
1123 | SysReg.PStateField == AArch64PState::UAO || |
1124 | SysReg.PStateField == AArch64PState::SSBS); |
1125 | } |
1126 | |
1127 | bool isSystemPStateFieldWithImm0_15() const { |
1128 | if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false; |
1129 | return SysReg.PStateField != -1U; |
1130 | } |
1131 | |
1132 | bool isSVCR() const { |
1133 | if (Kind != k_SVCR) |
1134 | return false; |
1135 | return SVCR.PStateField != -1U; |
1136 | } |
1137 | |
1138 | bool isReg() const override { |
1139 | return Kind == k_Register; |
1140 | } |
1141 | |
1142 | bool isScalarReg() const { |
1143 | return Kind == k_Register && Reg.Kind == RegKind::Scalar; |
1144 | } |
1145 | |
1146 | bool isNeonVectorReg() const { |
1147 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector; |
1148 | } |
1149 | |
1150 | bool isNeonVectorRegLo() const { |
1151 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector && |
1152 | (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains( |
1153 | Reg.RegNum) || |
1154 | AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains( |
1155 | Reg.RegNum)); |
1156 | } |
1157 | |
1158 | bool isMatrix() const { return Kind == k_MatrixRegister; } |
1159 | bool isMatrixTileList() const { return Kind == k_MatrixTileList; } |
1160 | |
1161 | template <unsigned Class> bool isSVEVectorReg() const { |
1162 | RegKind RK; |
1163 | switch (Class) { |
1164 | case AArch64::ZPRRegClassID: |
1165 | case AArch64::ZPR_3bRegClassID: |
1166 | case AArch64::ZPR_4bRegClassID: |
1167 | RK = RegKind::SVEDataVector; |
1168 | break; |
1169 | case AArch64::PPRRegClassID: |
1170 | case AArch64::PPR_3bRegClassID: |
1171 | RK = RegKind::SVEPredicateVector; |
1172 | break; |
1173 | default: |
1174 | llvm_unreachable("Unsupport register class"); |
1175 | } |
1176 | |
1177 | return (Kind == k_Register && Reg.Kind == RK) && |
1178 | AArch64MCRegisterClasses[Class].contains(getReg()); |
1179 | } |
1180 | |
1181 | template <unsigned Class> bool isFPRasZPR() const { |
1182 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1183 | AArch64MCRegisterClasses[Class].contains(getReg()); |
1184 | } |
1185 | |
1186 | template <int ElementWidth, unsigned Class> |
1187 | DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const { |
1188 | if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector) |
1189 | return DiagnosticPredicateTy::NoMatch; |
1190 | |
1191 | if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth)) |
1192 | return DiagnosticPredicateTy::Match; |
1193 | |
1194 | return DiagnosticPredicateTy::NearMatch; |
1195 | } |
1196 | |
1197 | template <int ElementWidth, unsigned Class> |
1198 | DiagnosticPredicate isSVEDataVectorRegOfWidth() const { |
1199 | if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector) |
1200 | return DiagnosticPredicateTy::NoMatch; |
1201 | |
1202 | if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth) |
1203 | return DiagnosticPredicateTy::Match; |
1204 | |
1205 | return DiagnosticPredicateTy::NearMatch; |
1206 | } |
1207 | |
1208 | template <int ElementWidth, unsigned Class, |
1209 | AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth, |
1210 | bool ShiftWidthAlwaysSame> |
1211 | DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const { |
1212 | auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>(); |
1213 | if (!VectorMatch.isMatch()) |
1214 | return DiagnosticPredicateTy::NoMatch; |
1215 | |
1216 | // Give a more specific diagnostic when the user has explicitly typed in |
1217 | // a shift-amount that does not match what is expected, but for which |
1218 | // there is also an unscaled addressing mode (e.g. sxtw/uxtw). |
1219 | bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8); |
1220 | if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW || |
1221 | ShiftExtendTy == AArch64_AM::SXTW) && |
1222 | !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8) |
1223 | return DiagnosticPredicateTy::NoMatch; |
1224 | |
1225 | if (MatchShift && ShiftExtendTy == getShiftExtendType()) |
1226 | return DiagnosticPredicateTy::Match; |
1227 | |
1228 | return DiagnosticPredicateTy::NearMatch; |
1229 | } |
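The exact-match condition compares the written shift amount against Log2_32(ShiftWidth / 8). For 64-bit elements that expected amount is 3, i.e. only "lsl #3" (or the matching extend) counts as an exact match:

    #include <cassert>

    int main() {
      unsigned ShiftWidth = 64;
      unsigned Expected = 0;                   // computes Log2_32(64 / 8) == 3
      for (unsigned V = ShiftWidth / 8; V >>= 1;)
        ++Expected;
      assert(Expected == 3);
    }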
1230 | |
1231 | bool isGPR32as64() const { |
1232 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1233 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum); |
1234 | } |
1235 | |
1236 | bool isGPR64as32() const { |
1237 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1238 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum); |
1239 | } |
1240 | |
1241 | bool isGPR64x8() const { |
1242 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1243 | AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains( |
1244 | Reg.RegNum); |
1245 | } |
1246 | |
1247 | bool isWSeqPair() const { |
1248 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1249 | AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains( |
1250 | Reg.RegNum); |
1251 | } |
1252 | |
1253 | bool isXSeqPair() const { |
1254 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1255 | AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains( |
1256 | Reg.RegNum); |
1257 | } |
1258 | |
1259 | template<int64_t Angle, int64_t Remainder> |
1260 | DiagnosticPredicate isComplexRotation() const { |
1261 | if (!isImm()) return DiagnosticPredicateTy::NoMatch; |
1262 | |
1263 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1264 | if (!CE) return DiagnosticPredicateTy::NoMatch; |
1265 | uint64_t Value = CE->getValue(); |
1266 | |
1267 | if (Value % Angle == Remainder && Value <= 270) |
1268 | return DiagnosticPredicateTy::Match; |
1269 | return DiagnosticPredicateTy::NearMatch; |
1270 | } |
1271 | |
1272 | template <unsigned RegClassID> bool isGPR64() const { |
1273 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && |
1274 | AArch64MCRegisterClasses[RegClassID].contains(getReg()); |
1275 | } |
1276 | |
1277 | template <unsigned RegClassID, int ExtWidth> |
1278 | DiagnosticPredicate isGPR64WithShiftExtend() const { |
1279 | if (Kind != k_Register || Reg.Kind != RegKind::Scalar) |
1280 | return DiagnosticPredicateTy::NoMatch; |
1281 | |
1282 | if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL && |
1283 | getShiftExtendAmount() == Log2_32(ExtWidth / 8)) |
1284 | return DiagnosticPredicateTy::Match; |
1285 | return DiagnosticPredicateTy::NearMatch; |
1286 | } |
1287 | |
1288 | /// Is this a vector list with the type implicit (presumably attached to the |
1289 | /// instruction itself)? |
1290 | template <RegKind VectorKind, unsigned NumRegs> |
1291 | bool isImplicitlyTypedVectorList() const { |
1292 | return Kind == k_VectorList && VectorList.Count == NumRegs && |
1293 | VectorList.NumElements == 0 && |
1294 | VectorList.RegisterKind == VectorKind; |
1295 | } |
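// e.g. a list written "{ v0, v1 }" (no ".8b"-style element suffix) is
// implicitly typed; "{ v0.8b, v1.8b }" is not.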
1296 | |
1297 | template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements, |
1298 | unsigned ElementWidth> |
1299 | bool isTypedVectorList() const { |
1300 | if (Kind != k_VectorList) |
1301 | return false; |
1302 | if (VectorList.Count != NumRegs) |
1303 | return false; |
1304 | if (VectorList.RegisterKind != VectorKind) |
1305 | return false; |
1306 | if (VectorList.ElementWidth != ElementWidth) |
1307 | return false; |
1308 | return VectorList.NumElements == NumElements; |
1309 | } |
1310 | |
1311 | template <int Min, int Max> |
1312 | DiagnosticPredicate isVectorIndex() const { |
1313 | if (Kind != k_VectorIndex) |
1314 | return DiagnosticPredicateTy::NoMatch; |
1315 | if (VectorIndex.Val >= Min && VectorIndex.Val <= Max) |
1316 | return DiagnosticPredicateTy::Match; |
1317 | return DiagnosticPredicateTy::NearMatch; |
1318 | } |
1319 | |
1320 | bool isToken() const override { return Kind == k_Token; } |
1321 | |
1322 | bool isTokenEqual(StringRef Str) const { |
1323 | return Kind == k_Token && getToken() == Str; |
1324 | } |
1325 | bool isSysCR() const { return Kind == k_SysCR; } |
1326 | bool isPrefetch() const { return Kind == k_Prefetch; } |
1327 | bool isPSBHint() const { return Kind == k_PSBHint; } |
1328 | bool isBTIHint() const { return Kind == k_BTIHint; } |
1329 | bool isShiftExtend() const { return Kind == k_ShiftExtend; } |
1330 | bool isShifter() const { |
1331 | if (!isShiftExtend()) |
1332 | return false; |
1333 | |
1334 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
1335 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || |
1336 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR || |
1337 | ST == AArch64_AM::MSL); |
1338 | } |
1339 | |
1340 | template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const { |
1341 | if (Kind != k_FPImm) |
1342 | return DiagnosticPredicateTy::NoMatch; |
1343 | |
1344 | if (getFPImmIsExact()) { |
1345 | // Look up the immediate in the table of supported immediates.
1346 | auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum); |
1347 | assert(Desc && "Unknown enum value");
1348 | |
1349 | // Calculate its FP value. |
1350 | APFloat RealVal(APFloat::IEEEdouble()); |
1351 | auto StatusOrErr = |
1352 | RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero); |
1353 | if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK) |
1354 | llvm_unreachable("FP immediate is not exact");
1355 | |
1356 | if (getFPImm().bitwiseIsEqual(RealVal)) |
1357 | return DiagnosticPredicateTy::Match; |
1358 | } |
1359 | |
1360 | return DiagnosticPredicateTy::NearMatch; |
1361 | } |
1362 | |
1363 | template <unsigned ImmA, unsigned ImmB> |
1364 | DiagnosticPredicate isExactFPImm() const { |
1365 | DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch; |
1366 | if ((Res = isExactFPImm<ImmA>())) |
1367 | return DiagnosticPredicateTy::Match; |
1368 | if ((Res = isExactFPImm<ImmB>())) |
1369 | return DiagnosticPredicateTy::Match; |
1370 | return Res; |
1371 | } |
1372 | |
1373 | bool isExtend() const { |
1374 | if (!isShiftExtend()) |
1375 | return false; |
1376 | |
1377 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1378 | return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || |
1379 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || |
1380 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW || |
1381 | ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || |
1382 | ET == AArch64_AM::LSL) && |
1383 | getShiftExtendAmount() <= 4; |
1384 | } |
1385 | |
1386 | bool isExtend64() const { |
1387 | if (!isExtend()) |
1388 | return false; |
1389 | // Make sure the extend expects a 32-bit source register. |
1390 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1391 | return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || |
1392 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || |
1393 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW; |
1394 | } |
1395 | |
1396 | bool isExtendLSL64() const { |
1397 | if (!isExtend()) |
1398 | return false; |
1399 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1400 | return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || |
1401 | ET == AArch64_AM::LSL) && |
1402 | getShiftExtendAmount() <= 4; |
1403 | } |
1404 | |
1405 | template<int Width> bool isMemXExtend() const { |
1406 | if (!isExtend()) |
1407 | return false; |
1408 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1409 | return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) && |
1410 | (getShiftExtendAmount() == Log2_32(Width / 8) || |
1411 | getShiftExtendAmount() == 0); |
1412 | } |
1413 | |
1414 | template<int Width> bool isMemWExtend() const { |
1415 | if (!isExtend()) |
1416 | return false; |
1417 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1418 | return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) && |
1419 | (getShiftExtendAmount() == Log2_32(Width / 8) || |
1420 | getShiftExtendAmount() == 0); |
1421 | } |
1422 | |
1423 | template <unsigned width> |
1424 | bool isArithmeticShifter() const { |
1425 | if (!isShifter()) |
1426 | return false; |
1427 | |
1428 | // An arithmetic shifter is LSL, LSR, or ASR. |
1429 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
1430 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || |
1431 | ST == AArch64_AM::ASR) && getShiftExtendAmount() < width; |
1432 | } |
1433 | |
1434 | template <unsigned width> |
1435 | bool isLogicalShifter() const { |
1436 | if (!isShifter()) |
1437 | return false; |
1438 | |
1439 | // A logical shifter is LSL, LSR, ASR or ROR. |
1440 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
1441 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || |
1442 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) && |
1443 | getShiftExtendAmount() < width; |
1444 | } |
1445 | |
1446 | bool isMovImm32Shifter() const { |
1447 | if (!isShifter()) |
1448 | return false; |
1449 | |
1450 | // A 32-bit MOVi shifter is LSL of 0 or 16.
1451 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
1452 | if (ST != AArch64_AM::LSL) |
1453 | return false; |
1454 | uint64_t Val = getShiftExtendAmount(); |
1455 | return (Val == 0 || Val == 16); |
1456 | } |
1457 | |
1458 | bool isMovImm64Shifter() const { |
1459 | if (!isShifter()) |
1460 | return false; |
1461 | |
1462 | // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1463 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); |
1464 | if (ST != AArch64_AM::LSL) |
1465 | return false; |
1466 | uint64_t Val = getShiftExtendAmount(); |
1467 | return (Val == 0 || Val == 16 || Val == 32 || Val == 48); |
1468 | } |
1469 | |
1470 | bool isLogicalVecShifter() const { |
1471 | if (!isShifter()) |
1472 | return false; |
1473 | |
1474 | // A logical vector shifter is a left shift by 0, 8, 16, or 24. |
1475 | unsigned Shift = getShiftExtendAmount(); |
1476 | return getShiftExtendType() == AArch64_AM::LSL && |
1477 | (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24); |
1478 | } |
1479 | |
1480 | bool isLogicalVecHalfWordShifter() const { |
1481 | if (!isLogicalVecShifter()) |
1482 | return false; |
1483 | |
1484 | // A logical vector half-word shifter is a left shift by 0 or 8.
1485 | unsigned Shift = getShiftExtendAmount(); |
1486 | return getShiftExtendType() == AArch64_AM::LSL && |
1487 | (Shift == 0 || Shift == 8); |
1488 | } |
1489 | |
1490 | bool isMoveVecShifter() const { |
1491 | if (!isShiftExtend()) |
1492 | return false; |
1493 | |
1494 | // A move vector shifter is an MSL left shift by 8 or 16.
1495 | unsigned Shift = getShiftExtendAmount(); |
1496 | return getShiftExtendType() == AArch64_AM::MSL && |
1497 | (Shift == 8 || Shift == 16); |
1498 | } |
1499 | |
1500 | // Fallback unscaled operands are for aliases of LDR/STR that fall back |
1501 | // to LDUR/STUR when the offset is not legal for the former but is for |
1502 | // the latter. As such, in addition to checking for being a legal unscaled |
1503 | // address, also check that it is not a legal scaled address. This avoids |
1504 | // ambiguity in the matcher. |
1505 | template<int Width> |
1506 | bool isSImm9OffsetFB() const { |
1507 | return isSImm<9>() && !isUImm12Offset<Width / 8>(); |
1508 | } |
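// Illustrative (assumed syntax): with Width == 32, "ldr w0, [x1, #1]" has
// an offset that is not a multiple of 4, so it only satisfies the unscaled
// (LDUR) form and matches here; "ldr w0, [x1, #4]" is a legal scaled
// offset and is rejected, leaving the scaled LDR pattern to claim it.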
1509 | |
1510 | bool isAdrpLabel() const { |
1511 | // Validation was handled during parsing, so we just verify that |
1512 | // something didn't go haywire. |
1513 | if (!isImm()) |
1514 | return false; |
1515 | |
1516 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
1517 | int64_t Val = CE->getValue(); |
1518 | int64_t Min = - (4096 * (1LL << (21 - 1))); |
1519 | int64_t Max = 4096 * ((1LL << (21 - 1)) - 1); |
1520 | return (Val % 4096) == 0 && Val >= Min && Val <= Max; |
1521 | } |
1522 | |
1523 | return true; |
1524 | } |
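// For the constant case this works out to the ADRP range: whole 4 KiB
// pages within +/-4 GiB, i.e. Min == -(1LL << 32) and
// Max == (1LL << 32) - 4096, with the value page-aligned.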
1525 | |
1526 | bool isAdrLabel() const { |
1527 | // Validation was handled during parsing, so we just verify that |
1528 | // something didn't go haywire. |
1529 | if (!isImm()) |
1530 | return false; |
1531 | |
1532 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { |
1533 | int64_t Val = CE->getValue(); |
1534 | int64_t Min = - (1LL << (21 - 1)); |
1535 | int64_t Max = ((1LL << (21 - 1)) - 1); |
1536 | return Val >= Min && Val <= Max; |
1537 | } |
1538 | |
1539 | return true; |
1540 | } |
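// Likewise, ADR reaches +/-1 MiB at byte granularity:
// Min == -(1LL << 20), Max == (1LL << 20) - 1.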
1541 | |
1542 | template <MatrixKind Kind, unsigned EltSize, unsigned RegClass> |
1543 | DiagnosticPredicate isMatrixRegOperand() const { |
1544 | if (!isMatrix()) |
1545 | return DiagnosticPredicateTy::NoMatch; |
1546 | if (getMatrixKind() != Kind || |
1547 | !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) || |
1548 | EltSize != getMatrixElementWidth()) |
1549 | return DiagnosticPredicateTy::NearMatch; |
1550 | return DiagnosticPredicateTy::Match; |
1551 | } |
1552 | |
1553 | void addExpr(MCInst &Inst, const MCExpr *Expr) const { |
1554 | // Add as immediates when possible. Null MCExpr = 0. |
1555 | if (!Expr) |
1556 | Inst.addOperand(MCOperand::createImm(0)); |
1557 | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) |
1558 | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
1559 | else |
1560 | Inst.addOperand(MCOperand::createExpr(Expr)); |
1561 | } |
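// A minimal sketch of the two paths (assumed operands, not from this
// file): "#4" parses to an MCConstantExpr and is added as createImm(4);
// a symbolic operand such as ":lo12:sym" stays an MCExpr and is added
// via createExpr() so a later fixup can resolve it.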
1562 | |
1563 | void addRegOperands(MCInst &Inst, unsigned N) const { |
1564 | assert(N == 1 && "Invalid number of operands!");
1565 | Inst.addOperand(MCOperand::createReg(getReg())); |
1566 | } |
1567 | |
1568 | void addMatrixOperands(MCInst &Inst, unsigned N) const { |
1569 | assert(N == 1 && "Invalid number of operands!");
1570 | Inst.addOperand(MCOperand::createReg(getMatrixReg())); |
1571 | } |
1572 | |
1573 | void addGPR32as64Operands(MCInst &Inst, unsigned N) const { |
1574 | assert(N == 1 && "Invalid number of operands!");
1575 | assert(
1576 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1577 | |
1578 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); |
1579 | uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister( |
1580 | RI->getEncodingValue(getReg())); |
1581 | |
1582 | Inst.addOperand(MCOperand::createReg(Reg)); |
1583 | } |
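// e.g. a parsed X3 has encoding value 3, so the operand actually emitted
// is W3, the GPR32 register with the same encoding.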
1584 | |
1585 | void addGPR64as32Operands(MCInst &Inst, unsigned N) const { |
1586 | assert(N == 1 && "Invalid number of operands!");
1587 | assert(
1588 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1589 | |
1590 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); |
1591 | uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister( |
1592 | RI->getEncodingValue(getReg())); |
1593 | |
1594 | Inst.addOperand(MCOperand::createReg(Reg)); |
1595 | } |
1596 | |
1597 | template <int Width> |
1598 | void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const { |
1599 | unsigned Base; |
1600 | switch (Width) { |
1601 | case 8: Base = AArch64::B0; break; |
1602 | case 16: Base = AArch64::H0; break; |
1603 | case 32: Base = AArch64::S0; break; |
1604 | case 64: Base = AArch64::D0; break; |
1605 | case 128: Base = AArch64::Q0; break; |
1606 | default: |
1607 | llvm_unreachable("Unsupported width");
1608 | } |
1609 | Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base)); |
1610 | } |
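// e.g. with Width == 64, a parsed D5 is emitted as Z5 (AArch64::Z0 +
// (D5 - D0)): the scalar FP register is reinterpreted as the SVE Z
// register with the same index.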
1611 | |
1612 | void addVectorReg64Operands(MCInst &Inst, unsigned N) const { |
1613 | assert(N == 1 && "Invalid number of operands!");
1614 | assert(
1615 | AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1616 | Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0)); |
1617 | } |
1618 | |
1619 | void addVectorReg128Operands(MCInst &Inst, unsigned N) const { |
1620 | assert(N == 1 && "Invalid number of operands!");
1621 | assert(
1622 | AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1623 | Inst.addOperand(MCOperand::createReg(getReg())); |
1624 | } |
1625 | |
1626 | void addVectorRegLoOperands(MCInst &Inst, unsigned N) const { |
1627 | assert(N == 1 && "Invalid number of operands!");
1628 | Inst.addOperand(MCOperand::createReg(getReg())); |
1629 | } |
1630 | |
1631 | enum VecListIndexType { |
1632 | VecListIdx_DReg = 0, |
1633 | VecListIdx_QReg = 1, |
1634 | VecListIdx_ZReg = 2, |
1635 | }; |
1636 | |
1637 | template <VecListIndexType RegTy, unsigned NumRegs> |
1638 | void addVectorListOperands(MCInst &Inst, unsigned N) const { |
1639 | assert(N == 1 && "Invalid number of operands!");
1640 | static const unsigned FirstRegs[][5] = { |
1641 | /* DReg */ { AArch64::Q0, |
1642 | AArch64::D0, AArch64::D0_D1, |
1643 | AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 }, |
1644 | /* QReg */ { AArch64::Q0, |
1645 | AArch64::Q0, AArch64::Q0_Q1, |
1646 | AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 }, |
1647 | /* ZReg */ { AArch64::Z0, |
1648 | AArch64::Z0, AArch64::Z0_Z1, |
1649 | AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 } |
1650 | }; |
1651 | |
1652 | assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1653 | " NumRegs must be <= 4 for ZRegs");
1654 | |
1655 | unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs]; |
1656 | Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() - |
1657 | FirstRegs[(unsigned)RegTy][0])); |
1658 | } |
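// e.g. for RegTy == VecListIdx_DReg and NumRegs == 2, a NEON list
// starting at v3 (stored as Q3, hence the Q0 base in column 0 above)
// is emitted as D0_D1 + (Q3 - Q0), i.e. the D3_D4 tuple register.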
1659 | |
1660 | void addMatrixTileListOperands(MCInst &Inst, unsigned N) const { |
1661 | assert(N == 1 && "Invalid number of operands!");
1662 | unsigned RegMask = getMatrixTileListRegMask(); |
1663 | assert(RegMask <= 0xFF && "Invalid mask!");
1664 | Inst.addOperand(MCOperand::createImm(RegMask)); |
1665 | } |
1666 | |
1667 | void addVectorIndexOperands(MCInst &Inst, unsigned N) const { |
1668 | assert(N == 1 && "Invalid number of operands!");
1669 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); |
1670 | } |
1671 | |
1672 | template <unsigned ImmIs0, unsigned ImmIs1> |
1673 | void addExactFPImmOperands(MCInst &Inst, unsigned N) const { |
1674 | assert(N == 1 && "Invalid number of operands!");
1675 | assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1676 | Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>()))); |
1677 | } |
1678 | |
1679 | void addImmOperands(MCInst &Inst, unsigned N) const { |
1680 | assert(N == 1 && "Invalid number of operands!");
1681 | // If this is a pageoff symrefexpr with an addend, adjust the addend |
1682 | // to be only the page-offset portion. Otherwise, just add the expr |
1683 | // as-is. |
1684 | addExpr(Inst, getImm()); |
1685 | } |
1686 | |
1687 | template <int Shift> |
1688 | void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const { |
1689 | assert(N == 2 && "Invalid number of operands!");
1690 | if (auto ShiftedVal = getShiftedVal<Shift>()) { |
1691 | Inst.addOperand(MCOperand::createImm(ShiftedVal->first)); |
1692 | Inst.addOperand(MCOperand::createImm(ShiftedVal->second)); |
1693 | } else if (isShiftedImm()) { |
1694 | addExpr(Inst, getShiftedImmVal()); |
1695 | Inst.addOperand(MCOperand::createImm(getShiftedImmShift())); |
1696 | } else { |
1697 | addExpr(Inst, getImm()); |
1698 | Inst.addOperand(MCOperand::createImm(0)); |
1699 | } |
1700 | } |
1701 | |
1702 | template <int Shift> |
1703 | void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const { |
1704 | assert(N == 2 && "Invalid number of operands!");
1705 | if (auto ShiftedVal = getShiftedVal<Shift>()) { |
1706 | Inst.addOperand(MCOperand::createImm(-ShiftedVal->first)); |
1707 | Inst.addOperand(MCOperand::createImm(ShiftedVal->second)); |
1708 | } else |
1709 | llvm_unreachable("Not a shifted negative immediate");
1710 | } |
1711 | |
1712 | void addCondCodeOperands(MCInst &Inst, unsigned N) const { |
1713 | assert(N == 1 && "Invalid number of operands!");
1714 | Inst.addOperand(MCOperand::createImm(getCondCode())); |
1715 | } |
1716 | |
1717 | void addAdrpLabelOperands(MCInst &Inst, unsigned N) const { |
1718 | assert(N == 1 && "Invalid number of operands!");
1719 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1720 | if (!MCE) |
1721 | addExpr(Inst, getImm()); |
1722 | else |
1723 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12)); |
1724 | } |
1725 | |
1726 | void addAdrLabelOperands(MCInst &Inst, unsigned N) const { |
1727 | addImmOperands(Inst, N); |
1728 | } |
1729 | |
1730 | template<int Scale> |
1731 | void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const { |
1732 | assert(N == 1 && "Invalid number of operands!");
1733 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1734 | |
1735 | if (!MCE) { |
1736 | Inst.addOperand(MCOperand::createExpr(getImm())); |
1737 | return; |
1738 | } |
1739 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); |
1740 | } |
1741 | |
1742 | void addUImm6Operands(MCInst &Inst, unsigned N) const { |
1743 | assert(N == 1 && "Invalid number of operands!");
1744 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1745 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); |
1746 | } |
1747 | |
1748 | template <int Scale> |
1749 | void addImmScaledOperands(MCInst &Inst, unsigned N) const { |
1750 | assert(N == 1 && "Invalid number of operands!");
1751 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1752 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); |
1753 | } |
1754 | |
1755 | template <typename T> |
1756 | void addLogicalImmOperands(MCInst &Inst, unsigned N) const { |
1757 | assert(N == 1 && "Invalid number of operands!");
1758 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1759 | std::make_unsigned_t<T> Val = MCE->getValue(); |
1760 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); |
1761 | Inst.addOperand(MCOperand::createImm(encoding)); |
1762 | } |
1763 | |
1764 | template <typename T> |
1765 | void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const { |
1766 | assert(N == 1 && "Invalid number of operands!");
1767 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1768 | std::make_unsigned_t<T> Val = ~MCE->getValue(); |
1769 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); |
1770 | Inst.addOperand(MCOperand::createImm(encoding)); |
1771 | } |
1772 | |
1773 | void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const { |
1774 | assert(N == 1 && "Invalid number of operands!");
1775 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1776 | uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue()); |
1777 | Inst.addOperand(MCOperand::createImm(encoding)); |
1778 | } |
1779 | |
1780 | void addBranchTarget26Operands(MCInst &Inst, unsigned N) const { |
1781 | // Branch operands don't encode the low bits, so shift them off |
1782 | // here. If it's a label, however, just put it on directly as there's |
1783 | // not enough information now to do anything. |
1784 | assert(N == 1 && "Invalid number of operands!");
1785 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1786 | if (!MCE) { |
1787 | addExpr(Inst, getImm()); |
1788 | return; |
1789 | } |
1790 | assert(MCE && "Invalid constant immediate operand!");
1791 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); |
1792 | } |
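// e.g. a resolved constant offset of +8 bytes is encoded as the word
// offset 2; a still-symbolic label is added as an expression and the
// >> 2 scaling is left to the relocation/fixup machinery.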
1793 | |
1794 | void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const { |
1795 | // Branch operands don't encode the low bits, so shift them off |
1796 | // here. If it's a label, however, just put it on directly as there's |
1797 | // not enough information now to do anything. |
1798 | assert(N == 1 && "Invalid number of operands!");
1799 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1800 | if (!MCE) { |
1801 | addExpr(Inst, getImm()); |
1802 | return; |
1803 | } |
1804 | assert(MCE && "Invalid constant immediate operand!");
1805 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); |
1806 | } |
1807 | |
1808 | void addBranchTarget14Operands(MCInst &Inst, unsigned N) const { |
1809 | // Branch operands don't encode the low bits, so shift them off |
1810 | // here. If it's a label, however, just put it on directly as there's |
1811 | // not enough information now to do anything. |
1812 | assert(N == 1 && "Invalid number of operands!");
1813 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); |
1814 | if (!MCE) { |
1815 | addExpr(Inst, getImm()); |
1816 | return; |
1817 | } |
1818 | assert(MCE && "Invalid constant immediate operand!");
1819 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); |
1820 | } |
1821 | |
1822 | void addFPImmOperands(MCInst &Inst, unsigned N) const { |
1823 | assert(N == 1 && "Invalid number of operands!");
1824 | Inst.addOperand(MCOperand::createImm( |
1825 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()))); |
1826 | } |
1827 | |
1828 | void addBarrierOperands(MCInst &Inst, unsigned N) const { |
1829 | assert(N == 1 && "Invalid number of operands!");
1830 | Inst.addOperand(MCOperand::createImm(getBarrier())); |
1831 | } |
1832 | |
1833 | void addBarriernXSOperands(MCInst &Inst, unsigned N) const { |
1834 | assert(N == 1 && "Invalid number of operands!");
1835 | Inst.addOperand(MCOperand::createImm(getBarrier())); |
1836 | } |
1837 | |
1838 | void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const { |
1839 | assert(N == 1 && "Invalid number of operands!");
1840 | |
1841 | Inst.addOperand(MCOperand::createImm(SysReg.MRSReg)); |
1842 | } |
1843 | |
1844 | void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const { |
1845 | assert(N == 1 && "Invalid number of operands!");
1846 | |
1847 | Inst.addOperand(MCOperand::createImm(SysReg.MSRReg)); |
1848 | } |
1849 | |
1850 | void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const { |
1851 | assert(N == 1 && "Invalid number of operands!");
1852 | |
1853 | Inst.addOperand(MCOperand::createImm(SysReg.PStateField)); |
1854 | } |
1855 | |
1856 | void addSVCROperands(MCInst &Inst, unsigned N) const { |
1857 | assert(N == 1 && "Invalid number of operands!");
1858 | |
1859 | Inst.addOperand(MCOperand::createImm(SVCR.PStateField)); |
1860 | } |
1861 | |
1862 | void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const { |
1863 | assert(N == 1 && "Invalid number of operands!");
1864 | |
1865 | Inst.addOperand(MCOperand::createImm(SysReg.PStateField)); |
1866 | } |
1867 | |
1868 | void addSysCROperands(MCInst &Inst, unsigned N) const { |
1869 | assert(N == 1 && "Invalid number of operands!");
1870 | Inst.addOperand(MCOperand::createImm(getSysCR())); |
1871 | } |
1872 | |
1873 | void addPrefetchOperands(MCInst &Inst, unsigned N) const { |
1874 | assert(N == 1 && "Invalid number of operands!");
1875 | Inst.addOperand(MCOperand::createImm(getPrefetch())); |
1876 | } |
1877 | |
1878 | void addPSBHintOperands(MCInst &Inst, unsigned N) const { |
1879 | assert(N == 1 && "Invalid number of operands!");
1880 | Inst.addOperand(MCOperand::createImm(getPSBHint())); |
1881 | } |
1882 | |
1883 | void addBTIHintOperands(MCInst &Inst, unsigned N) const { |
1884 | assert(N == 1 && "Invalid number of operands!");
1885 | Inst.addOperand(MCOperand::createImm(getBTIHint())); |
1886 | } |
1887 | |
1888 | void addShifterOperands(MCInst &Inst, unsigned N) const { |
1889 | assert(N == 1 && "Invalid number of operands!");
1890 | unsigned Imm = |
1891 | AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount()); |
1892 | Inst.addOperand(MCOperand::createImm(Imm)); |
1893 | } |
1894 | |
1895 | void addExtendOperands(MCInst &Inst, unsigned N) const { |
1896 | assert(N == 1 && "Invalid number of operands!");
1897 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1898 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW; |
1899 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); |
1900 | Inst.addOperand(MCOperand::createImm(Imm)); |
1901 | } |
1902 | |
1903 | void addExtend64Operands(MCInst &Inst, unsigned N) const { |
1904 | assert(N == 1 && "Invalid number of operands!");
1905 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1906 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX; |
1907 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); |
1908 | Inst.addOperand(MCOperand::createImm(Imm)); |
1909 | } |
1910 | |
1911 | void addMemExtendOperands(MCInst &Inst, unsigned N) const { |
1912 | assert(N == 2 && "Invalid number of operands!");
1913 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1914 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; |
1915 | Inst.addOperand(MCOperand::createImm(IsSigned)); |
1916 | Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0)); |
1917 | } |
1918 | |
1919 | // For 8-bit load/store instructions with a register offset, both the |
1920 | // "DoShift" and "NoShift" variants have a shift of 0. Because of this, |
1921 | // they're disambiguated by whether the shift was explicit or implicit rather |
1922 | // than its size. |
1923 | void addMemExtend8Operands(MCInst &Inst, unsigned N) const { |
1924 | assert(N == 2 && "Invalid number of operands!");
1925 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); |
1926 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; |
1927 | Inst.addOperand(MCOperand::createImm(IsSigned)); |
1928 | Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount())); |
1929 | } |
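// e.g. (assumed syntax) "ldrb w0, [x1, x2, lsl #0]" carries an explicit
// amount and selects the "DoShift" variant, while "ldrb w0, [x1, x2]"
// is implicit and selects "NoShift" -- both describe a shift of 0.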
1930 | |
1931 | template<int Shift> |
1932 | void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const { |
1933 | assert(N == 1 && "Invalid number of operands!");
1934 | |
1935 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
1936 | if (CE) { |
1937 | uint64_t Value = CE->getValue(); |
1938 | Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff)); |
1939 | } else { |
1940 | addExpr(Inst, getImm()); |
1941 | } |
1942 | } |
1943 | |
1944 | template<int Shift> |
1945 | void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const { |
1946 | assert(N == 1 && "Invalid number of operands!");
1947 | |
1948 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); |
1949 | uint64_t Value = CE->getValue(); |
1950 | Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff)); |
1951 | } |
1952 | |
1953 | void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const { |
1954 | assert(N == 1 && "Invalid number of operands!");
1955 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1956 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90)); |
1957 | } |
1958 | |
1959 | void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const { |
1960 | assert(N == 1 && "Invalid number of operands!");
1961 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); |
1962 | Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180)); |
1963 | } |
1964 | |
1965 | void print(raw_ostream &OS) const override; |
1966 | |
1967 | static std::unique_ptr<AArch64Operand> |
1968 | CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) { |
1969 | auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx); |
1970 | Op->Tok.Data = Str.data(); |
1971 | Op->Tok.Length = Str.size(); |
1972 | Op->Tok.IsSuffix = IsSuffix; |
1973 | Op->StartLoc = S; |
1974 | Op->EndLoc = S; |
1975 | return Op; |
1976 | } |
1977 | |
1978 | static std::unique_ptr<AArch64Operand> |
1979 | CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx, |
1980 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg, |
1981 | AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, |
1982 | unsigned ShiftAmount = 0, |
1983 | unsigned HasExplicitAmount = false) { |
1984 | auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx); |
1985 | Op->Reg.RegNum = RegNum; |
1986 | Op->Reg.Kind = Kind; |
1987 | Op->Reg.ElementWidth = 0; |
1988 | Op->Reg.EqualityTy = EqTy; |
1989 | Op->Reg.ShiftExtend.Type = ExtTy; |
1990 | Op->Reg.ShiftExtend.Amount = ShiftAmount; |
1991 | Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount; |
1992 | Op->StartLoc = S; |
1993 | Op->EndLoc = E; |
1994 | return Op; |
1995 | } |
1996 | |
1997 | static std::unique_ptr<AArch64Operand> |
1998 | CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth, |
1999 | SMLoc S, SMLoc E, MCContext &Ctx, |
2000 | AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, |
2001 | unsigned ShiftAmount = 0, |
2002 | unsigned HasExplicitAmount = false) { |
2003 | assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2004 | Kind == RegKind::SVEPredicateVector) &&
2005 | "Invalid vector kind");
2006 | auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount, |
2007 | HasExplicitAmount); |
2008 | Op->Reg.ElementWidth = ElementWidth; |
2009 | return Op; |
2010 | } |
2011 | |
2012 | static std::unique_ptr<AArch64Operand> |
2013 | CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements, |
2014 | unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E, |
2015 | MCContext &Ctx) { |
2016 | auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx); |
2017 | Op->VectorList.RegNum = RegNum; |
2018 | Op->VectorList.Count = Count; |
2019 | Op->VectorList.NumElements = NumElements; |
2020 | Op->VectorList.ElementWidth = ElementWidth; |
2021 | Op->VectorList.RegisterKind = RegisterKind; |
2022 | Op->StartLoc = S; |
2023 | Op->EndLoc = E; |
2024 | return Op; |
2025 | } |
2026 | |
2027 | static std::unique_ptr<AArch64Operand> |
2028 | CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) { |
2029 | auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx); |
2030 | Op->VectorIndex.Val = Idx; |
2031 | Op->StartLoc = S; |
2032 | Op->EndLoc = E; |
2033 | return Op; |
2034 | } |
2035 | |
2036 | static std::unique_ptr<AArch64Operand> |
2037 | CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) { |
2038 | auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx); |
2039 | Op->MatrixTileList.RegMask = RegMask; |
2040 | Op->StartLoc = S; |
2041 | Op->EndLoc = E; |
2042 | return Op; |
2043 | } |
2044 | |
2045 | static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs, |
2046 | const unsigned ElementWidth) { |
2047 | static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>> |
2048 | RegMap = { |
2049 | {{0, AArch64::ZAB0}, |
2050 | {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3, |
2051 | AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}}, |
2052 | {{8, AArch64::ZAB0}, |
2053 | {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3, |
2054 | AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}}, |
2055 | {{16, AArch64::ZAH0}, |
2056 | {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}}, |
2057 | {{16, AArch64::ZAH1}, |
2058 | {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}}, |
2059 | {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}}, |
2060 | {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}}, |
2061 | {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}}, |
2062 | {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}}, |
2063 | }; |
2064 | |
2065 | if (ElementWidth == 64) |
2066 | OutRegs.insert(Reg); |
2067 | else { |
2068 | std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)]; |
2069 | assert(!Regs.empty() && "Invalid tile or element width!");
2070 | for (auto OutReg : Regs) |
2071 | OutRegs.insert(OutReg); |
2072 | } |
2073 | } |
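// e.g. per the map above, the 16-bit tile ZAH0 overlaps the 64-bit tiles
// ZAD0, ZAD2, ZAD4 and ZAD6, so naming ZAH0 marks all four as used;
// a 64-bit tile (ElementWidth == 64) only ever names itself.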
2074 | |
2075 | static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S, |
2076 | SMLoc E, MCContext &Ctx) { |
2077 | auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx); |
2078 | Op->Imm.Val = Val; |
2079 | Op->StartLoc = S; |
2080 | Op->EndLoc = E; |
2081 | return Op; |
2082 | } |
2083 | |
2084 | static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, |
2085 | unsigned ShiftAmount, |
2086 | SMLoc S, SMLoc E, |
2087 | MCContext &Ctx) { |
2088 | auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx); |
2089 | Op->ShiftedImm.Val = Val;
2090 | Op->ShiftedImm.ShiftAmount = ShiftAmount; |
2091 | Op->StartLoc = S; |
2092 | Op->EndLoc = E; |
2093 | return Op; |
2094 | } |
2095 | |
2096 | static std::unique_ptr<AArch64Operand> |
2097 | CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) { |
2098 | auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx); |
2099 | Op->CondCode.Code = Code; |
2100 | Op->StartLoc = S; |
2101 | Op->EndLoc = E; |
2102 | return Op; |
2103 | } |
2104 | |
2105 | static std::unique_ptr<AArch64Operand> |
2106 | CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) { |
2107 | auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx); |
2108 | Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue(); |
2109 | Op->FPImm.IsExact = IsExact; |
2110 | Op->StartLoc = S; |
2111 | Op->EndLoc = S; |
2112 | return Op; |
2113 | } |
2114 | |
2115 | static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, |
2116 | StringRef Str, |
2117 | SMLoc S, |
2118 | MCContext &Ctx, |
2119 | bool HasnXSModifier) { |
2120 | auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx); |
2121 | Op->Barrier.Val = Val; |
2122 | Op->Barrier.Data = Str.data(); |
2123 | Op->Barrier.Length = Str.size(); |
2124 | Op->Barrier.HasnXSModifier = HasnXSModifier; |
2125 | Op->StartLoc = S; |
2126 | Op->EndLoc = S; |
2127 | return Op; |
2128 | } |
2129 | |
2130 | static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S, |
2131 | uint32_t MRSReg, |
2132 | uint32_t MSRReg, |
2133 | uint32_t PStateField, |
2134 | MCContext &Ctx) { |
2135 | auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx); |
2136 | Op->SysReg.Data = Str.data(); |
2137 | Op->SysReg.Length = Str.size(); |
2138 | Op->SysReg.MRSReg = MRSReg; |
2139 | Op->SysReg.MSRReg = MSRReg; |
2140 | Op->SysReg.PStateField = PStateField; |
2141 | Op->StartLoc = S; |
2142 | Op->EndLoc = S; |
2143 | return Op; |
2144 | } |
2145 | |
2146 | static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S, |
2147 | SMLoc E, MCContext &Ctx) { |
2148 | auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx); |
2149 | Op->SysCRImm.Val = Val; |
2150 | Op->StartLoc = S; |
2151 | Op->EndLoc = E; |
2152 | return Op; |
2153 | } |
2154 | |
2155 | static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, |
2156 | StringRef Str, |
2157 | SMLoc S, |
2158 | MCContext &Ctx) { |
2159 | auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx); |
2160 | Op->Prefetch.Val = Val; |
2161 | Op->Barrier.Data = Str.data(); |
2162 | Op->Barrier.Length = Str.size(); |
2163 | Op->StartLoc = S; |
2164 | Op->EndLoc = S; |
2165 | return Op; |
2166 | } |
2167 | |
2168 | static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, |
2169 | StringRef Str, |
2170 | SMLoc S, |
2171 | MCContext &Ctx) { |
2172 | auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx); |
2173 | Op->PSBHint.Val = Val; |
2174 | Op->PSBHint.Data = Str.data(); |
2175 | Op->PSBHint.Length = Str.size(); |
2176 | Op->StartLoc = S; |
2177 | Op->EndLoc = S; |
2178 | return Op; |
2179 | } |
2180 | |
2181 | static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val, |
2182 | StringRef Str, |
2183 | SMLoc S, |
2184 | MCContext &Ctx) { |
2185 | auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx); |
2186 | Op->BTIHint.Val = Val | 32; |
2187 | Op->BTIHint.Data = Str.data(); |
2188 | Op->BTIHint.Length = Str.size(); |
2189 | Op->StartLoc = S; |
2190 | Op->EndLoc = S; |
2191 | return Op; |
2192 | } |
2193 | |
2194 | static std::unique_ptr<AArch64Operand> |
2195 | CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind, |
2196 | SMLoc S, SMLoc E, MCContext &Ctx) { |
2197 | auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx); |
2198 | Op->MatrixReg.RegNum = RegNum; |
2199 | Op->MatrixReg.ElementWidth = ElementWidth; |
2200 | Op->MatrixReg.Kind = Kind; |
2201 | Op->StartLoc = S; |
2202 | Op->EndLoc = E; |
2203 | return Op; |
2204 | } |
2205 | |
2206 | static std::unique_ptr<AArch64Operand> |
2207 | CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) { |
2208 | auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx); |
2209 | Op->SVCR.PStateField = PStateField; |
2210 | Op->SVCR.Data = Str.data(); |
2211 | Op->SVCR.Length = Str.size(); |
2212 | Op->StartLoc = S; |
2213 | Op->EndLoc = S; |
2214 | return Op; |
2215 | } |
2216 | |
2217 | static std::unique_ptr<AArch64Operand> |
2218 | CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, |
2219 | bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { |
2220 | auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx); |
2221 | Op->ShiftExtend.Type = ShOp; |
2222 | Op->ShiftExtend.Amount = Val; |
2223 | Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; |
2224 | Op->StartLoc = S; |
2225 | Op->EndLoc = E; |
2226 | return Op; |
2227 | } |
2228 | }; |
2229 | |
2230 | } // end anonymous namespace. |
2231 | |
2232 | void AArch64Operand::print(raw_ostream &OS) const { |
2233 | switch (Kind) { |
2234 | case k_FPImm: |
2235 | OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue(); |
2236 | if (!getFPImmIsExact()) |
2237 | OS << " (inexact)"; |
2238 | OS << ">"; |
2239 | break; |
2240 | case k_Barrier: { |
2241 | StringRef Name = getBarrierName(); |
2242 | if (!Name.empty()) |
2243 | OS << "<barrier " << Name << ">"; |
2244 | else |
2245 | OS << "<barrier invalid #" << getBarrier() << ">"; |
2246 | break; |
2247 | } |
2248 | case k_Immediate: |
2249 | OS << *getImm(); |
2250 | break; |
2251 | case k_ShiftedImm: { |
2252 | unsigned Shift = getShiftedImmShift(); |
2253 | OS << "<shiftedimm "; |
2254 | OS << *getShiftedImmVal(); |
2255 | OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">"; |
2256 | break; |
2257 | } |
2258 | case k_CondCode: |
2259 | OS << "<condcode " << getCondCode() << ">"; |
2260 | break; |
2261 | case k_VectorList: { |
2262 | OS << "<vectorlist "; |
2263 | unsigned Reg = getVectorListStart(); |
2264 | for (unsigned i = 0, e = getVectorListCount(); i != e; ++i) |
2265 | OS << Reg + i << " "; |
2266 | OS << ">"; |
2267 | break; |
2268 | } |
2269 | case k_VectorIndex: |
2270 | OS << "<vectorindex " << getVectorIndex() << ">"; |
2271 | break; |
2272 | case k_SysReg: |
2273 | OS << "<sysreg: " << getSysReg() << '>'; |
2274 | break; |
2275 | case k_Token: |
2276 | OS << "'" << getToken() << "'"; |
2277 | break; |
2278 | case k_SysCR: |
2279 | OS << "c" << getSysCR(); |
2280 | break; |
2281 | case k_Prefetch: { |
2282 | StringRef Name = getPrefetchName(); |
2283 | if (!Name.empty()) |
2284 | OS << "<prfop " << Name << ">"; |
2285 | else |
2286 | OS << "<prfop invalid #" << getPrefetch() << ">"; |
2287 | break; |
2288 | } |
2289 | case k_PSBHint: |
2290 | OS << getPSBHintName(); |
2291 | break; |
2292 | case k_BTIHint: |
2293 | OS << getBTIHintName(); |
2294 | break; |
2295 | case k_MatrixRegister: |
2296 | OS << "<matrix " << getMatrixReg() << ">"; |
2297 | break; |
2298 | case k_MatrixTileList: { |
2299 | OS << "<matrixlist "; |
2300 | unsigned RegMask = getMatrixTileListRegMask(); |
2301 | unsigned MaxBits = 8; |
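     | // Emit the 8-bit tile-list mask one bit at a time, MSB first.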
2302 | for (unsigned I = MaxBits; I > 0; --I) |
2303 | OS << ((RegMask & (1 << (I - 1))) >> (I - 1)); |
2304 | OS << '>'; |
2305 | break; |
2306 | } |
2307 | case k_SVCR: { |
2308 | OS << getSVCR(); |
2309 | break; |
2310 | } |
2311 | case k_Register: |
2312 | OS << "<register " << getReg() << ">"; |
2313 | if (!getShiftExtendAmount() && !hasShiftExtendAmount()) |
2314 | break; |
2315 | LLVM_FALLTHROUGH;
2316 | case k_ShiftExtend: |
2317 | OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #" |
2318 | << getShiftExtendAmount(); |
2319 | if (!hasShiftExtendAmount()) |
2320 | OS << "<imp>"; |
2321 | OS << '>'; |
2322 | break; |
2323 | } |
2324 | } |
2325 | |
2326 | /// @name Auto-generated Match Functions |
2327 | /// { |
2328 | |
2329 | static unsigned MatchRegisterName(StringRef Name); |
2330 | |
2331 | /// } |
2332 | |
2333 | static unsigned MatchNeonVectorRegName(StringRef Name) { |
2334 | return StringSwitch<unsigned>(Name.lower()) |
2335 | .Case("v0", AArch64::Q0) |
2336 | .Case("v1", AArch64::Q1) |
2337 | .Case("v2", AArch64::Q2) |
2338 | .Case("v3", AArch64::Q3) |
2339 | .Case("v4", AArch64::Q4) |
2340 | .Case("v5", AArch64::Q5) |
2341 | .Case("v6", AArch64::Q6) |
2342 | .Case("v7", AArch64::Q7) |
2343 | .Case("v8", AArch64::Q8) |
2344 | .Case("v9", AArch64::Q9) |
2345 | .Case("v10", AArch64::Q10) |
2346 | .Case("v11", AArch64::Q11) |
2347 | .Case("v12", AArch64::Q12) |
2348 | .Case("v13", AArch64::Q13) |
2349 | .Case("v14", AArch64::Q14) |
2350 | .Case("v15", AArch64::Q15) |
2351 | .Case("v16", AArch64::Q16) |
2352 | .Case("v17", AArch64::Q17) |
2353 | .Case("v18", AArch64::Q18) |
2354 | .Case("v19", AArch64::Q19) |
2355 | .Case("v20", AArch64::Q20) |
2356 | .Case("v21", AArch64::Q21) |
2357 | .Case("v22", AArch64::Q22) |
2358 | .Case("v23", AArch64::Q23) |
2359 | .Case("v24", AArch64::Q24) |
2360 | .Case("v25", AArch64::Q25) |
2361 | .Case("v26", AArch64::Q26) |
2362 | .Case("v27", AArch64::Q27) |
2363 | .Case("v28", AArch64::Q28) |
2364 | .Case("v29", AArch64::Q29) |
2365 | .Case("v30", AArch64::Q30) |
2366 | .Case("v31", AArch64::Q31) |
2367 | .Default(0); |
2368 | } |
2369 | |
2370 | /// Returns an optional pair of (#elements, element-width) if Suffix |
2371 | /// is a valid vector kind. Where the number of elements in a vector |
2372 | /// or the vector width is implicit or explicitly unknown (but still a |
2373 | /// valid suffix kind), 0 is used. |
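     | /// For example, ".4s" yields {4, 32} and the width-neutral ".s" yields {0, 32}.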
2374 | static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix, |
2375 | RegKind VectorKind) { |
2376 | std::pair<int, int> Res = {-1, -1}; |
2377 | |
2378 | switch (VectorKind) { |
2379 | case RegKind::NeonVector: |
2380 | Res = |
2381 | StringSwitch<std::pair<int, int>>(Suffix.lower()) |
2382 | .Case("", {0, 0}) |
2383 | .Case(".1d", {1, 64}) |
2384 | .Case(".1q", {1, 128}) |
2385 | // '.2h' needed for fp16 scalar pairwise reductions |
2386 | .Case(".2h", {2, 16}) |
2387 | .Case(".2s", {2, 32}) |
2388 | .Case(".2d", {2, 64}) |
2389 | // '.4b' is another special case for the ARMv8.2a dot product |
2390 | // operand |
2391 | .Case(".4b", {4, 8}) |
2392 | .Case(".4h", {4, 16}) |
2393 | .Case(".4s", {4, 32}) |
2394 | .Case(".8b", {8, 8}) |
2395 | .Case(".8h", {8, 16}) |
2396 | .Case(".16b", {16, 8}) |
2397 | // Accept the width neutral ones, too, for verbose syntax. If those |
2398 | // aren't used in the right places, the token operand won't match so |
2399 | // all will work out. |
2400 | .Case(".b", {0, 8}) |
2401 | .Case(".h", {0, 16}) |
2402 | .Case(".s", {0, 32}) |
2403 | .Case(".d", {0, 64}) |
2404 | .Default({-1, -1}); |
2405 | break; |
2406 | case RegKind::SVEPredicateVector: |
2407 | case RegKind::SVEDataVector: |
2408 | case RegKind::Matrix: |
2409 | Res = StringSwitch<std::pair<int, int>>(Suffix.lower()) |
2410 | .Case("", {0, 0}) |
2411 | .Case(".b", {0, 8}) |
2412 | .Case(".h", {0, 16}) |
2413 | .Case(".s", {0, 32}) |
2414 | .Case(".d", {0, 64}) |
2415 | .Case(".q", {0, 128}) |
2416 | .Default({-1, -1}); |
2417 | break; |
2418 | default: |
2419 | llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 2419); |
2420 | } |
2421 | |
2422 | if (Res == std::make_pair(-1, -1)) |
2423 | return Optional<std::pair<int, int>>(); |
2424 | |
2425 | return Optional<std::pair<int, int>>(Res); |
2426 | } |
2427 | |
2428 | static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) { |
2429 | return parseVectorKind(Suffix, VectorKind).hasValue(); |
2430 | } |
2431 | |
2432 | static unsigned matchSVEDataVectorRegName(StringRef Name) { |
2433 | return StringSwitch<unsigned>(Name.lower()) |
2434 | .Case("z0", AArch64::Z0) |
2435 | .Case("z1", AArch64::Z1) |
2436 | .Case("z2", AArch64::Z2) |
2437 | .Case("z3", AArch64::Z3) |
2438 | .Case("z4", AArch64::Z4) |
2439 | .Case("z5", AArch64::Z5) |
2440 | .Case("z6", AArch64::Z6) |
2441 | .Case("z7", AArch64::Z7) |
2442 | .Case("z8", AArch64::Z8) |
2443 | .Case("z9", AArch64::Z9) |
2444 | .Case("z10", AArch64::Z10) |
2445 | .Case("z11", AArch64::Z11) |
2446 | .Case("z12", AArch64::Z12) |
2447 | .Case("z13", AArch64::Z13) |
2448 | .Case("z14", AArch64::Z14) |
2449 | .Case("z15", AArch64::Z15) |
2450 | .Case("z16", AArch64::Z16) |
2451 | .Case("z17", AArch64::Z17) |
2452 | .Case("z18", AArch64::Z18) |
2453 | .Case("z19", AArch64::Z19) |
2454 | .Case("z20", AArch64::Z20) |
2455 | .Case("z21", AArch64::Z21) |
2456 | .Case("z22", AArch64::Z22) |
2457 | .Case("z23", AArch64::Z23) |
2458 | .Case("z24", AArch64::Z24) |
2459 | .Case("z25", AArch64::Z25) |
2460 | .Case("z26", AArch64::Z26) |
2461 | .Case("z27", AArch64::Z27) |
2462 | .Case("z28", AArch64::Z28) |
2463 | .Case("z29", AArch64::Z29) |
2464 | .Case("z30", AArch64::Z30) |
2465 | .Case("z31", AArch64::Z31) |
2466 | .Default(0); |
2467 | } |
2468 | |
2469 | static unsigned matchSVEPredicateVectorRegName(StringRef Name) { |
2470 | return StringSwitch<unsigned>(Name.lower()) |
2471 | .Case("p0", AArch64::P0) |
2472 | .Case("p1", AArch64::P1) |
2473 | .Case("p2", AArch64::P2) |
2474 | .Case("p3", AArch64::P3) |
2475 | .Case("p4", AArch64::P4) |
2476 | .Case("p5", AArch64::P5) |
2477 | .Case("p6", AArch64::P6) |
2478 | .Case("p7", AArch64::P7) |
2479 | .Case("p8", AArch64::P8) |
2480 | .Case("p9", AArch64::P9) |
2481 | .Case("p10", AArch64::P10) |
2482 | .Case("p11", AArch64::P11) |
2483 | .Case("p12", AArch64::P12) |
2484 | .Case("p13", AArch64::P13) |
2485 | .Case("p14", AArch64::P14) |
2486 | .Case("p15", AArch64::P15) |
2487 | .Default(0); |
2488 | } |
2489 | |
2490 | static unsigned matchMatrixTileListRegName(StringRef Name) { |
2491 | return StringSwitch<unsigned>(Name.lower()) |
2492 | .Case("za0.d", AArch64::ZAD0) |
2493 | .Case("za1.d", AArch64::ZAD1) |
2494 | .Case("za2.d", AArch64::ZAD2) |
2495 | .Case("za3.d", AArch64::ZAD3) |
2496 | .Case("za4.d", AArch64::ZAD4) |
2497 | .Case("za5.d", AArch64::ZAD5) |
2498 | .Case("za6.d", AArch64::ZAD6) |
2499 | .Case("za7.d", AArch64::ZAD7) |
2500 | .Case("za0.s", AArch64::ZAS0) |
2501 | .Case("za1.s", AArch64::ZAS1) |
2502 | .Case("za2.s", AArch64::ZAS2) |
2503 | .Case("za3.s", AArch64::ZAS3) |
2504 | .Case("za0.h", AArch64::ZAH0) |
2505 | .Case("za1.h", AArch64::ZAH1) |
2506 | .Case("za0.b", AArch64::ZAB0) |
2507 | .Default(0); |
2508 | } |
2509 | |
2510 | static unsigned matchMatrixRegName(StringRef Name) { |
2511 | return StringSwitch<unsigned>(Name.lower()) |
2512 | .Case("za", AArch64::ZA) |
2513 | .Case("za0.q", AArch64::ZAQ0) |
2514 | .Case("za1.q", AArch64::ZAQ1) |
2515 | .Case("za2.q", AArch64::ZAQ2) |
2516 | .Case("za3.q", AArch64::ZAQ3) |
2517 | .Case("za4.q", AArch64::ZAQ4) |
2518 | .Case("za5.q", AArch64::ZAQ5) |
2519 | .Case("za6.q", AArch64::ZAQ6) |
2520 | .Case("za7.q", AArch64::ZAQ7) |
2521 | .Case("za8.q", AArch64::ZAQ8) |
2522 | .Case("za9.q", AArch64::ZAQ9) |
2523 | .Case("za10.q", AArch64::ZAQ10) |
2524 | .Case("za11.q", AArch64::ZAQ11) |
2525 | .Case("za12.q", AArch64::ZAQ12) |
2526 | .Case("za13.q", AArch64::ZAQ13) |
2527 | .Case("za14.q", AArch64::ZAQ14) |
2528 | .Case("za15.q", AArch64::ZAQ15) |
2529 | .Case("za0.d", AArch64::ZAD0) |
2530 | .Case("za1.d", AArch64::ZAD1) |
2531 | .Case("za2.d", AArch64::ZAD2) |
2532 | .Case("za3.d", AArch64::ZAD3) |
2533 | .Case("za4.d", AArch64::ZAD4) |
2534 | .Case("za5.d", AArch64::ZAD5) |
2535 | .Case("za6.d", AArch64::ZAD6) |
2536 | .Case("za7.d", AArch64::ZAD7) |
2537 | .Case("za0.s", AArch64::ZAS0) |
2538 | .Case("za1.s", AArch64::ZAS1) |
2539 | .Case("za2.s", AArch64::ZAS2) |
2540 | .Case("za3.s", AArch64::ZAS3) |
2541 | .Case("za0.h", AArch64::ZAH0) |
2542 | .Case("za1.h", AArch64::ZAH1) |
2543 | .Case("za0.b", AArch64::ZAB0) |
2544 | .Case("za0h.q", AArch64::ZAQ0) |
2545 | .Case("za1h.q", AArch64::ZAQ1) |
2546 | .Case("za2h.q", AArch64::ZAQ2) |
2547 | .Case("za3h.q", AArch64::ZAQ3) |
2548 | .Case("za4h.q", AArch64::ZAQ4) |
2549 | .Case("za5h.q", AArch64::ZAQ5) |
2550 | .Case("za6h.q", AArch64::ZAQ6) |
2551 | .Case("za7h.q", AArch64::ZAQ7) |
2552 | .Case("za8h.q", AArch64::ZAQ8) |
2553 | .Case("za9h.q", AArch64::ZAQ9) |
2554 | .Case("za10h.q", AArch64::ZAQ10) |
2555 | .Case("za11h.q", AArch64::ZAQ11) |
2556 | .Case("za12h.q", AArch64::ZAQ12) |
2557 | .Case("za13h.q", AArch64::ZAQ13) |
2558 | .Case("za14h.q", AArch64::ZAQ14) |
2559 | .Case("za15h.q", AArch64::ZAQ15) |
2560 | .Case("za0h.d", AArch64::ZAD0) |
2561 | .Case("za1h.d", AArch64::ZAD1) |
2562 | .Case("za2h.d", AArch64::ZAD2) |
2563 | .Case("za3h.d", AArch64::ZAD3) |
2564 | .Case("za4h.d", AArch64::ZAD4) |
2565 | .Case("za5h.d", AArch64::ZAD5) |
2566 | .Case("za6h.d", AArch64::ZAD6) |
2567 | .Case("za7h.d", AArch64::ZAD7) |
2568 | .Case("za0h.s", AArch64::ZAS0) |
2569 | .Case("za1h.s", AArch64::ZAS1) |
2570 | .Case("za2h.s", AArch64::ZAS2) |
2571 | .Case("za3h.s", AArch64::ZAS3) |
2572 | .Case("za0h.h", AArch64::ZAH0) |
2573 | .Case("za1h.h", AArch64::ZAH1) |
2574 | .Case("za0h.b", AArch64::ZAB0) |
2575 | .Case("za0v.q", AArch64::ZAQ0) |
2576 | .Case("za1v.q", AArch64::ZAQ1) |
2577 | .Case("za2v.q", AArch64::ZAQ2) |
2578 | .Case("za3v.q", AArch64::ZAQ3) |
2579 | .Case("za4v.q", AArch64::ZAQ4) |
2580 | .Case("za5v.q", AArch64::ZAQ5) |
2581 | .Case("za6v.q", AArch64::ZAQ6) |
2582 | .Case("za7v.q", AArch64::ZAQ7) |
2583 | .Case("za8v.q", AArch64::ZAQ8) |
2584 | .Case("za9v.q", AArch64::ZAQ9) |
2585 | .Case("za10v.q", AArch64::ZAQ10) |
2586 | .Case("za11v.q", AArch64::ZAQ11) |
2587 | .Case("za12v.q", AArch64::ZAQ12) |
2588 | .Case("za13v.q", AArch64::ZAQ13) |
2589 | .Case("za14v.q", AArch64::ZAQ14) |
2590 | .Case("za15v.q", AArch64::ZAQ15) |
2591 | .Case("za0v.d", AArch64::ZAD0) |
2592 | .Case("za1v.d", AArch64::ZAD1) |
2593 | .Case("za2v.d", AArch64::ZAD2) |
2594 | .Case("za3v.d", AArch64::ZAD3) |
2595 | .Case("za4v.d", AArch64::ZAD4) |
2596 | .Case("za5v.d", AArch64::ZAD5) |
2597 | .Case("za6v.d", AArch64::ZAD6) |
2598 | .Case("za7v.d", AArch64::ZAD7) |
2599 | .Case("za0v.s", AArch64::ZAS0) |
2600 | .Case("za1v.s", AArch64::ZAS1) |
2601 | .Case("za2v.s", AArch64::ZAS2) |
2602 | .Case("za3v.s", AArch64::ZAS3) |
2603 | .Case("za0v.h", AArch64::ZAH0) |
2604 | .Case("za1v.h", AArch64::ZAH1) |
2605 | .Case("za0v.b", AArch64::ZAB0) |
2606 | .Default(0); |
2607 | } |
2608 | |
2609 | bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, |
2610 | SMLoc &EndLoc) { |
2611 | return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success; |
2612 | } |
2613 | |
2614 | OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo, |
2615 | SMLoc &StartLoc, |
2616 | SMLoc &EndLoc) { |
2617 | StartLoc = getLoc(); |
2618 | auto Res = tryParseScalarRegister(RegNo); |
2619 | EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
2620 | return Res; |
2621 | } |
2622 | |
2623 | // Matches a register name or register alias previously defined by '.req' |
2624 | unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, |
2625 | RegKind Kind) { |
2626 | unsigned RegNum = 0; |
2627 | if ((RegNum = matchSVEDataVectorRegName(Name))) |
2628 | return Kind == RegKind::SVEDataVector ? RegNum : 0; |
2629 | |
2630 | if ((RegNum = matchSVEPredicateVectorRegName(Name))) |
2631 | return Kind == RegKind::SVEPredicateVector ? RegNum : 0; |
2632 | |
2633 | if ((RegNum = MatchNeonVectorRegName(Name))) |
2634 | return Kind == RegKind::NeonVector ? RegNum : 0; |
2635 | |
2636 | if ((RegNum = matchMatrixRegName(Name))) |
2637 | return Kind == RegKind::Matrix ? RegNum : 0; |
2638 | |
2639 | // The parsed register must be of RegKind Scalar |
2640 | if ((RegNum = MatchRegisterName(Name))) |
2641 | return Kind == RegKind::Scalar ? RegNum : 0; |
2642 | |
2643 | if (!RegNum) { |
2644 | // Handle a few common aliases of registers. |
2645 | if (auto RegNum = StringSwitch<unsigned>(Name.lower()) |
2646 | .Case("fp", AArch64::FP) |
2647 | .Case("lr", AArch64::LR) |
2648 | .Case("x31", AArch64::XZR) |
2649 | .Case("w31", AArch64::WZR) |
2650 | .Default(0)) |
2651 | return Kind == RegKind::Scalar ? RegNum : 0; |
2652 | |
2653 | // Check for aliases registered via .req. Canonicalize to lower case. |
2654 | // That's more consistent since register names are case insensitive, and |
2655 | // it's how the original entry was passed in from MC/MCParser/AsmParser. |
2656 | auto Entry = RegisterReqs.find(Name.lower()); |
2657 | if (Entry == RegisterReqs.end()) |
2658 | return 0; |
2659 | |
2660 | // Set RegNum only if the match is the right kind of register.
2661 | if (Kind == Entry->getValue().first) |
2662 | RegNum = Entry->getValue().second; |
2663 | } |
2664 | return RegNum; |
2665 | } |
2666 | |
2667 | /// tryParseScalarRegister - Try to parse a register name. The token must be an
2668 | /// Identifier when called; if it is a register name, the token is eaten and the
2669 | /// register number is returned through \p RegNum.
2670 | OperandMatchResultTy |
2671 | AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) { |
2672 | const AsmToken &Tok = getTok(); |
2673 | if (Tok.isNot(AsmToken::Identifier)) |
2674 | return MatchOperand_NoMatch; |
2675 | |
2676 | std::string lowerCase = Tok.getString().lower(); |
2677 | unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar); |
2678 | if (Reg == 0) |
2679 | return MatchOperand_NoMatch; |
2680 | |
2681 | RegNum = Reg; |
2682 | Lex(); // Eat identifier token. |
2683 | return MatchOperand_Success; |
2684 | } |
2685 | |
2686 | /// tryParseSysCROperand - Try to parse a system instruction CR operand name. |
2687 | OperandMatchResultTy |
2688 | AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) { |
2689 | SMLoc S = getLoc(); |
2690 | |
2691 | if (getTok().isNot(AsmToken::Identifier)) { |
2692 | Error(S, "Expected cN operand where 0 <= N <= 15"); |
2693 | return MatchOperand_ParseFail; |
2694 | } |
2695 | |
2696 | StringRef Tok = getTok().getIdentifier(); |
2697 | if (Tok[0] != 'c' && Tok[0] != 'C') { |
2698 | Error(S, "Expected cN operand where 0 <= N <= 15"); |
2699 | return MatchOperand_ParseFail; |
2700 | } |
2701 | |
2702 | uint32_t CRNum; |
2703 | bool BadNum = Tok.drop_front().getAsInteger(10, CRNum); |
2704 | if (BadNum || CRNum > 15) { |
2705 | Error(S, "Expected cN operand where 0 <= N <= 15"); |
2706 | return MatchOperand_ParseFail; |
2707 | } |
2708 | |
2709 | Lex(); // Eat identifier token. |
2710 | Operands.push_back( |
2711 | AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext())); |
2712 | return MatchOperand_Success; |
2713 | } |
2714 | |
2715 | /// tryParsePrefetch - Try to parse a prefetch operand. |
2716 | template <bool IsSVEPrefetch> |
2717 | OperandMatchResultTy |
2718 | AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) { |
2719 | SMLoc S = getLoc(); |
2720 | const AsmToken &Tok = getTok(); |
2721 | |
2722 | auto LookupByName = [](StringRef N) { |
2723 | if (IsSVEPrefetch) { |
2724 | if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N)) |
2725 | return Optional<unsigned>(Res->Encoding); |
2726 | } else if (auto Res = AArch64PRFM::lookupPRFMByName(N)) |
2727 | return Optional<unsigned>(Res->Encoding); |
2728 | return Optional<unsigned>(); |
2729 | }; |
2730 | |
2731 | auto LookupByEncoding = [](unsigned E) { |
2732 | if (IsSVEPrefetch) { |
2733 | if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E)) |
2734 | return Optional<StringRef>(Res->Name); |
2735 | } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E)) |
2736 | return Optional<StringRef>(Res->Name); |
2737 | return Optional<StringRef>(); |
2738 | }; |
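     | // SVE prefetch ops encode a 4-bit prfop; base PRFM ops use 5 bits.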
2739 | unsigned MaxVal = IsSVEPrefetch ? 15 : 31; |
2740 | |
2741 | // Either an identifier for named values or an immediate in [0, MaxVal].
2742 | // Eat optional hash. |
2743 | if (parseOptionalToken(AsmToken::Hash) || |
2744 | Tok.is(AsmToken::Integer)) { |
2745 | const MCExpr *ImmVal; |
2746 | if (getParser().parseExpression(ImmVal)) |
2747 | return MatchOperand_ParseFail; |
2748 | |
2749 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
2750 | if (!MCE) { |
2751 | TokError("immediate value expected for prefetch operand"); |
2752 | return MatchOperand_ParseFail; |
2753 | } |
2754 | unsigned prfop = MCE->getValue(); |
2755 | if (prfop > MaxVal) { |
2756 | TokError("prefetch operand out of range, [0," + utostr(MaxVal) + |
2757 | "] expected"); |
2758 | return MatchOperand_ParseFail; |
2759 | } |
2760 | |
2761 | auto PRFM = LookupByEncoding(MCE->getValue()); |
2762 | Operands.push_back(AArch64Operand::CreatePrefetch( |
2763 | prfop, PRFM.getValueOr(""), S, getContext())); |
2764 | return MatchOperand_Success; |
2765 | } |
2766 | |
2767 | if (Tok.isNot(AsmToken::Identifier)) { |
2768 | TokError("prefetch hint expected"); |
2769 | return MatchOperand_ParseFail; |
2770 | } |
2771 | |
2772 | auto PRFM = LookupByName(Tok.getString()); |
2773 | if (!PRFM) { |
2774 | TokError("prefetch hint expected"); |
2775 | return MatchOperand_ParseFail; |
2776 | } |
2777 | |
2778 | Operands.push_back(AArch64Operand::CreatePrefetch( |
2779 | *PRFM, Tok.getString(), S, getContext())); |
2780 | Lex(); // Eat identifier token. |
2781 | return MatchOperand_Success; |
2782 | } |
2783 | |
2784 | /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command |
2785 | OperandMatchResultTy |
2786 | AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { |
2787 | SMLoc S = getLoc(); |
2788 | const AsmToken &Tok = getTok(); |
2789 | if (Tok.isNot(AsmToken::Identifier)) { |
2790 | TokError("invalid operand for instruction"); |
2791 | return MatchOperand_ParseFail; |
2792 | } |
2793 | |
2794 | auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString()); |
2795 | if (!PSB) { |
2796 | TokError("invalid operand for instruction"); |
2797 | return MatchOperand_ParseFail; |
2798 | } |
2799 | |
2800 | Operands.push_back(AArch64Operand::CreatePSBHint( |
2801 | PSB->Encoding, Tok.getString(), S, getContext())); |
2802 | Lex(); // Eat identifier token. |
2803 | return MatchOperand_Success; |
2804 | } |
2805 | |
2806 | /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command |
2807 | OperandMatchResultTy |
2808 | AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) { |
2809 | SMLoc S = getLoc(); |
2810 | const AsmToken &Tok = getTok(); |
2811 | if (Tok.isNot(AsmToken::Identifier)) { |
2812 | TokError("invalid operand for instruction"); |
2813 | return MatchOperand_ParseFail; |
2814 | } |
2815 | |
2816 | auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString()); |
2817 | if (!BTI) { |
2818 | TokError("invalid operand for instruction"); |
2819 | return MatchOperand_ParseFail; |
2820 | } |
2821 | |
2822 | Operands.push_back(AArch64Operand::CreateBTIHint( |
2823 | BTI->Encoding, Tok.getString(), S, getContext())); |
2824 | Lex(); // Eat identifier token. |
2825 | return MatchOperand_Success; |
2826 | } |
2827 | |
2828 | /// tryParseAdrpLabel - Parse and validate a source label for the ADRP |
2829 | /// instruction. |
2830 | OperandMatchResultTy |
2831 | AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) { |
2832 | SMLoc S = getLoc(); |
2833 | const MCExpr *Expr = nullptr; |
2834 | |
2835 | if (getTok().is(AsmToken::Hash)) { |
2836 | Lex(); // Eat hash token. |
2837 | } |
2838 | |
2839 | if (parseSymbolicImmVal(Expr)) |
2840 | return MatchOperand_ParseFail; |
2841 | |
2842 | AArch64MCExpr::VariantKind ELFRefKind; |
2843 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
2844 | int64_t Addend; |
2845 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { |
2846 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && |
2847 | ELFRefKind == AArch64MCExpr::VK_INVALID) { |
2848 | // No modifier was specified at all; this is the syntax for an ELF basic |
2849 | // ADRP relocation (unfortunately). |
2850 | Expr = |
2851 | AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext()); |
2852 | } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE || |
2853 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) && |
2854 | Addend != 0) { |
2855 | Error(S, "gotpage label reference not allowed an addend"); |
2856 | return MatchOperand_ParseFail; |
2857 | } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE && |
2858 | DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE && |
2859 | DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE && |
2860 | ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC && |
2861 | ELFRefKind != AArch64MCExpr::VK_GOT_PAGE && |
2862 | ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 && |
2863 | ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE && |
2864 | ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) { |
2865 | // The operand must be an @page or @gotpage qualified symbolref. |
2866 | Error(S, "page or gotpage label reference expected"); |
2867 | return MatchOperand_ParseFail; |
2868 | } |
2869 | } |
2870 | |
2871 | // We have either a label reference possibly with addend or an immediate. The |
2872 | // addend is a raw value here. The linker will adjust it to only reference the |
2873 | // page. |
2874 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
2875 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); |
2876 | |
2877 | return MatchOperand_Success; |
2878 | } |
2879 | |
2880 | /// tryParseAdrLabel - Parse and validate a source label for the ADR |
2881 | /// instruction. |
2882 | OperandMatchResultTy |
2883 | AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { |
2884 | SMLoc S = getLoc(); |
2885 | const MCExpr *Expr = nullptr; |
2886 | |
2887 | // Leave anything with a bracket to the default for SVE |
2888 | if (getTok().is(AsmToken::LBrac)) |
2889 | return MatchOperand_NoMatch; |
2890 | |
2891 | if (getTok().is(AsmToken::Hash)) |
2892 | Lex(); // Eat hash token. |
2893 | |
2894 | if (parseSymbolicImmVal(Expr)) |
2895 | return MatchOperand_ParseFail; |
2896 | |
2897 | AArch64MCExpr::VariantKind ELFRefKind; |
2898 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
2899 | int64_t Addend; |
2900 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { |
2901 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && |
2902 | ELFRefKind == AArch64MCExpr::VK_INVALID) { |
2903 | // No modifier was specified at all; this is the syntax for an ELF basic |
2904 | // ADR relocation (unfortunately). |
2905 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext()); |
2906 | } else { |
2907 | Error(S, "unexpected adr label"); |
2908 | return MatchOperand_ParseFail; |
2909 | } |
2910 | } |
2911 | |
2912 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
2913 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); |
2914 | return MatchOperand_Success; |
2915 | } |
2916 | |
2917 | /// tryParseFPImm - A floating point immediate expression operand. |
2918 | template<bool AddFPZeroAsLiteral> |
2919 | OperandMatchResultTy |
2920 | AArch64AsmParser::tryParseFPImm(OperandVector &Operands) { |
2921 | SMLoc S = getLoc(); |
2922 | |
2923 | bool Hash = parseOptionalToken(AsmToken::Hash); |
2924 | |
2925 | // Handle negation, as that still comes through as a separate token. |
2926 | bool isNegative = parseOptionalToken(AsmToken::Minus); |
2927 | |
2928 | const AsmToken &Tok = getTok(); |
2929 | if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) { |
2930 | if (!Hash) |
2931 | return MatchOperand_NoMatch; |
2932 | TokError("invalid floating point immediate"); |
2933 | return MatchOperand_ParseFail; |
2934 | } |
2935 | |
2936 | // Parse hexadecimal representation. |
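     | // A hex literal here is the raw 8-bit FPImm encoding, which getFPImmFloat
     | // expands to its floating point value.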
2937 | if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) { |
2938 | if (Tok.getIntVal() > 255 || isNegative) { |
2939 | TokError("encoded floating point value out of range"); |
2940 | return MatchOperand_ParseFail; |
2941 | } |
2942 | |
2943 | APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal())); |
2944 | Operands.push_back( |
2945 | AArch64Operand::CreateFPImm(F, true, S, getContext())); |
2946 | } else { |
2947 | // Parse FP representation. |
2948 | APFloat RealVal(APFloat::IEEEdouble()); |
2949 | auto StatusOrErr = |
2950 | RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero); |
2951 | if (errorToBool(StatusOrErr.takeError())) { |
2952 | TokError("invalid floating point representation"); |
2953 | return MatchOperand_ParseFail; |
2954 | } |
2955 | |
2956 | if (isNegative) |
2957 | RealVal.changeSign(); |
2958 | |
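     | // With AddFPZeroAsLiteral, +0.0 is pushed as the literal tokens "#0" and
     | // ".0" so it can match the zero-literal operand forms.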
2959 | if (AddFPZeroAsLiteral && RealVal.isPosZero()) { |
2960 | Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext())); |
2961 | Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext())); |
2962 | } else |
2963 | Operands.push_back(AArch64Operand::CreateFPImm( |
2964 | RealVal, *StatusOrErr == APFloat::opOK, S, getContext())); |
2965 | } |
2966 | |
2967 | Lex(); // Eat the token. |
2968 | |
2969 | return MatchOperand_Success; |
2970 | } |
2971 | |
2972 | /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with |
2973 | /// a shift suffix, for example '#1, lsl #12'. |
2974 | OperandMatchResultTy |
2975 | AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) { |
2976 | SMLoc S = getLoc(); |
2977 | |
2978 | if (getTok().is(AsmToken::Hash)) |
2979 | Lex(); // Eat '#' |
2980 | else if (getTok().isNot(AsmToken::Integer)) |
2981 | // The operand must start with '#' or be a bare integer; otherwise it is no match.
2982 | return MatchOperand_NoMatch; |
2983 | |
2984 | const MCExpr *Imm = nullptr; |
2985 | if (parseSymbolicImmVal(Imm)) |
2986 | return MatchOperand_ParseFail; |
2987 | else if (getTok().isNot(AsmToken::Comma)) { |
2988 | Operands.push_back( |
2989 | AArch64Operand::CreateImm(Imm, S, getLoc(), getContext())); |
2990 | return MatchOperand_Success; |
2991 | } |
2992 | |
2993 | // Eat ',' |
2994 | Lex(); |
2995 | |
2996 | // The optional operand must be "lsl #N" where N is non-negative. |
2997 | if (!getTok().is(AsmToken::Identifier) || |
2998 | !getTok().getIdentifier().equals_insensitive("lsl")) { |
2999 | Error(getLoc(), "only 'lsl #+N' valid after immediate"); |
3000 | return MatchOperand_ParseFail; |
3001 | } |
3002 | |
3003 | // Eat 'lsl' |
3004 | Lex(); |
3005 | |
3006 | parseOptionalToken(AsmToken::Hash); |
3007 | |
3008 | if (getTok().isNot(AsmToken::Integer)) { |
3009 | Error(getLoc(), "only 'lsl #+N' valid after immediate"); |
3010 | return MatchOperand_ParseFail; |
3011 | } |
3012 | |
3013 | int64_t ShiftAmount = getTok().getIntVal(); |
3014 | |
3015 | if (ShiftAmount < 0) { |
3016 | Error(getLoc(), "positive shift amount required"); |
3017 | return MatchOperand_ParseFail; |
3018 | } |
3019 | Lex(); // Eat the number |
3020 | |
3021 | // If the optional shift is 'lsl #0', fold it back into a plain immediate operand.
3022 | if (ShiftAmount == 0 && Imm != nullptr) { |
3023 | Operands.push_back( |
3024 | AArch64Operand::CreateImm(Imm, S, getLoc(), getContext())); |
3025 | return MatchOperand_Success; |
3026 | } |
3027 | |
3028 | Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, |
3029 | getLoc(), getContext())); |
3030 | return MatchOperand_Success; |
3031 | } |
3032 | |
3033 | /// parseCondCodeString - Parse a Condition Code string, optionally returning a
3034 | /// suggestion to help correct common typos.
3035 | AArch64CC::CondCode |
3036 | AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) { |
3037 | AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) |
3038 | .Case("eq", AArch64CC::EQ) |
3039 | .Case("ne", AArch64CC::NE) |
3040 | .Case("cs", AArch64CC::HS) |
3041 | .Case("hs", AArch64CC::HS) |
3042 | .Case("cc", AArch64CC::LO) |
3043 | .Case("lo", AArch64CC::LO) |
3044 | .Case("mi", AArch64CC::MI) |
3045 | .Case("pl", AArch64CC::PL) |
3046 | .Case("vs", AArch64CC::VS) |
3047 | .Case("vc", AArch64CC::VC) |
3048 | .Case("hi", AArch64CC::HI) |
3049 | .Case("ls", AArch64CC::LS) |
3050 | .Case("ge", AArch64CC::GE) |
3051 | .Case("lt", AArch64CC::LT) |
3052 | .Case("gt", AArch64CC::GT) |
3053 | .Case("le", AArch64CC::LE) |
3054 | .Case("al", AArch64CC::AL) |
3055 | .Case("nv", AArch64CC::NV) |
3056 | .Default(AArch64CC::Invalid); |
3057 | |
3058 | if (CC == AArch64CC::Invalid && |
3059 | getSTI().getFeatureBits()[AArch64::FeatureSVE]) { |
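     | // With SVE, also accept the SVE condition-code aliases, which map onto the
     | // standard codes (e.g. "none" == EQ, "any" == NE).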
3060 | CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) |
3061 | .Case("none", AArch64CC::EQ) |
3062 | .Case("any", AArch64CC::NE) |
3063 | .Case("nlast", AArch64CC::HS) |
3064 | .Case("last", AArch64CC::LO) |
3065 | .Case("first", AArch64CC::MI) |
3066 | .Case("nfrst", AArch64CC::PL) |
3067 | .Case("pmore", AArch64CC::HI) |
3068 | .Case("plast", AArch64CC::LS) |
3069 | .Case("tcont", AArch64CC::GE) |
3070 | .Case("tstop", AArch64CC::LT) |
3071 | .Default(AArch64CC::Invalid); |
3072 | |
3073 | if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst") |
3074 | Suggestion = "nfrst"; |
3075 | } |
3076 | return CC; |
3077 | } |
3078 | |
3079 | /// parseCondCode - Parse a Condition Code operand. |
3080 | bool AArch64AsmParser::parseCondCode(OperandVector &Operands, |
3081 | bool invertCondCode) { |
3082 | SMLoc S = getLoc(); |
3083 | const AsmToken &Tok = getTok(); |
3084 | assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3085 | |
3086 | StringRef Cond = Tok.getString(); |
3087 | std::string Suggestion; |
3088 | AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion); |
3089 | if (CC == AArch64CC::Invalid) { |
3090 | std::string Msg = "invalid condition code"; |
3091 | if (!Suggestion.empty()) |
3092 | Msg += ", did you mean " + Suggestion + "?"; |
3093 | return TokError(Msg); |
3094 | } |
3095 | Lex(); // Eat identifier token. |
3096 | |
3097 | if (invertCondCode) { |
3098 | if (CC == AArch64CC::AL || CC == AArch64CC::NV) |
3099 | return TokError("condition codes AL and NV are invalid for this instruction"); |
3100 | CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC)); |
3101 | } |
3102 | |
3103 | Operands.push_back( |
3104 | AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext())); |
3105 | return false; |
3106 | } |
3107 | |
3108 | OperandMatchResultTy |
3109 | AArch64AsmParser::tryParseSVCR(OperandVector &Operands) { |
3110 | const AsmToken &Tok = getTok(); |
3111 | SMLoc S = getLoc(); |
3112 | |
3113 | if (Tok.isNot(AsmToken::Identifier)) { |
3114 | TokError("invalid operand for instruction"); |
3115 | return MatchOperand_ParseFail; |
3116 | } |
3117 | |
3118 | unsigned PStateImm = -1; |
3119 | const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString()); |
3120 | if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits())) |
3121 | PStateImm = SVCR->Encoding; |
3122 | |
3123 | Operands.push_back( |
3124 | AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext())); |
3125 | Lex(); // Eat identifier token. |
3126 | return MatchOperand_Success; |
3127 | } |
3128 | |
3129 | OperandMatchResultTy |
3130 | AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) { |
3131 | const AsmToken &Tok = getTok(); |
3132 | SMLoc S = getLoc(); |
3133 | |
3134 | StringRef Name = Tok.getString(); |
3135 | |
3136 | if (Name.equals_insensitive("za")) { |
3137 | Lex(); // eat "za" |
3138 | Operands.push_back(AArch64Operand::CreateMatrixRegister( |
3139 | AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(), |
3140 | getContext())); |
3141 | if (getLexer().is(AsmToken::LBrac)) { |
3142 | // There's no comma after a matrix operand, so we can parse the next
3143 | // operand immediately.
3144 | if (parseOperand(Operands, false, false)) |
3145 | return MatchOperand_NoMatch; |
3146 | } |
3147 | return MatchOperand_Success; |
3148 | } |
3149 | |
3150 | // Try to parse matrix register. |
3151 | unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix); |
3152 | if (!Reg) |
3153 | return MatchOperand_NoMatch; |
3154 | |
3155 | size_t DotPosition = Name.find('.'); |
3156 | assert(DotPosition != StringRef::npos && "Unexpected register");
3157 | |
3158 | StringRef Head = Name.take_front(DotPosition); |
3159 | StringRef Tail = Name.drop_front(DotPosition); |
3160 | StringRef RowOrColumn = Head.take_back(); |
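     | // e.g. for "za3h.s": Head = "za3h", Tail = ".s", RowOrColumn = "h" (a row slice).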
3161 | |
3162 | MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn) |
3163 | .Case("h", MatrixKind::Row) |
3164 | .Case("v", MatrixKind::Col) |
3165 | .Default(MatrixKind::Tile); |
3166 | |
3167 | // Next up, parsing the suffix |
3168 | const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix); |
3169 | if (!KindRes) { |
3170 | TokError("Expected the register to be followed by element width suffix"); |
3171 | return MatchOperand_ParseFail; |
3172 | } |
3173 | unsigned ElementWidth = KindRes->second; |
3174 | |
3175 | Lex(); |
3176 | |
3177 | Operands.push_back(AArch64Operand::CreateMatrixRegister( |
3178 | Reg, ElementWidth, Kind, S, getLoc(), getContext())); |
3179 | |
3180 | if (getLexer().is(AsmToken::LBrac)) { |
3181 | // There's no comma after a matrix operand, so we can parse the next
3182 | // operand immediately.
3183 | if (parseOperand(Operands, false, false)) |
3184 | return MatchOperand_NoMatch; |
3185 | } |
3186 | return MatchOperand_Success; |
3187 | } |
3188 | |
3189 | /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
3190 | /// argument. Parse it if present.
3191 | OperandMatchResultTy |
3192 | AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) { |
3193 | const AsmToken &Tok = getTok(); |
3194 | std::string LowerID = Tok.getString().lower(); |
3195 | AArch64_AM::ShiftExtendType ShOp = |
3196 | StringSwitch<AArch64_AM::ShiftExtendType>(LowerID) |
3197 | .Case("lsl", AArch64_AM::LSL) |
3198 | .Case("lsr", AArch64_AM::LSR) |
3199 | .Case("asr", AArch64_AM::ASR) |
3200 | .Case("ror", AArch64_AM::ROR) |
3201 | .Case("msl", AArch64_AM::MSL) |
3202 | .Case("uxtb", AArch64_AM::UXTB) |
3203 | .Case("uxth", AArch64_AM::UXTH) |
3204 | .Case("uxtw", AArch64_AM::UXTW) |
3205 | .Case("uxtx", AArch64_AM::UXTX) |
3206 | .Case("sxtb", AArch64_AM::SXTB) |
3207 | .Case("sxth", AArch64_AM::SXTH) |
3208 | .Case("sxtw", AArch64_AM::SXTW) |
3209 | .Case("sxtx", AArch64_AM::SXTX) |
3210 | .Default(AArch64_AM::InvalidShiftExtend); |
3211 | |
3212 | if (ShOp == AArch64_AM::InvalidShiftExtend) |
3213 | return MatchOperand_NoMatch; |
3214 | |
3215 | SMLoc S = Tok.getLoc(); |
3216 | Lex(); |
3217 | |
3218 | bool Hash = parseOptionalToken(AsmToken::Hash); |
3219 | |
3220 | if (!Hash && getLexer().isNot(AsmToken::Integer)) { |
3221 | if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR || |
3222 | ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR || |
3223 | ShOp == AArch64_AM::MSL) { |
3224 | // We expect a number here. |
3225 | TokError("expected #imm after shift specifier"); |
3226 | return MatchOperand_ParseFail; |
3227 | } |
3228 | |
3229 | // "extend" type operations don't need an immediate, #0 is implicit. |
3230 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
3231 | Operands.push_back( |
3232 | AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext())); |
3233 | return MatchOperand_Success; |
3234 | } |
3235 | |
3236 | // Make sure we do actually have a number, identifier or a parenthesized |
3237 | // expression. |
3238 | SMLoc E = getLoc(); |
3239 | if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) && |
3240 | !getTok().is(AsmToken::Identifier)) { |
3241 | Error(E, "expected integer shift amount"); |
3242 | return MatchOperand_ParseFail; |
3243 | } |
3244 | |
3245 | const MCExpr *ImmVal; |
3246 | if (getParser().parseExpression(ImmVal)) |
3247 | return MatchOperand_ParseFail; |
3248 | |
3249 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
3250 | if (!MCE) { |
3251 | Error(E, "expected constant '#imm' after shift specifier"); |
3252 | return MatchOperand_ParseFail; |
3253 | } |
3254 | |
3255 | E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
3256 | Operands.push_back(AArch64Operand::CreateShiftExtend( |
3257 | ShOp, MCE->getValue(), true, S, E, getContext())); |
3258 | return MatchOperand_Success; |
3259 | } |
3260 | |
3261 | static const struct Extension { |
3262 | const char *Name; |
3263 | const FeatureBitset Features; |
3264 | } ExtensionMap[] = { |
3265 | {"crc", {AArch64::FeatureCRC}}, |
3266 | {"sm4", {AArch64::FeatureSM4}}, |
3267 | {"sha3", {AArch64::FeatureSHA3}}, |
3268 | {"sha2", {AArch64::FeatureSHA2}}, |
3269 | {"aes", {AArch64::FeatureAES}}, |
3270 | {"crypto", {AArch64::FeatureCrypto}}, |
3271 | {"fp", {AArch64::FeatureFPARMv8}}, |
3272 | {"simd", {AArch64::FeatureNEON}}, |
3273 | {"ras", {AArch64::FeatureRAS}}, |
3274 | {"lse", {AArch64::FeatureLSE}}, |
3275 | {"predres", {AArch64::FeaturePredRes}}, |
3276 | {"ccdp", {AArch64::FeatureCacheDeepPersist}}, |
3277 | {"mte", {AArch64::FeatureMTE}}, |
3278 | {"memtag", {AArch64::FeatureMTE}}, |
3279 | {"tlb-rmi", {AArch64::FeatureTLB_RMI}}, |
3280 | {"pan", {AArch64::FeaturePAN}}, |
3281 | {"pan-rwv", {AArch64::FeaturePAN_RWV}}, |
3282 | {"ccpp", {AArch64::FeatureCCPP}}, |
3283 | {"rcpc", {AArch64::FeatureRCPC}}, |
3284 | {"rng", {AArch64::FeatureRandGen}}, |
3285 | {"sve", {AArch64::FeatureSVE}}, |
3286 | {"sve2", {AArch64::FeatureSVE2}}, |
3287 | {"sve2-aes", {AArch64::FeatureSVE2AES}}, |
3288 | {"sve2-sm4", {AArch64::FeatureSVE2SM4}}, |
3289 | {"sve2-sha3", {AArch64::FeatureSVE2SHA3}}, |
3290 | {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}}, |
3291 | {"ls64", {AArch64::FeatureLS64}}, |
3292 | {"xs", {AArch64::FeatureXS}}, |
3293 | {"pauth", {AArch64::FeaturePAuth}}, |
3294 | {"flagm", {AArch64::FeatureFlagM}}, |
3295 | {"rme", {AArch64::FeatureRME}}, |
3296 | {"sme", {AArch64::FeatureSME}}, |
3297 | {"sme-f64", {AArch64::FeatureSMEF64}}, |
3298 | {"sme-i64", {AArch64::FeatureSMEI64}}, |
3299 | {"hbc", {AArch64::FeatureHBC}}, |
3300 | {"mops", {AArch64::FeatureMOPS}}, |
3301 | // FIXME: Unsupported extensions |
3302 | {"lor", {}}, |
3303 | {"rdma", {}}, |
3304 | {"profile", {}}, |
3305 | }; |
3306 | |
3307 | static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) { |
3308 | if (FBS[AArch64::HasV8_0aOps]) |
3309 | Str += "ARMv8a"; |
3310 | if (FBS[AArch64::HasV8_1aOps]) |
3311 | Str += "ARMv8.1a"; |
3312 | else if (FBS[AArch64::HasV8_2aOps]) |
3313 | Str += "ARMv8.2a"; |
3314 | else if (FBS[AArch64::HasV8_3aOps]) |
3315 | Str += "ARMv8.3a"; |
3316 | else if (FBS[AArch64::HasV8_4aOps]) |
3317 | Str += "ARMv8.4a"; |
3318 | else if (FBS[AArch64::HasV8_5aOps]) |
3319 | Str += "ARMv8.5a"; |
3320 | else if (FBS[AArch64::HasV8_6aOps]) |
3321 | Str += "ARMv8.6a"; |
3322 | else if (FBS[AArch64::HasV8_7aOps]) |
3323 | Str += "ARMv8.7a"; |
3324 | else if (FBS[AArch64::HasV8_8aOps]) |
3325 | Str += "ARMv8.8a"; |
3326 | else if (FBS[AArch64::HasV9_0aOps]) |
3327 | Str += "ARMv9-a"; |
3328 | else if (FBS[AArch64::HasV9_1aOps]) |
3329 | Str += "ARMv9.1a"; |
3330 | else if (FBS[AArch64::HasV9_2aOps]) |
3331 | Str += "ARMv9.2a"; |
3332 | else if (FBS[AArch64::HasV9_3aOps]) |
3333 | Str += "ARMv9.3a"; |
3334 | else if (FBS[AArch64::HasV8_0rOps]) |
3335 | Str += "ARMv8r"; |
3336 | else { |
3337 | SmallVector<std::string, 2> ExtMatches; |
3338 | for (const auto& Ext : ExtensionMap) { |
3339 | // Use & in case multiple features are enabled |
3340 | if ((FBS & Ext.Features) != FeatureBitset()) |
3341 | ExtMatches.push_back(Ext.Name); |
3342 | } |
3343 | Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)"; |
3344 | } |
3345 | } |
3346 | |
3347 | void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands, |
3348 | SMLoc S) { |
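     | // Decompose the 14-bit SYS alias encoding: Op1 = bits [13:11], Cn = [10:7],
     | // Cm = [6:3], Op2 = [2:0].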
3349 | const uint16_t Op2 = Encoding & 7; |
3350 | const uint16_t Cm = (Encoding & 0x78) >> 3; |
3351 | const uint16_t Cn = (Encoding & 0x780) >> 7; |
3352 | const uint16_t Op1 = (Encoding & 0x3800) >> 11; |
3353 | |
3354 | const MCExpr *Expr = MCConstantExpr::create(Op1, getContext()); |
3355 | |
3356 | Operands.push_back( |
3357 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); |
3358 | Operands.push_back( |
3359 | AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); |
3360 | Operands.push_back( |
3361 | AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); |
3362 | Expr = MCConstantExpr::create(Op2, getContext()); |
3363 | Operands.push_back( |
3364 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); |
3365 | } |
3366 | |
3367 | /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for |
3368 | /// the SYS instruction. Parse them specially so that we create a SYS MCInst. |
3369 | bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc, |
3370 | OperandVector &Operands) { |
3371 | if (Name.contains('.')) |
3372 | return TokError("invalid operand"); |
3373 | |
3374 | Mnemonic = Name; |
3375 | Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext())); |
3376 | |
3377 | const AsmToken &Tok = getTok(); |
3378 | StringRef Op = Tok.getString(); |
3379 | SMLoc S = Tok.getLoc(); |
3380 | |
3381 | if (Mnemonic == "ic") { |
3382 | const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op); |
3383 | if (!IC) |
3384 | return TokError("invalid operand for IC instruction"); |
3385 | else if (!IC->haveFeatures(getSTI().getFeatureBits())) { |
3386 | std::string Str("IC " + std::string(IC->Name) + " requires: "); |
3387 | setRequiredFeatureString(IC->getRequiredFeatures(), Str); |
3388 | return TokError(Str); |
3389 | } |
3390 | createSysAlias(IC->Encoding, Operands, S); |
3391 | } else if (Mnemonic == "dc") { |
3392 | const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op); |
3393 | if (!DC) |
3394 | return TokError("invalid operand for DC instruction"); |
3395 | else if (!DC->haveFeatures(getSTI().getFeatureBits())) { |
3396 | std::string Str("DC " + std::string(DC->Name) + " requires: "); |
3397 | setRequiredFeatureString(DC->getRequiredFeatures(), Str); |
3398 | return TokError(Str); |
3399 | } |
3400 | createSysAlias(DC->Encoding, Operands, S); |
3401 | } else if (Mnemonic == "at") { |
3402 | const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op); |
3403 | if (!AT) |
3404 | return TokError("invalid operand for AT instruction"); |
3405 | else if (!AT->haveFeatures(getSTI().getFeatureBits())) { |
3406 | std::string Str("AT " + std::string(AT->Name) + " requires: "); |
3407 | setRequiredFeatureString(AT->getRequiredFeatures(), Str); |
3408 | return TokError(Str); |
3409 | } |
3410 | createSysAlias(AT->Encoding, Operands, S); |
3411 | } else if (Mnemonic == "tlbi") { |
3412 | const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op); |
3413 | if (!TLBI) |
3414 | return TokError("invalid operand for TLBI instruction"); |
3415 | else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) { |
3416 | std::string Str("TLBI " + std::string(TLBI->Name) + " requires: "); |
3417 | setRequiredFeatureString(TLBI->getRequiredFeatures(), Str); |
3418 | return TokError(Str); |
3419 | } |
3420 | createSysAlias(TLBI->Encoding, Operands, S); |
3421 | } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") { |
3422 | const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op); |
3423 | if (!PRCTX) |
3424 | return TokError("invalid operand for prediction restriction instruction"); |
3425 | else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) { |
3426 | std::string Str( |
3427 | Mnemonic.upper() + std::string(PRCTX->Name) + " requires: "); |
3428 | setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str); |
3429 | return TokError(Str); |
3430 | } |
3431 | uint16_t PRCTX_Op2 = |
3432 | Mnemonic == "cfp" ? 4 : |
3433 | Mnemonic == "dvp" ? 5 : |
3434 | Mnemonic == "cpp" ? 7 : |
3435 | 0; |
3436 | assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
3437 | createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2, Operands, S);
3438 | } |
3439 | |
3440 | Lex(); // Eat operand. |
3441 | |
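     | // Ops whose name contains "all" (e.g. "tlbi alle1") do not take a register
     | // operand.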
3442 | bool ExpectRegister = (Op.lower().find("all") == StringRef::npos); |
3443 | bool HasRegister = false; |
3444 | |
3445 | // Check for the optional register operand. |
3446 | if (parseOptionalToken(AsmToken::Comma)) { |
3447 | if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands)) |
3448 | return TokError("expected register operand"); |
3449 | HasRegister = true; |
3450 | } |
3451 | |
3452 | if (ExpectRegister && !HasRegister) |
3453 | return TokError("specified " + Mnemonic + " op requires a register"); |
3454 | else if (!ExpectRegister && HasRegister) |
3455 | return TokError("specified " + Mnemonic + " op does not use a register"); |
3456 | |
3457 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) |
3458 | return true; |
3459 | |
3460 | return false; |
3461 | } |
3462 | |
3463 | OperandMatchResultTy |
3464 | AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) { |
3465 | MCAsmParser &Parser = getParser(); |
3466 | const AsmToken &Tok = getTok(); |
3467 | |
3468 | if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) { |
3469 | TokError("'csync' operand expected"); |
3470 | return MatchOperand_ParseFail; |
3471 | } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { |
3472 | // Immediate operand. |
3473 | const MCExpr *ImmVal; |
3474 | SMLoc ExprLoc = getLoc(); |
3475 | AsmToken IntTok = Tok; |
3476 | if (getParser().parseExpression(ImmVal)) |
3477 | return MatchOperand_ParseFail; |
3478 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
3479 | if (!MCE) { |
3480 | Error(ExprLoc, "immediate value expected for barrier operand"); |
3481 | return MatchOperand_ParseFail; |
3482 | } |
3483 | int64_t Value = MCE->getValue(); |
3484 | if (Mnemonic == "dsb" && Value > 15) { |
3485 | // This case is a no match here, but it might be matched by the nXS
3486 | // variant. Deliberately do not unlex the optional '#', as it is not
3487 | // needed to characterize an integer immediate.
3488 | Parser.getLexer().UnLex(IntTok); |
3489 | return MatchOperand_NoMatch; |
3490 | } |
3491 | if (Value < 0 || Value > 15) { |
3492 | Error(ExprLoc, "barrier operand out of range"); |
3493 | return MatchOperand_ParseFail; |
3494 | } |
3495 | auto DB = AArch64DB::lookupDBByEncoding(Value); |
3496 | Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "", |
3497 | ExprLoc, getContext(), |
3498 | false /*hasnXSModifier*/)); |
3499 | return MatchOperand_Success; |
3500 | } |
3501 | |
3502 | if (Tok.isNot(AsmToken::Identifier)) { |
3503 | TokError("invalid operand for instruction"); |
3504 | return MatchOperand_ParseFail; |
3505 | } |
3506 | |
3507 | StringRef Operand = Tok.getString(); |
3508 | auto TSB = AArch64TSB::lookupTSBByName(Operand); |
3509 | auto DB = AArch64DB::lookupDBByName(Operand); |
3510 | // The only valid named option for ISB is 'sy' |
3511 | if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) { |
3512 | TokError("'sy' or #imm operand expected"); |
3513 | return MatchOperand_ParseFail; |
3514 | // The only valid named option for TSB is 'csync' |
3515 | } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) { |
3516 | TokError("'csync' operand expected"); |
3517 | return MatchOperand_ParseFail; |
3518 | } else if (!DB && !TSB) { |
3519 | if (Mnemonic == "dsb") { |
3520 | // This case is a no match here, but it might be matched by the nXS |
3521 | // variant. |
3522 | return MatchOperand_NoMatch; |
3523 | } |
3524 | TokError("invalid barrier option name"); |
3525 | return MatchOperand_ParseFail; |
3526 | } |
3527 | |
3528 | Operands.push_back(AArch64Operand::CreateBarrier( |
3529 | DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), |
3530 | getContext(), false /*hasnXSModifier*/)); |
3531 | Lex(); // Consume the option |
3532 | |
3533 | return MatchOperand_Success; |
3534 | } |
3535 | |
3536 | OperandMatchResultTy |
3537 | AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) { |
3538 | const AsmToken &Tok = getTok(); |
3539 | |
3540 | assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands")(static_cast <bool> (Mnemonic == "dsb" && "Instruction does not accept nXS operands" ) ? void (0) : __assert_fail ("Mnemonic == \"dsb\" && \"Instruction does not accept nXS operands\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3540 , __extension__ __PRETTY_FUNCTION__)); |
3541 | if (Mnemonic != "dsb") |
3542 | return MatchOperand_ParseFail; |
3543 | |
3544 | if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { |
3545 | // Immediate operand. |
3546 | const MCExpr *ImmVal; |
3547 | SMLoc ExprLoc = getLoc(); |
3548 | if (getParser().parseExpression(ImmVal)) |
3549 | return MatchOperand_ParseFail; |
3550 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
3551 | if (!MCE) { |
3552 | Error(ExprLoc, "immediate value expected for barrier operand"); |
3553 | return MatchOperand_ParseFail; |
3554 | } |
3555 | int64_t Value = MCE->getValue(); |
3556 | // v8.7-A DSB in the nXS variant accepts only the following immediate |
3557 | // values: 16, 20, 24, 28. |
3558 | if (Value != 16 && Value != 20 && Value != 24 && Value != 28) { |
3559 | Error(ExprLoc, "barrier operand out of range"); |
3560 | return MatchOperand_ParseFail; |
3561 | } |
3562 | auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value); |
3563 | Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name, |
3564 | ExprLoc, getContext(), |
3565 | true /*hasnXSModifier*/)); |
3566 | return MatchOperand_Success; |
3567 | } |
3568 | |
3569 | if (Tok.isNot(AsmToken::Identifier)) { |
3570 | TokError("invalid operand for instruction"); |
3571 | return MatchOperand_ParseFail; |
3572 | } |
3573 | |
3574 | StringRef Operand = Tok.getString(); |
3575 | auto DB = AArch64DBnXS::lookupDBnXSByName(Operand); |
3576 | |
3577 | if (!DB) { |
3578 | TokError("invalid barrier option name"); |
3579 | return MatchOperand_ParseFail; |
3580 | } |
3581 | |
3582 | Operands.push_back( |
3583 | AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(), |
3584 | getContext(), true /*hasnXSModifier*/)); |
3585 | Lex(); // Consume the option |
3586 | |
3587 | return MatchOperand_Success; |
3588 | } |
3589 | |
3590 | OperandMatchResultTy |
3591 | AArch64AsmParser::tryParseSysReg(OperandVector &Operands) { |
3592 | const AsmToken &Tok = getTok(); |
3593 | |
3594 | if (Tok.isNot(AsmToken::Identifier)) |
3595 | return MatchOperand_NoMatch; |
3596 | |
3597 | if (AArch64SVCR::lookupSVCRByName(Tok.getString())) |
3598 | return MatchOperand_NoMatch; |
3599 | |
3600 | int MRSReg, MSRReg; |
3601 | auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString()); |
3602 | if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) { |
3603 | MRSReg = SysReg->Readable ? SysReg->Encoding : -1; |
3604 | MSRReg = SysReg->Writeable ? SysReg->Encoding : -1; |
3605 | } else |
3606 | MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString()); |
3607 | |
3608 | auto PState = AArch64PState::lookupPStateByName(Tok.getString()); |
3609 | unsigned PStateImm = -1; |
3610 | if (PState && PState->haveFeatures(getSTI().getFeatureBits())) |
3611 | PStateImm = PState->Encoding; |
3612 | |
3613 | Operands.push_back( |
3614 | AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg, |
3615 | PStateImm, getContext())); |
3616 | Lex(); // Eat identifier |
3617 | |
3618 | return MatchOperand_Success; |
3619 | } |
3620 | |
3621 | /// tryParseNeonVectorRegister - Parse a vector register operand. |
3622 | bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) { |
3623 | if (getTok().isNot(AsmToken::Identifier)) |
3624 | return true; |
3625 | |
3626 | SMLoc S = getLoc(); |
3627 | // Check for a vector register specifier first. |
3628 | StringRef Kind; |
3629 | unsigned Reg; |
3630 | OperandMatchResultTy Res = |
3631 | tryParseVectorRegister(Reg, Kind, RegKind::NeonVector); |
3632 | if (Res != MatchOperand_Success) |
3633 | return true; |
3634 | |
3635 | const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector); |
3636 | if (!KindRes) |
3637 | return true; |
3638 | |
3639 | unsigned ElementWidth = KindRes->second; |
3640 | Operands.push_back( |
3641 | AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth, |
3642 | S, getLoc(), getContext())); |
3643 | |
3644 | // If there was an explicit qualifier, that goes on as a literal text |
3645 | // operand. |
3646 | if (!Kind.empty()) |
3647 | Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext())); |
3648 | |
3649 | return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; |
3650 | } |
3651 | |
3652 | OperandMatchResultTy |
3653 | AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) { |
3654 | SMLoc SIdx = getLoc(); |
3655 | if (parseOptionalToken(AsmToken::LBrac)) { |
3656 | const MCExpr *ImmVal; |
3657 | if (getParser().parseExpression(ImmVal)) |
3658 | return MatchOperand_NoMatch; |
3659 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); |
3660 | if (!MCE) { |
3661 | TokError("immediate value expected for vector index"); |
3662 | return MatchOperand_ParseFail;
3663 | } |
3664 | |
3665 | SMLoc E = getLoc(); |
3666 | |
3667 | if (parseToken(AsmToken::RBrac, "']' expected")) |
3668 | return MatchOperand_ParseFail;
3669 | |
3670 | Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx, |
3671 | E, getContext())); |
3672 | return MatchOperand_Success; |
3673 | } |
3674 | |
3675 | return MatchOperand_NoMatch; |
3676 | } |
3677 | |
3678 | // tryParseVectorRegister - Try to parse a vector register name with |
3679 | // optional kind specifier. If it is a register specifier, eat the token |
3680 | // and return it. |
3681 | OperandMatchResultTy |
3682 | AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind, |
3683 | RegKind MatchKind) { |
3684 | const AsmToken &Tok = getTok(); |
3685 | |
3686 | if (Tok.isNot(AsmToken::Identifier)) |
3687 | return MatchOperand_NoMatch; |
3688 | |
3689 | StringRef Name = Tok.getString(); |
3690 | // If there is a kind specifier, it's separated from the register name by |
3691 | // a '.'. |
3692 | size_t Start = 0, Next = Name.find('.'); |
3693 | StringRef Head = Name.slice(Start, Next); |
3694 | unsigned RegNum = matchRegisterNameAlias(Head, MatchKind); |
3695 | |
3696 | if (RegNum) { |
3697 | if (Next != StringRef::npos) { |
3698 | Kind = Name.slice(Next, StringRef::npos); |
3699 | if (!isValidVectorKind(Kind, MatchKind)) { |
3700 | TokError("invalid vector kind qualifier"); |
3701 | return MatchOperand_ParseFail; |
3702 | } |
3703 | } |
3704 | Lex(); // Eat the register token. |
3705 | |
3706 | Reg = RegNum; |
3707 | return MatchOperand_Success; |
3708 | } |
3709 | |
3710 | return MatchOperand_NoMatch; |
3711 | } |
3712 | |
3713 | /// tryParseSVEPredicateVector - Parse a SVE predicate register operand. |
3714 | OperandMatchResultTy |
3715 | AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) { |
3716 | // Check for a SVE predicate register specifier first. |
3717 | const SMLoc S = getLoc(); |
3718 | StringRef Kind; |
3719 | unsigned RegNum; |
3720 | auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector); |
3721 | if (Res != MatchOperand_Success) |
3722 | return Res; |
3723 | |
3724 | const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector); |
3725 | if (!KindRes) |
3726 | return MatchOperand_NoMatch; |
3727 | |
3728 | unsigned ElementWidth = KindRes->second; |
3729 | Operands.push_back(AArch64Operand::CreateVectorReg( |
3730 | RegNum, RegKind::SVEPredicateVector, ElementWidth, S, |
3731 | getLoc(), getContext())); |
3732 | |
3733 | if (getLexer().is(AsmToken::LBrac)) { |
3734 | // Indexed predicate, there's no comma so try parse the next operand |
3735 | // immediately. |
3736 | if (parseOperand(Operands, false, false)) |
3737 | return MatchOperand_NoMatch; |
3738 | } |
3739 | |
3740 | // Not all predicates are followed by a '/m' or '/z'. |
3741 | if (getTok().isNot(AsmToken::Slash)) |
3742 | return MatchOperand_Success; |
3743 | |
3744 | // But when they are, they shouldn't have an element type suffix. |
3745 | if (!Kind.empty()) { |
3746 | Error(S, "not expecting size suffix"); |
3747 | return MatchOperand_ParseFail; |
3748 | } |
3749 | |
3750 | // Add a literal slash as operand |
3751 | Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext())); |
3752 | |
3753 | Lex(); // Eat the slash. |
3754 | |
3755 | // Zeroing or merging? |
3756 | auto Pred = getTok().getString().lower(); |
3757 | if (Pred != "z" && Pred != "m") { |
3758 | Error(getLoc(), "expecting 'm' or 'z' predication"); |
3759 | return MatchOperand_ParseFail; |
3760 | } |
3761 | |
3762 | // Add zero/merge token. |
3763 | const char *ZM = Pred == "z" ? "z" : "m"; |
3764 | Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext())); |
3765 | |
3766 | Lex(); // Eat zero/merge token. |
3767 | return MatchOperand_Success; |
3768 | } |
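// Editor's note (illustrative): this accepts "p0.b" (sized, no
// qualifier), bare "p0", and "p0/z" or "p0/m"; the qualified forms emit
// three operands: the register, a "/" token, and a "z"/"m" token. A
// sized register plus a qualifier, e.g. "p0.b/z", draws the
// "not expecting size suffix" error above.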
3769 | |
3770 | /// parseRegister - Parse a register operand. |
3771 | bool AArch64AsmParser::parseRegister(OperandVector &Operands) { |
3772 | // Try for a Neon vector register. |
3773 | if (!tryParseNeonVectorRegister(Operands)) |
3774 | return false; |
3775 | |
3776 | // Otherwise try for a scalar register. |
3777 | if (tryParseGPROperand<false>(Operands) == MatchOperand_Success) |
3778 | return false; |
3779 | |
3780 | return true; |
3781 | } |
3782 | |
3783 | bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) { |
3784 | bool HasELFModifier = false; |
3785 | AArch64MCExpr::VariantKind RefKind; |
3786 | |
3787 | if (parseOptionalToken(AsmToken::Colon)) { |
3788 | HasELFModifier = true; |
3789 | |
3790 | if (getTok().isNot(AsmToken::Identifier)) |
3791 | return TokError("expect relocation specifier in operand after ':'"); |
3792 | |
3793 | std::string LowerCase = getTok().getIdentifier().lower(); |
3794 | RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase) |
3795 | .Case("lo12", AArch64MCExpr::VK_LO12) |
3796 | .Case("abs_g3", AArch64MCExpr::VK_ABS_G3) |
3797 | .Case("abs_g2", AArch64MCExpr::VK_ABS_G2) |
3798 | .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S) |
3799 | .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC) |
3800 | .Case("abs_g1", AArch64MCExpr::VK_ABS_G1) |
3801 | .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S) |
3802 | .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC) |
3803 | .Case("abs_g0", AArch64MCExpr::VK_ABS_G0) |
3804 | .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S) |
3805 | .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC) |
3806 | .Case("prel_g3", AArch64MCExpr::VK_PREL_G3) |
3807 | .Case("prel_g2", AArch64MCExpr::VK_PREL_G2) |
3808 | .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC) |
3809 | .Case("prel_g1", AArch64MCExpr::VK_PREL_G1) |
3810 | .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC) |
3811 | .Case("prel_g0", AArch64MCExpr::VK_PREL_G0) |
3812 | .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC) |
3813 | .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2) |
3814 | .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1) |
3815 | .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC) |
3816 | .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0) |
3817 | .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC) |
3818 | .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12) |
3819 | .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12) |
3820 | .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC) |
3821 | .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC) |
3822 | .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2) |
3823 | .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1) |
3824 | .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC) |
3825 | .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0) |
3826 | .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC) |
3827 | .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12) |
3828 | .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12) |
3829 | .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC) |
3830 | .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12) |
3831 | .Case("got", AArch64MCExpr::VK_GOT_PAGE) |
3832 | .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15) |
3833 | .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12) |
3834 | .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE) |
3835 | .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC) |
3836 | .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1) |
3837 | .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC) |
3838 | .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE) |
3839 | .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12) |
3840 | .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12) |
3841 | .Default(AArch64MCExpr::VK_INVALID); |
3842 | |
3843 | if (RefKind == AArch64MCExpr::VK_INVALID) |
3844 | return TokError("expect relocation specifier in operand after ':'"); |
3845 | |
3846 | Lex(); // Eat identifier |
3847 | |
3848 | if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier")) |
3849 | return true; |
3850 | } |
3851 | |
3852 | if (getParser().parseExpression(ImmVal)) |
3853 | return true; |
3854 | |
3855 | if (HasELFModifier) |
3856 | ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext()); |
3857 | |
3858 | return false; |
3859 | } |
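// Editor's note (illustrative): this recognizes ELF relocation
// specifiers written before an expression, e.g. "add x0, x0, :lo12:sym"
// or "movz x0, #:abs_g1:sym"; the matched name wraps the expression in
// an AArch64MCExpr carrying the corresponding VK_* kind.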
3860 | |
3861 | OperandMatchResultTy |
3862 | AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) { |
3863 | if (getTok().isNot(AsmToken::LCurly)) |
3864 | return MatchOperand_NoMatch; |
3865 | |
3866 | auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) { |
3867 | StringRef Name = getTok().getString(); |
3868 | size_t DotPosition = Name.find('.'); |
3869 | if (DotPosition == StringRef::npos) |
3870 | return MatchOperand_NoMatch; |
3871 | |
3872 | unsigned RegNum = matchMatrixTileListRegName(Name); |
3873 | if (!RegNum) |
3874 | return MatchOperand_NoMatch; |
3875 | |
3876 | StringRef Tail = Name.drop_front(DotPosition); |
3877 | const Optional<std::pair<int, int>> &KindRes = |
3878 | parseVectorKind(Tail, RegKind::Matrix); |
3879 | if (!KindRes) { |
3880 | TokError("Expected the register to be followed by element width suffix"); |
3881 | return MatchOperand_ParseFail; |
3882 | } |
3883 | ElementWidth = KindRes->second; |
3884 | Reg = RegNum; |
3885 | Lex(); // Eat the register. |
3886 | return MatchOperand_Success; |
3887 | }; |
3888 | |
3889 | SMLoc S = getLoc(); |
3890 | auto LCurly = getTok(); |
3891 | Lex(); // Eat the left brace token. |
3892 | |
3893 | // Empty matrix list |
3894 | if (parseOptionalToken(AsmToken::RCurly)) { |
3895 | Operands.push_back(AArch64Operand::CreateMatrixTileList( |
3896 | /*RegMask=*/0, S, getLoc(), getContext())); |
3897 | return MatchOperand_Success; |
3898 | } |
3899 | |
3900 | // Try parse {za} alias early |
3901 | if (getTok().getString().equals_insensitive("za")) { |
3902 | Lex(); // Eat 'za' |
3903 | |
3904 | if (parseToken(AsmToken::RCurly, "'}' expected")) |
3905 | return MatchOperand_ParseFail; |
3906 | |
3907 | Operands.push_back(AArch64Operand::CreateMatrixTileList( |
3908 | /*RegMask=*/0xFF, S, getLoc(), getContext())); |
3909 | return MatchOperand_Success; |
3910 | } |
3911 | |
3912 | SMLoc TileLoc = getLoc(); |
3913 | |
3914 | unsigned FirstReg, ElementWidth; |
3915 | auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth); |
3916 | if (ParseRes != MatchOperand_Success) { |
3917 | getLexer().UnLex(LCurly); |
3918 | return ParseRes; |
3919 | } |
3920 | |
3921 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); |
3922 | |
3923 | unsigned PrevReg = FirstReg; |
3924 | |
3925 | SmallSet<unsigned, 8> DRegs; |
3926 | AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth); |
3927 | |
3928 | SmallSet<unsigned, 8> SeenRegs; |
3929 | SeenRegs.insert(FirstReg); |
3930 | |
3931 | while (parseOptionalToken(AsmToken::Comma)) { |
3932 | TileLoc = getLoc(); |
3933 | unsigned Reg, NextElementWidth; |
3934 | ParseRes = ParseMatrixTile(Reg, NextElementWidth); |
3935 | if (ParseRes != MatchOperand_Success) |
3936 | return ParseRes; |
3937 | |
3938 | // Element size must match on all regs in the list. |
3939 | if (ElementWidth != NextElementWidth) { |
3940 | Error(TileLoc, "mismatched register size suffix"); |
3941 | return MatchOperand_ParseFail; |
3942 | } |
3943 | |
3944 | if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg))) |
3945 | Warning(TileLoc, "tile list not in ascending order"); |
3946 | |
3947 | if (SeenRegs.contains(Reg)) |
3948 | Warning(TileLoc, "duplicate tile in list"); |
3949 | else { |
3950 | SeenRegs.insert(Reg); |
3951 | AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth); |
3952 | } |
3953 | |
3954 | PrevReg = Reg; |
3955 | } |
3956 | |
3957 | if (parseToken(AsmToken::RCurly, "'}' expected")) |
3958 | return MatchOperand_ParseFail; |
3959 | |
3960 | unsigned RegMask = 0; |
3961 | for (auto Reg : DRegs) |
3962 | RegMask |= 0x1 << (RI->getEncodingValue(Reg) - |
3963 | RI->getEncodingValue(AArch64::ZAD0)); |
3964 | Operands.push_back( |
3965 | AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext())); |
3966 | |
3967 | return MatchOperand_Success; |
3968 | } |
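// Editor's note (illustrative): an SME tile list is stored as an 8-bit
// mask over the 64-bit tiles ZAD0..ZAD7. "{za0.s, za1.s}" expands each
// 32-bit tile to the ZAD registers it aliases and sets those mask bits;
// "{za}" is the shorthand for the full mask 0xFF.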
3969 | |
3970 | template <RegKind VectorKind> |
3971 | OperandMatchResultTy |
3972 | AArch64AsmParser::tryParseVectorList(OperandVector &Operands, |
3973 | bool ExpectMatch) { |
3974 | MCAsmParser &Parser = getParser(); |
3975 | if (!getTok().is(AsmToken::LCurly)) |
3976 | return MatchOperand_NoMatch; |
3977 | |
3978 | // Wrapper around parse function |
3979 | auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc, |
3980 | bool NoMatchIsError) { |
3981 | auto RegTok = getTok(); |
3982 | auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind); |
3983 | if (ParseRes == MatchOperand_Success) { |
3984 | if (parseVectorKind(Kind, VectorKind)) |
3985 | return ParseRes; |
3986 | llvm_unreachable("Expected a valid vector kind"); |
3987 | } |
3988 | |
3989 | if (RegTok.isNot(AsmToken::Identifier) || |
3990 | ParseRes == MatchOperand_ParseFail || |
3991 | (ParseRes == MatchOperand_NoMatch && NoMatchIsError && |
3992 | !RegTok.getString().startswith_insensitive("za"))) { |
3993 | Error(Loc, "vector register expected"); |
3994 | return MatchOperand_ParseFail; |
3995 | } |
3996 | |
3997 | return MatchOperand_NoMatch; |
3998 | }; |
3999 | |
4000 | SMLoc S = getLoc(); |
4001 | auto LCurly = getTok(); |
4002 | Lex(); // Eat the left brace token. |
4003 | |
4004 | StringRef Kind; |
4005 | unsigned FirstReg; |
4006 | auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch); |
4007 | |
4008 | // Put back the original left brace if there was no match, so that |
4009 | // different types of list operands can be matched (e.g. SVE, Neon). |
4010 | if (ParseRes == MatchOperand_NoMatch) |
4011 | Parser.getLexer().UnLex(LCurly); |
4012 | |
4013 | if (ParseRes != MatchOperand_Success) |
4014 | return ParseRes; |
4015 | |
4016 | int64_t PrevReg = FirstReg; |
4017 | unsigned Count = 1; |
4018 | |
4019 | if (parseOptionalToken(AsmToken::Minus)) { |
4020 | SMLoc Loc = getLoc(); |
4021 | StringRef NextKind; |
4022 | |
4023 | unsigned Reg; |
4024 | ParseRes = ParseVector(Reg, NextKind, getLoc(), true); |
4025 | if (ParseRes != MatchOperand_Success) |
4026 | return ParseRes; |
4027 | |
4028 | // Any kind suffix must match on all regs in the list. |
4029 | if (Kind != NextKind) { |
4030 | Error(Loc, "mismatched register size suffix"); |
4031 | return MatchOperand_ParseFail; |
4032 | } |
4033 | |
4034 | unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg); |
4035 | |
4036 | if (Space == 0 || Space > 3) { |
4037 | Error(Loc, "invalid number of vectors"); |
4038 | return MatchOperand_ParseFail; |
4039 | } |
4040 | |
4041 | Count += Space; |
4042 | } |
4043 | else { |
4044 | while (parseOptionalToken(AsmToken::Comma)) { |
4045 | SMLoc Loc = getLoc(); |
4046 | StringRef NextKind; |
4047 | unsigned Reg; |
4048 | ParseRes = ParseVector(Reg, NextKind, getLoc(), true); |
4049 | if (ParseRes != MatchOperand_Success) |
4050 | return ParseRes; |
4051 | |
4052 | // Any kind suffix must match on all regs in the list. |
4053 | if (Kind != NextKind) { |
4054 | Error(Loc, "mismatched register size suffix"); |
4055 | return MatchOperand_ParseFail; |
4056 | } |
4057 | |
4058 | // Registers must be incremental (with wraparound at 31) |
4059 | if (getContext().getRegisterInfo()->getEncodingValue(Reg) != |
4060 | (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) { |
4061 | Error(Loc, "registers must be sequential"); |
4062 | return MatchOperand_ParseFail; |
4063 | } |
4064 | |
4065 | PrevReg = Reg; |
4066 | ++Count; |
4067 | } |
4068 | } |
4069 | |
4070 | if (parseToken(AsmToken::RCurly, "'}' expected")) |
4071 | return MatchOperand_ParseFail; |
4072 | |
4073 | if (Count > 4) { |
4074 | Error(S, "invalid number of vectors"); |
4075 | return MatchOperand_ParseFail; |
4076 | } |
4077 | |
4078 | unsigned NumElements = 0; |
4079 | unsigned ElementWidth = 0; |
4080 | if (!Kind.empty()) { |
4081 | if (const auto &VK = parseVectorKind(Kind, VectorKind)) |
4082 | std::tie(NumElements, ElementWidth) = *VK; |
4083 | } |
4084 | |
4085 | Operands.push_back(AArch64Operand::CreateVectorList( |
4086 | FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(), |
4087 | getContext())); |
4088 | |
4089 | return MatchOperand_Success; |
4090 | } |
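// Editor's note (illustrative): accepted forms are the comma list
// "{v0.4s, v1.4s}" (registers must be sequential mod 32, so
// "{v31.2d, v0.2d}" is fine) and the range form "{v0.16b - v3.16b}";
// at most four registers, all with the same kind suffix.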
4091 | |
4092 | /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions. |
4093 | bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) { |
4094 | auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true); |
4095 | if (ParseRes != MatchOperand_Success) |
4096 | return true; |
4097 | |
4098 | return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; |
4099 | } |
4100 | |
4101 | OperandMatchResultTy |
4102 | AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) { |
4103 | SMLoc StartLoc = getLoc(); |
4104 | |
4105 | unsigned RegNum; |
4106 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); |
4107 | if (Res != MatchOperand_Success) |
4108 | return Res; |
4109 | |
4110 | if (!parseOptionalToken(AsmToken::Comma)) { |
4111 | Operands.push_back(AArch64Operand::CreateReg( |
4112 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); |
4113 | return MatchOperand_Success; |
4114 | } |
4115 | |
4116 | parseOptionalToken(AsmToken::Hash); |
4117 | |
4118 | if (getTok().isNot(AsmToken::Integer)) { |
4119 | Error(getLoc(), "index must be absent or #0"); |
4120 | return MatchOperand_ParseFail; |
4121 | } |
4122 | |
4123 | const MCExpr *ImmVal; |
4124 | if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) || |
4125 | cast<MCConstantExpr>(ImmVal)->getValue() != 0) { |
4126 | Error(getLoc(), "index must be absent or #0"); |
4127 | return MatchOperand_ParseFail; |
4128 | } |
4129 | |
4130 | Operands.push_back(AArch64Operand::CreateReg( |
4131 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); |
4132 | return MatchOperand_Success; |
4133 | } |
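// Editor's note (illustrative): this operand may be written either as a
// bare register or with an explicit zero index, i.e. "xN" and "xN, #0"
// parse to the same register operand; any other index is rejected with
// "index must be absent or #0".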
4134 | |
4135 | template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy> |
4136 | OperandMatchResultTy |
4137 | AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) { |
4138 | SMLoc StartLoc = getLoc(); |
4139 | |
4140 | unsigned RegNum; |
4141 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); |
4142 | if (Res != MatchOperand_Success) |
4143 | return Res; |
4144 | |
4145 | // No shift/extend is the default. |
4146 | if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) { |
4147 | Operands.push_back(AArch64Operand::CreateReg( |
4148 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy)); |
4149 | return MatchOperand_Success; |
4150 | } |
4151 | |
4152 | // Eat the comma |
4153 | Lex(); |
4154 | |
4155 | // Match the shift |
4156 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; |
4157 | Res = tryParseOptionalShiftExtend(ExtOpnd); |
4158 | if (Res != MatchOperand_Success) |
4159 | return Res; |
4160 | |
4161 | auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get()); |
4162 | Operands.push_back(AArch64Operand::CreateReg( |
4163 | RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy, |
4164 | Ext->getShiftExtendType(), Ext->getShiftExtendAmount(), |
4165 | Ext->hasShiftExtendAmount())); |
4166 | |
4167 | return MatchOperand_Success; |
4168 | } |
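// Editor's note (illustrative): this lets a scalar register carry an
// optional shift/extend, e.g. "add x0, x1, x2, lsl #2" or
// "add x0, x1, w2, uxtw #2"; the shift/extend is folded into the single
// register operand rather than pushed separately.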
4169 | |
4170 | bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) { |
4171 | MCAsmParser &Parser = getParser(); |
4172 | |
4173 | // Some SVE instructions have a decoration after the immediate, i.e. |
4174 | // "mul vl". We parse them here and add tokens, which must be present in the |
4175 | // asm string in the tablegen instruction. |
4176 | bool NextIsVL = |
4177 | Parser.getLexer().peekTok().getString().equals_insensitive("vl"); |
4178 | bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash); |
4179 | if (!getTok().getString().equals_insensitive("mul") || |
4180 | !(NextIsVL || NextIsHash)) |
4181 | return true; |
4182 | |
4183 | Operands.push_back( |
4184 | AArch64Operand::CreateToken("mul", getLoc(), getContext())); |
4185 | Lex(); // Eat the "mul" |
4186 | |
4187 | if (NextIsVL) { |
4188 | Operands.push_back( |
4189 | AArch64Operand::CreateToken("vl", getLoc(), getContext())); |
4190 | Lex(); // Eat the "vl" |
4191 | return false; |
4192 | } |
4193 | |
4194 | if (NextIsHash) { |
4195 | Lex(); // Eat the # |
4196 | SMLoc S = getLoc(); |
4197 | |
4198 | // Parse immediate operand. |
4199 | const MCExpr *ImmVal; |
4200 | if (!Parser.parseExpression(ImmVal)) |
4201 | if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) { |
4202 | Operands.push_back(AArch64Operand::CreateImm( |
4203 | MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(), |
4204 | getContext())); |
4205 | return false; // success (this function returns bool, not OperandMatchResultTy) |
4206 | } |
4207 | } |
4208 | |
4209 | return Error(getLoc(), "expected 'vl' or '#<imm>'"); |
4210 | } |
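// Editor's note (illustrative): this covers the SVE multiplier suffixes,
// e.g. "ldr z0, [x0, #1, mul vl]" and "cntb x0, all, mul #2"; "mul" and
// "vl" become literal tokens and "#<imm>" becomes an immediate operand.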
4211 | |
4212 | bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) { |
4213 | auto Tok = getTok(); |
4214 | if (Tok.isNot(AsmToken::Identifier)) |
4215 | return true; |
4216 | |
4217 | auto Keyword = Tok.getString(); |
4218 | Keyword = StringSwitch<StringRef>(Keyword.lower()) |
4219 | .Case("sm", "sm") |
4220 | .Case("za", "za") |
4221 | .Default(Keyword); |
4222 | Operands.push_back( |
4223 | AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext())); |
4224 | |
4225 | Lex(); |
4226 | return false; |
4227 | } |
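// Editor's note (illustrative): used for the SME mode keywords, so
// "smstart sm" / "smstart za" keep "sm"/"za" as literal tokens; the
// StringSwitch merely canonicalizes their case.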
4228 | |
4229 | /// parseOperand - Parse an AArch64 instruction operand. For now this parses the |
4230 | /// operand regardless of the mnemonic. |
4231 | bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode, |
4232 | bool invertCondCode) { |
4233 | MCAsmParser &Parser = getParser(); |
4234 | |
4235 | OperandMatchResultTy ResTy = |
4236 | MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true); |
4237 | |
4238 | // Check if the current operand has a custom associated parser, if so, try to |
4239 | // custom parse the operand, or fallback to the general approach. |
4240 | if (ResTy == MatchOperand_Success) |
4241 | return false; |
4242 | // If there wasn't a custom match, try the generic matcher below. Otherwise, |
4243 | // there was a match, but an error occurred, in which case, just return that |
4244 | // the operand parsing failed. |
4245 | if (ResTy == MatchOperand_ParseFail) |
4246 | return true; |
4247 | |
4248 | // Nothing custom, so do general case parsing. |
4249 | SMLoc S, E; |
4250 | switch (getLexer().getKind()) { |
4251 | default: { |
4252 | SMLoc S = getLoc(); |
4253 | const MCExpr *Expr; |
4254 | if (parseSymbolicImmVal(Expr)) |
4255 | return Error(S, "invalid operand"); |
4256 | |
4257 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
4258 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); |
4259 | return false; |
4260 | } |
4261 | case AsmToken::LBrac: { |
4262 | Operands.push_back( |
4263 | AArch64Operand::CreateToken("[", getLoc(), getContext())); |
4264 | Lex(); // Eat '[' |
4265 | |
4266 | // There's no comma after a '[', so we can parse the next operand |
4267 | // immediately. |
4268 | return parseOperand(Operands, false, false); |
4269 | } |
4270 | case AsmToken::LCurly: { |
4271 | if (!parseNeonVectorList(Operands)) |
4272 | return false; |
4273 | |
4274 | Operands.push_back( |
4275 | AArch64Operand::CreateToken("{", getLoc(), getContext())); |
4276 | Lex(); // Eat '{' |
4277 | |
4278 | // There's no comma after a '{', so we can parse the next operand |
4279 | // immediately. |
4280 | return parseOperand(Operands, false, false); |
4281 | } |
4282 | case AsmToken::Identifier: { |
4283 | // If we're expecting a Condition Code operand, then just parse that. |
4284 | if (isCondCode) |
4285 | return parseCondCode(Operands, invertCondCode); |
4286 | |
4287 | // If it's a register name, parse it. |
4288 | if (!parseRegister(Operands)) |
4289 | return false; |
4290 | |
4291 | // See if this is a "mul vl" decoration or "mul #<int>" operand used |
4292 | // by SVE instructions. |
4293 | if (!parseOptionalMulOperand(Operands)) |
4294 | return false; |
4295 | |
4296 | // If this is an "smstart" or "smstop" instruction, parse its special |
4297 | // keyword operand as an identifier. |
4298 | if (Mnemonic == "smstart" || Mnemonic == "smstop") |
4299 | return parseKeywordOperand(Operands); |
4300 | |
4301 | // This could be an optional "shift" or "extend" operand. |
4302 | OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands); |
4303 | // We can only continue if no tokens were eaten. |
4304 | if (GotShift != MatchOperand_NoMatch) |
4305 | return GotShift == MatchOperand_ParseFail; |
4306 | |
4307 | // If this is a two-word mnemonic, parse its special keyword |
4308 | // operand as an identifier. |
4309 | if (Mnemonic == "brb") |
4310 | return parseKeywordOperand(Operands); |
4311 | |
4312 | // This was not a register so parse other operands that start with an |
4313 | // identifier (like labels) as expressions and create them as immediates. |
4314 | const MCExpr *IdVal; |
4315 | S = getLoc(); |
4316 | if (getParser().parseExpression(IdVal)) |
4317 | return true; |
4318 | E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
4319 | Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext())); |
4320 | return false; |
4321 | } |
4322 | case AsmToken::Integer: |
4323 | case AsmToken::Real: |
4324 | case AsmToken::Hash: { |
4325 | // #42 -> immediate. |
4326 | S = getLoc(); |
4327 | |
4328 | parseOptionalToken(AsmToken::Hash); |
4329 | |
4330 | // Parse a negative sign |
4331 | bool isNegative = false; |
4332 | if (getTok().is(AsmToken::Minus)) { |
4333 | isNegative = true; |
4334 | // We need to consume this token only when we have a Real, otherwise |
4335 | // we let parseSymbolicImmVal take care of it |
4336 | if (Parser.getLexer().peekTok().is(AsmToken::Real)) |
4337 | Lex(); |
4338 | } |
4339 | |
4340 | // The only Real that should come through here is a literal #0.0 for |
4341 | // the fcmp[e] r, #0.0 instructions. They expect raw token operands, |
4342 | // so convert the value. |
4343 | const AsmToken &Tok = getTok(); |
4344 | if (Tok.is(AsmToken::Real)) { |
4345 | APFloat RealVal(APFloat::IEEEdouble(), Tok.getString()); |
4346 | uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); |
4347 | if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" && |
4348 | Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" && |
4349 | Mnemonic != "fcmlt" && Mnemonic != "fcmne") |
4350 | return TokError("unexpected floating point literal"); |
4351 | else if (IntVal != 0 || isNegative) |
4352 | return TokError("expected floating-point constant #0.0"); |
4353 | Lex(); // Eat the token. |
4354 | |
4355 | Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext())); |
4356 | Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext())); |
4357 | return false; |
4358 | } |
4359 | |
4360 | const MCExpr *ImmVal; |
4361 | if (parseSymbolicImmVal(ImmVal)) |
4362 | return true; |
4363 | |
4364 | E = SMLoc::getFromPointer(getLoc().getPointer() - 1); |
4365 | Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext())); |
4366 | return false; |
4367 | } |
4368 | case AsmToken::Equal: { |
4369 | SMLoc Loc = getLoc(); |
4370 | if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val) |
4371 | return TokError("unexpected token in operand"); |
4372 | Lex(); // Eat '=' |
4373 | const MCExpr *SubExprVal; |
4374 | if (getParser().parseExpression(SubExprVal)) |
4375 | return true; |
4376 | |
4377 | if (Operands.size() < 2 || |
4378 | !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg()) |
4379 | return Error(Loc, "Only valid when first operand is register"); |
4380 | |
4381 | bool IsXReg = |
4382 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
4383 | Operands[1]->getReg()); |
4384 | |
4385 | MCContext& Ctx = getContext(); |
4386 | E = SMLoc::getFromPointer(Loc.getPointer() - 1); |
4387 | // If the op is an imm and can be fit into a mov, then replace ldr with mov. |
4388 | if (isa<MCConstantExpr>(SubExprVal)) { |
4389 | uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue(); |
4390 | uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16; |
4391 | while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) { |
4392 | ShiftAmt += 16; |
4393 | Imm >>= 16; |
4394 | } |
4395 | if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) { |
4396 | Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx); |
4397 | Operands.push_back(AArch64Operand::CreateImm( |
4398 | MCConstantExpr::create(Imm, Ctx), S, E, Ctx)); |
4399 | if (ShiftAmt) |
4400 | Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL, |
4401 | ShiftAmt, true, S, E, Ctx)); |
4402 | return false; |
4403 | } |
4404 | APInt Simm = APInt(64, Imm << ShiftAmt); |
4405 | // check if the immediate is an unsigned or signed 32-bit int for W regs |
4406 | if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32))) |
4407 | return Error(Loc, "Immediate too large for register"); |
4408 | } |
4409 | // If it is a label or an imm that cannot fit in a movz, put it into CP. |
4410 | const MCExpr *CPLoc = |
4411 | getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc); |
4412 | Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx)); |
4413 | return false; |
4414 | } |
4415 | } |
4416 | } |
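// Editor's note (illustrative): the AsmToken::Equal case implements the
// ldr pseudo-instruction, e.g. "ldr x0, =0x12340000" is rewritten to
// "movz x0, #0x1234, lsl #16"; values that don't fit a single movz, and
// labels, go to a constant pool and are loaded PC-relatively.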
4417 | |
4418 | bool AArch64AsmParser::parseImmExpr(int64_t &Out) { |
4419 | const MCExpr *Expr = nullptr; |
4420 | SMLoc L = getLoc(); |
4421 | if (check(getParser().parseExpression(Expr), L, "expected expression")) |
4422 | return true; |
4423 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); |
4424 | if (check(!Value, L, "expected constant expression")) |
4425 | return true; |
4426 | Out = Value->getValue(); |
4427 | return false; |
4428 | } |
4429 | |
4430 | bool AArch64AsmParser::parseComma() { |
4431 | if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma")) |
4432 | return true; |
4433 | // Eat the comma |
4434 | Lex(); |
4435 | return false; |
4436 | } |
4437 | |
4438 | bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base, |
4439 | unsigned First, unsigned Last) { |
4440 | unsigned Reg; |
4441 | SMLoc Start, End; |
4442 | if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register")) |
4443 | return true; |
4444 | |
4445 | // Special handling for FP and LR; they aren't linearly after x28 in |
4446 | // the registers enum. |
4447 | unsigned RangeEnd = Last; |
4448 | if (Base == AArch64::X0) { |
4449 | if (Last == AArch64::FP) { |
4450 | RangeEnd = AArch64::X28; |
4451 | if (Reg == AArch64::FP) { |
4452 | Out = 29; |
4453 | return false; |
4454 | } |
4455 | } |
4456 | if (Last == AArch64::LR) { |
4457 | RangeEnd = AArch64::X28; |
4458 | if (Reg == AArch64::FP) { |
4459 | Out = 29; |
4460 | return false; |
4461 | } else if (Reg == AArch64::LR) { |
4462 | Out = 30; |
4463 | return false; |
4464 | } |
4465 | } |
4466 | } |
4467 | |
4468 | if (check(Reg < First || Reg > RangeEnd, Start, |
4469 | Twine("expected register in range ") + |
4470 | AArch64InstPrinter::getRegisterName(First) + " to " + |
4471 | AArch64InstPrinter::getRegisterName(Last))) |
4472 | return true; |
4473 | Out = Reg - Base; |
4474 | return false; |
4475 | } |
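// Editor's note (illustrative): this maps a parsed register to its
// offset from Base, accepting fp and lr as the numeric encodings 29 and
// 30 for X0-based ranges, since they are not adjacent to x28 in the
// register enum.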
4476 | |
4477 | bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1, |
4478 | const MCParsedAsmOperand &Op2) const { |
4479 | auto &AOp1 = static_cast<const AArch64Operand&>(Op1); |
4480 | auto &AOp2 = static_cast<const AArch64Operand&>(Op2); |
4481 | if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg && |
4482 | AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg) |
4483 | return MCTargetAsmParser::regsEqual(Op1, Op2); |
4484 | |
4485 | assert(AOp1.isScalarReg() && AOp2.isScalarReg() && |
4486 | "Testing equality of non-scalar registers not supported"); |
4487 | |
4488 | // Check if the registers match their sub/super register classes. |
4489 | if (AOp1.getRegEqualityTy() == EqualsSuperReg) |
4490 | return getXRegFromWReg(Op1.getReg()) == Op2.getReg(); |
4491 | if (AOp1.getRegEqualityTy() == EqualsSubReg) |
4492 | return getWRegFromXReg(Op1.getReg()) == Op2.getReg(); |
4493 | if (AOp2.getRegEqualityTy() == EqualsSuperReg) |
4494 | return getXRegFromWReg(Op2.getReg()) == Op1.getReg(); |
4495 | if (AOp2.getRegEqualityTy() == EqualsSubReg) |
4496 | return getWRegFromXReg(Op2.getReg()) == Op1.getReg(); |
4497 | |
4498 | return false; |
4499 | } |
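// Editor's note (illustrative): EqualsSuperReg/EqualsSubReg cover tied
// operands written in the other width, e.g. accepting w0 where the
// destination was x0 (or vice versa) when both name the same physical
// register.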
4500 | |
4501 | /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its |
4502 | /// operands. |
4503 | bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info, |
4504 | StringRef Name, SMLoc NameLoc, |
4505 | OperandVector &Operands) { |
4506 | Name = StringSwitch<StringRef>(Name.lower()) |
4507 | .Case("beq", "b.eq") |
4508 | .Case("bne", "b.ne") |
4509 | .Case("bhs", "b.hs") |
4510 | .Case("bcs", "b.cs") |
4511 | .Case("blo", "b.lo") |
4512 | .Case("bcc", "b.cc") |
4513 | .Case("bmi", "b.mi") |
4514 | .Case("bpl", "b.pl") |
4515 | .Case("bvs", "b.vs") |
4516 | .Case("bvc", "b.vc") |
4517 | .Case("bhi", "b.hi") |
4518 | .Case("bls", "b.ls") |
4519 | .Case("bge", "b.ge") |
4520 | .Case("blt", "b.lt") |
4521 | .Case("bgt", "b.gt") |
4522 | .Case("ble", "b.le") |
4523 | .Case("bal", "b.al") |
4524 | .Case("bnv", "b.nv") |
4525 | .Default(Name); |
4526 | |
4527 | // First check for the AArch64-specific .req directive. |
4528 | if (getTok().is(AsmToken::Identifier) && |
4529 | getTok().getIdentifier().lower() == ".req") { |
4530 | parseDirectiveReq(Name, NameLoc); |
4531 | // We always return 'error' for this, as we're done with this |
4532 | // statement and don't need to match the 'instruction'. |
4533 | return true; |
4534 | } |
4535 | |
4536 | // Create the leading tokens for the mnemonic, split by '.' characters. |
4537 | size_t Start = 0, Next = Name.find('.'); |
4538 | StringRef Head = Name.slice(Start, Next); |
4539 | |
4540 | // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for |
4541 | // the SYS instruction. |
4542 | if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" || |
4543 | Head == "cfp" || Head == "dvp" || Head == "cpp") |
4544 | return parseSysAlias(Head, NameLoc, Operands); |
4545 | |
4546 | Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext())); |
4547 | Mnemonic = Head; |
4548 | |
4549 | // Handle condition codes for a branch mnemonic |
4550 | if ((Head == "b" || Head == "bc") && Next != StringRef::npos) { |
4551 | Start = Next; |
4552 | Next = Name.find('.', Start + 1); |
4553 | Head = Name.slice(Start + 1, Next); |
4554 | |
4555 | SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() + |
4556 | (Head.data() - Name.data())); |
4557 | std::string Suggestion; |
4558 | AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion); |
4559 | if (CC == AArch64CC::Invalid) { |
4560 | std::string Msg = "invalid condition code"; |
4561 | if (!Suggestion.empty()) |
4562 | Msg += ", did you mean " + Suggestion + "?"; |
4563 | return Error(SuffixLoc, Msg); |
4564 | } |
4565 | Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(), |
4566 | /*IsSuffix=*/true)); |
4567 | Operands.push_back( |
4568 | AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext())); |
4569 | } |
4570 | |
4571 | // Add the remaining tokens in the mnemonic. |
4572 | while (Next != StringRef::npos) { |
4573 | Start = Next; |
4574 | Next = Name.find('.', Start + 1); |
4575 | Head = Name.slice(Start, Next); |
4576 | SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() + |
4577 | (Head.data() - Name.data()) + 1); |
4578 | Operands.push_back(AArch64Operand::CreateToken( |
4579 | Head, SuffixLoc, getContext(), /*IsSuffix=*/true)); |
4580 | } |
4581 | |
4582 | // Conditional compare instructions have a Condition Code operand, which needs |
4583 | // to be parsed and turned into an immediate operand. |
4584 | bool condCodeFourthOperand = |
4585 | (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" || |
4586 | Head == "fccmpe" || Head == "fcsel" || Head == "csel" || |
4587 | Head == "csinc" || Head == "csinv" || Head == "csneg"); |
4588 | |
4589 | // These instructions are aliases to some of the conditional select |
4590 | // instructions. However, the condition code is inverted in the aliased |
4591 | // instruction. |
4592 | // |
4593 | // FIXME: Is this the correct way to handle these? Or should the parser |
4594 | // generate the aliased instructions directly? |
4595 | bool condCodeSecondOperand = (Head == "cset" || Head == "csetm"); |
4596 | bool condCodeThirdOperand = |
4597 | (Head == "cinc" || Head == "cinv" || Head == "cneg"); |
4598 | |
4599 | // Read the remaining operands. |
4600 | if (getLexer().isNot(AsmToken::EndOfStatement)) { |
4601 | |
4602 | unsigned N = 1; |
4603 | do { |
4604 | // Parse and remember the operand. |
4605 | if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) || |
4606 | (N == 3 && condCodeThirdOperand) || |
4607 | (N == 2 && condCodeSecondOperand), |
4608 | condCodeSecondOperand || condCodeThirdOperand)) { |
4609 | return true; |
4610 | } |
4611 | |
4612 | // After successfully parsing some operands there are three special cases |
4613 | // to consider (i.e. notional operands not separated by commas). Two are |
4614 | // due to memory specifiers: |
4615 | // + An RBrac will end an address for load/store/prefetch |
4616 | // + An '!' will indicate a pre-indexed operation. |
4617 | // |
4618 | // And a further case is '}', which ends a group of tokens specifying the |
4619 | // SME accumulator array 'ZA' or tile vector, i.e. |
4620 | // |
4621 | // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }' |
4622 | // |
4623 | // It's someone else's responsibility to make sure these tokens are sane |
4624 | // in the given context! |
4625 | |
4626 | if (parseOptionalToken(AsmToken::RBrac)) |
4627 | Operands.push_back( |
4628 | AArch64Operand::CreateToken("]", getLoc(), getContext())); |
4629 | if (parseOptionalToken(AsmToken::Exclaim)) |
4630 | Operands.push_back( |
4631 | AArch64Operand::CreateToken("!", getLoc(), getContext())); |
4632 | if (parseOptionalToken(AsmToken::RCurly)) |
4633 | Operands.push_back( |
4634 | AArch64Operand::CreateToken("}", getLoc(), getContext())); |
4635 | |
4636 | ++N; |
4637 | } while (parseOptionalToken(AsmToken::Comma)); |
4638 | } |
4639 | |
4640 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) |
4641 | return true; |
4642 | |
4643 | return false; |
4644 | } |
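// Editor's note (illustrative): the mnemonic is split on '.', so "b.eq"
// yields a "b" token, a "." suffix token, and a condition-code operand;
// any further '.'-separated pieces become suffix tokens for the matcher.
// Aliases like "beq" were already canonicalized to "b.eq" above.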
4645 | |
4646 | static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) { |
4647 | assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)); |
4648 | return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) || |
4649 | (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) || |
4650 | (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) || |
4651 | (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) || |
4652 | (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) || |
4653 | (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0)); |
4654 | } |
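// Editor's note (illustrative): this treats e.g. B3, H3, S3, D3 and Q3
// as aliases of Z3, since the SVE Z registers overlap the scalar
// FP/SIMD registers with the same index.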
4655 | |
4656 | // FIXME: This entire function is a giant hack to provide us with decent |
4657 | // operand range validation/diagnostics until TableGen/MC can be extended |
4658 | // to support autogeneration of this kind of validation. |
4659 | bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc, |
4660 | SmallVectorImpl<SMLoc> &Loc) { |
4661 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); |
4662 | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); |
4663 | |
4664 | // A prefix only applies to the instruction following it. Here we extract |
4665 | // prefix information for the next instruction before validating the current |
4666 | // one so that in the case of failure we don't erroneously continue using the |
4667 | // current prefix. |
4668 | PrefixInfo Prefix = NextPrefix; |
4669 | NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags); |
4670 | |
4671 | // Before validating the instruction in isolation we run through the rules |
4672 | // applicable when it follows a prefix instruction. |
4673 | // NOTE: brk & hlt can be prefixed but require no additional validation. |
4674 | if (Prefix.isActive() && |
4675 | (Inst.getOpcode() != AArch64::BRK) && |
4676 | (Inst.getOpcode() != AArch64::HLT)) { |
4677 | |
4678 | // Prefixed instructions must have a destructive operand. |
4679 | if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) == |
4680 | AArch64::NotDestructive) |
4681 | return Error(IDLoc, "instruction is unpredictable when following a" |
4682 | " movprfx, suggest replacing movprfx with mov"); |
4683 | |
4684 | // Destination operands must match. |
4685 | if (Inst.getOperand(0).getReg() != Prefix.getDstReg()) |
4686 | return Error(Loc[0], "instruction is unpredictable when following a" |
4687 | " movprfx writing to a different destination"); |
4688 | |
4689 | // Destination operand must not be used in any other location. |
4690 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) { |
4691 | if (Inst.getOperand(i).isReg() && |
4692 | (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) && |
4693 | isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg())) |
4694 | return Error(Loc[0], "instruction is unpredictable when following a" |
4695 | " movprfx and destination also used as non-destructive" |
4696 | " source"); |
4697 | } |
4698 | |
4699 | auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID]; |
4700 | if (Prefix.isPredicated()) { |
4701 | int PgIdx = -1; |
4702 | |
4703 | // Find the instruction's general predicate. |
4704 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) |
4705 | if (Inst.getOperand(i).isReg() && |
4706 | PPRRegClass.contains(Inst.getOperand(i).getReg())) { |
4707 | PgIdx = i; |
4708 | break; |
4709 | } |
4710 | |
4711 | // Instruction must be predicated if the movprfx is predicated. |
4712 | if (PgIdx == -1 || |
4713 | (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone) |
4714 | return Error(IDLoc, "instruction is unpredictable when following a" |
4715 | " predicated movprfx, suggest using unpredicated movprfx"); |
4716 | |
4717 | // Instruction must use same general predicate as the movprfx. |
4718 | if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg()) |
4719 | return Error(IDLoc, "instruction is unpredictable when following a" |
4720 | " predicated movprfx using a different general predicate"); |
4721 | |
4722 | // Instruction element type must match the movprfx. |
4723 | if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize()) |
4724 | return Error(IDLoc, "instruction is unpredictable when following a" |
4725 | " predicated movprfx with a different element size"); |
4726 | } |
4727 | } |
4728 | |
4729 | // Check for indexed addressing modes where the base register is the |
4730 | // same as a destination/source register, or a pair load where |
4731 | // Rt == Rt2. All of those are unpredictable. |
4732 | switch (Inst.getOpcode()) { |
4733 | case AArch64::LDPSWpre: |
4734 | case AArch64::LDPWpost: |
4735 | case AArch64::LDPWpre: |
4736 | case AArch64::LDPXpost: |
4737 | case AArch64::LDPXpre: { |
4738 | unsigned Rt = Inst.getOperand(1).getReg(); |
4739 | unsigned Rt2 = Inst.getOperand(2).getReg(); |
4740 | unsigned Rn = Inst.getOperand(3).getReg(); |
4741 | if (RI->isSubRegisterEq(Rn, Rt)) |
4742 | return Error(Loc[0], "unpredictable LDP instruction, writeback base " |
4743 | "is also a destination"); |
4744 | if (RI->isSubRegisterEq(Rn, Rt2)) |
4745 | return Error(Loc[1], "unpredictable LDP instruction, writeback base " |
4746 | "is also a destination"); |
4747 | LLVM_FALLTHROUGH; |
4748 | } |
4749 | case AArch64::LDPDi: |
4750 | case AArch64::LDPQi: |
4751 | case AArch64::LDPSi: |
4752 | case AArch64::LDPSWi: |
4753 | case AArch64::LDPWi: |
4754 | case AArch64::LDPXi: { |
4755 | unsigned Rt = Inst.getOperand(0).getReg(); |
4756 | unsigned Rt2 = Inst.getOperand(1).getReg(); |
4757 | if (Rt == Rt2) |
4758 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); |
4759 | break; |
4760 | } |
4761 | case AArch64::LDPDpost: |
4762 | case AArch64::LDPDpre: |
4763 | case AArch64::LDPQpost: |
4764 | case AArch64::LDPQpre: |
4765 | case AArch64::LDPSpost: |
4766 | case AArch64::LDPSpre: |
4767 | case AArch64::LDPSWpost: { |
4768 | unsigned Rt = Inst.getOperand(1).getReg(); |
4769 | unsigned Rt2 = Inst.getOperand(2).getReg(); |
4770 | if (Rt == Rt2) |
4771 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); |
4772 | break; |
4773 | } |
4774 | case AArch64::STPDpost: |
4775 | case AArch64::STPDpre: |
4776 | case AArch64::STPQpost: |
4777 | case AArch64::STPQpre: |
4778 | case AArch64::STPSpost: |
4779 | case AArch64::STPSpre: |
4780 | case AArch64::STPWpost: |
4781 | case AArch64::STPWpre: |
4782 | case AArch64::STPXpost: |
4783 | case AArch64::STPXpre: { |
4784 | unsigned Rt = Inst.getOperand(1).getReg(); |
4785 | unsigned Rt2 = Inst.getOperand(2).getReg(); |
4786 | unsigned Rn = Inst.getOperand(3).getReg(); |
4787 | if (RI->isSubRegisterEq(Rn, Rt)) |
4788 | return Error(Loc[0], "unpredictable STP instruction, writeback base " |
4789 | "is also a source"); |
4790 | if (RI->isSubRegisterEq(Rn, Rt2)) |
4791 | return Error(Loc[1], "unpredictable STP instruction, writeback base " |
4792 | "is also a source"); |
4793 | break; |
4794 | } |
4795 | case AArch64::LDRBBpre: |
4796 | case AArch64::LDRBpre: |
4797 | case AArch64::LDRHHpre: |
4798 | case AArch64::LDRHpre: |
4799 | case AArch64::LDRSBWpre: |
4800 | case AArch64::LDRSBXpre: |
4801 | case AArch64::LDRSHWpre: |
4802 | case AArch64::LDRSHXpre: |
4803 | case AArch64::LDRSWpre: |
4804 | case AArch64::LDRWpre: |
4805 | case AArch64::LDRXpre: |
4806 | case AArch64::LDRBBpost: |
4807 | case AArch64::LDRBpost: |
4808 | case AArch64::LDRHHpost: |
4809 | case AArch64::LDRHpost: |
4810 | case AArch64::LDRSBWpost: |
4811 | case AArch64::LDRSBXpost: |
4812 | case AArch64::LDRSHWpost: |
4813 | case AArch64::LDRSHXpost: |
4814 | case AArch64::LDRSWpost: |
4815 | case AArch64::LDRWpost: |
4816 | case AArch64::LDRXpost: { |
4817 | unsigned Rt = Inst.getOperand(1).getReg(); |
4818 | unsigned Rn = Inst.getOperand(2).getReg(); |
4819 | if (RI->isSubRegisterEq(Rn, Rt)) |
4820 | return Error(Loc[0], "unpredictable LDR instruction, writeback base " |
4821 | "is also a source"); |
4822 | break; |
4823 | } |
4824 | case AArch64::STRBBpost: |
4825 | case AArch64::STRBpost: |
4826 | case AArch64::STRHHpost: |
4827 | case AArch64::STRHpost: |
4828 | case AArch64::STRWpost: |
4829 | case AArch64::STRXpost: |
4830 | case AArch64::STRBBpre: |
4831 | case AArch64::STRBpre: |
4832 | case AArch64::STRHHpre: |
4833 | case AArch64::STRHpre: |
4834 | case AArch64::STRWpre: |
4835 | case AArch64::STRXpre: { |
4836 | unsigned Rt = Inst.getOperand(1).getReg(); |
4837 | unsigned Rn = Inst.getOperand(2).getReg(); |
4838 | if (RI->isSubRegisterEq(Rn, Rt)) |
4839 | return Error(Loc[0], "unpredictable STR instruction, writeback base " |
4840 | "is also a source"); |
4841 | break; |
4842 | } |
4843 | case AArch64::STXRB: |
4844 | case AArch64::STXRH: |
4845 | case AArch64::STXRW: |
4846 | case AArch64::STXRX: |
4847 | case AArch64::STLXRB: |
4848 | case AArch64::STLXRH: |
4849 | case AArch64::STLXRW: |
4850 | case AArch64::STLXRX: { |
4851 | unsigned Rs = Inst.getOperand(0).getReg(); |
4852 | unsigned Rt = Inst.getOperand(1).getReg(); |
4853 | unsigned Rn = Inst.getOperand(2).getReg(); |
4854 | if (RI->isSubRegisterEq(Rt, Rs) || |
4855 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) |
4856 | return Error(Loc[0], |
4857 | "unpredictable STXR instruction, status is also a source"); |
4858 | break; |
4859 | } |
4860 | case AArch64::STXPW: |
4861 | case AArch64::STXPX: |
4862 | case AArch64::STLXPW: |
4863 | case AArch64::STLXPX: { |
4864 | unsigned Rs = Inst.getOperand(0).getReg(); |
4865 | unsigned Rt1 = Inst.getOperand(1).getReg(); |
4866 | unsigned Rt2 = Inst.getOperand(2).getReg(); |
4867 | unsigned Rn = Inst.getOperand(3).getReg(); |
4868 | if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) || |
4869 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) |
4870 | return Error(Loc[0], |
4871 | "unpredictable STXP instruction, status is also a source"); |
4872 | break; |
4873 | } |
4874 | case AArch64::LDRABwriteback: |
4875 | case AArch64::LDRAAwriteback: { |
4876 | unsigned Xt = Inst.getOperand(0).getReg(); |
4877 | unsigned Xn = Inst.getOperand(1).getReg(); |
4878 | if (Xt == Xn) |
4879 | return Error(Loc[0], |
4880 | "unpredictable LDRA instruction, writeback base" |
4881 | " is also a destination"); |
4882 | break; |
4883 | } |
4884 | } |
4885 | |
4886 | // Check v8.8-A memops instructions. |
4887 | switch (Inst.getOpcode()) { |
4888 | case AArch64::CPYFP: |
4889 | case AArch64::CPYFPWN: |
4890 | case AArch64::CPYFPRN: |
4891 | case AArch64::CPYFPN: |
4892 | case AArch64::CPYFPWT: |
4893 | case AArch64::CPYFPWTWN: |
4894 | case AArch64::CPYFPWTRN: |
4895 | case AArch64::CPYFPWTN: |
4896 | case AArch64::CPYFPRT: |
4897 | case AArch64::CPYFPRTWN: |
4898 | case AArch64::CPYFPRTRN: |
4899 | case AArch64::CPYFPRTN: |
4900 | case AArch64::CPYFPT: |
4901 | case AArch64::CPYFPTWN: |
4902 | case AArch64::CPYFPTRN: |
4903 | case AArch64::CPYFPTN: |
4904 | case AArch64::CPYFM: |
4905 | case AArch64::CPYFMWN: |
4906 | case AArch64::CPYFMRN: |
4907 | case AArch64::CPYFMN: |
4908 | case AArch64::CPYFMWT: |
4909 | case AArch64::CPYFMWTWN: |
4910 | case AArch64::CPYFMWTRN: |
4911 | case AArch64::CPYFMWTN: |
4912 | case AArch64::CPYFMRT: |
4913 | case AArch64::CPYFMRTWN: |
4914 | case AArch64::CPYFMRTRN: |
4915 | case AArch64::CPYFMRTN: |
4916 | case AArch64::CPYFMT: |
4917 | case AArch64::CPYFMTWN: |
4918 | case AArch64::CPYFMTRN: |
4919 | case AArch64::CPYFMTN: |
4920 | case AArch64::CPYFE: |
4921 | case AArch64::CPYFEWN: |
4922 | case AArch64::CPYFERN: |
4923 | case AArch64::CPYFEN: |
4924 | case AArch64::CPYFEWT: |
4925 | case AArch64::CPYFEWTWN: |
4926 | case AArch64::CPYFEWTRN: |
4927 | case AArch64::CPYFEWTN: |
4928 | case AArch64::CPYFERT: |
4929 | case AArch64::CPYFERTWN: |
4930 | case AArch64::CPYFERTRN: |
4931 | case AArch64::CPYFERTN: |
4932 | case AArch64::CPYFET: |
4933 | case AArch64::CPYFETWN: |
4934 | case AArch64::CPYFETRN: |
4935 | case AArch64::CPYFETN: |
4936 | case AArch64::CPYP: |
4937 | case AArch64::CPYPWN: |
4938 | case AArch64::CPYPRN: |
4939 | case AArch64::CPYPN: |
4940 | case AArch64::CPYPWT: |
4941 | case AArch64::CPYPWTWN: |
4942 | case AArch64::CPYPWTRN: |
4943 | case AArch64::CPYPWTN: |
4944 | case AArch64::CPYPRT: |
4945 | case AArch64::CPYPRTWN: |
4946 | case AArch64::CPYPRTRN: |
4947 | case AArch64::CPYPRTN: |
4948 | case AArch64::CPYPT: |
4949 | case AArch64::CPYPTWN: |
4950 | case AArch64::CPYPTRN: |
4951 | case AArch64::CPYPTN: |
4952 | case AArch64::CPYM: |
4953 | case AArch64::CPYMWN: |
4954 | case AArch64::CPYMRN: |
4955 | case AArch64::CPYMN: |
4956 | case AArch64::CPYMWT: |
4957 | case AArch64::CPYMWTWN: |
4958 | case AArch64::CPYMWTRN: |
4959 | case AArch64::CPYMWTN: |
4960 | case AArch64::CPYMRT: |
4961 | case AArch64::CPYMRTWN: |
4962 | case AArch64::CPYMRTRN: |
4963 | case AArch64::CPYMRTN: |
4964 | case AArch64::CPYMT: |
4965 | case AArch64::CPYMTWN: |
4966 | case AArch64::CPYMTRN: |
4967 | case AArch64::CPYMTN: |
4968 | case AArch64::CPYE: |
4969 | case AArch64::CPYEWN: |
4970 | case AArch64::CPYERN: |
4971 | case AArch64::CPYEN: |
4972 | case AArch64::CPYEWT: |
4973 | case AArch64::CPYEWTWN: |
4974 | case AArch64::CPYEWTRN: |
4975 | case AArch64::CPYEWTN: |
4976 | case AArch64::CPYERT: |
4977 | case AArch64::CPYERTWN: |
4978 | case AArch64::CPYERTRN: |
4979 | case AArch64::CPYERTN: |
4980 | case AArch64::CPYET: |
4981 | case AArch64::CPYETWN: |
4982 | case AArch64::CPYETRN: |
4983 | case AArch64::CPYETN: { |
4984 | unsigned Xd_wb = Inst.getOperand(0).getReg(); |
4985 | unsigned Xs_wb = Inst.getOperand(1).getReg(); |
4986 | unsigned Xn_wb = Inst.getOperand(2).getReg(); |
4987 | unsigned Xd = Inst.getOperand(3).getReg(); |
4988 | unsigned Xs = Inst.getOperand(4).getReg(); |
4989 | unsigned Xn = Inst.getOperand(5).getReg(); |
4990 | if (Xd_wb != Xd) |
4991 | return Error(Loc[0], |
4992 | "invalid CPY instruction, Xd_wb and Xd do not match"); |
4993 | if (Xs_wb != Xs) |
4994 | return Error(Loc[0], |
4995 | "invalid CPY instruction, Xs_wb and Xs do not match"); |
4996 | if (Xn_wb != Xn) |
4997 | return Error(Loc[0], |
4998 | "invalid CPY instruction, Xn_wb and Xn do not match"); |
4999 | if (Xd == Xs) |
5000 | return Error(Loc[0], "invalid CPY instruction, destination and source" |
5001 | " registers are the same"); |
5002 | if (Xd == Xn) |
5003 | return Error(Loc[0], "invalid CPY instruction, destination and size" |
5004 | " registers are the same"); |
5005 | if (Xs == Xn) |
5006 | return Error(Loc[0], "invalid CPY instruction, source and size" |
5007 | " registers are the same"); |
5008 | break; |
5009 | } |
5010 | case AArch64::SETP: |
5011 | case AArch64::SETPT: |
5012 | case AArch64::SETPN: |
5013 | case AArch64::SETPTN: |
5014 | case AArch64::SETM: |
5015 | case AArch64::SETMT: |
5016 | case AArch64::SETMN: |
5017 | case AArch64::SETMTN: |
5018 | case AArch64::SETE: |
5019 | case AArch64::SETET: |
5020 | case AArch64::SETEN: |
5021 | case AArch64::SETETN: |
5022 | case AArch64::SETGP: |
5023 | case AArch64::SETGPT: |
5024 | case AArch64::SETGPN: |
5025 | case AArch64::SETGPTN: |
5026 | case AArch64::SETGM: |
5027 | case AArch64::SETGMT: |
5028 | case AArch64::SETGMN: |
5029 | case AArch64::SETGMTN: |
5030 | case AArch64::MOPSSETGE: |
5031 | case AArch64::MOPSSETGET: |
5032 | case AArch64::MOPSSETGEN: |
5033 | case AArch64::MOPSSETGETN: { |
5034 | unsigned Xd_wb = Inst.getOperand(0).getReg(); |
5035 | unsigned Xn_wb = Inst.getOperand(1).getReg(); |
5036 | unsigned Xd = Inst.getOperand(2).getReg(); |
5037 | unsigned Xn = Inst.getOperand(3).getReg(); |
5038 | unsigned Xm = Inst.getOperand(4).getReg(); |
5039 | if (Xd_wb != Xd) |
5040 | return Error(Loc[0], |
5041 | "invalid SET instruction, Xd_wb and Xd do not match"); |
5042 | if (Xn_wb != Xn) |
5043 | return Error(Loc[0], |
5044 | "invalid SET instruction, Xn_wb and Xn do not match"); |
5045 | if (Xd == Xn) |
5046 | return Error(Loc[0], "invalid SET instruction, destination and size" |
5047 | " registers are the same"); |
5048 | if (Xd == Xm) |
5049 | return Error(Loc[0], "invalid SET instruction, destination and source" |
5050 | " registers are the same"); |
5051 | if (Xn == Xm) |
5052 | return Error(Loc[0], "invalid SET instruction, source and size" |
5053 | " registers are the same"); |
5054 | break; |
5055 | } |
5056 | } |
5057 | |
5058 | // Now check immediate ranges. Separate from the above as there is overlap |
5059 | // in the instructions being checked and this keeps the nested conditionals |
5060 | // to a minimum. |
5061 | switch (Inst.getOpcode()) { |
5062 | case AArch64::ADDSWri: |
5063 | case AArch64::ADDSXri: |
5064 | case AArch64::ADDWri: |
5065 | case AArch64::ADDXri: |
5066 | case AArch64::SUBSWri: |
5067 | case AArch64::SUBSXri: |
5068 | case AArch64::SUBWri: |
5069 | case AArch64::SUBXri: { |
5070 | // Annoyingly we can't do this in the isAddSubImm predicate, so there is |
5071 | // some slight duplication here. |
5072 | if (Inst.getOperand(2).isExpr()) { |
5073 | const MCExpr *Expr = Inst.getOperand(2).getExpr(); |
5074 | AArch64MCExpr::VariantKind ELFRefKind; |
5075 | MCSymbolRefExpr::VariantKind DarwinRefKind; |
5076 | int64_t Addend; |
5077 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { |
5078 | |
5079 | // Only allow these with ADDXri. |
5080 | if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || |
5081 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) && |
5082 | Inst.getOpcode() == AArch64::ADDXri) |
5083 | return false; |
5084 | |
5085 | // Only allow these with ADDXri/ADDWri |
5086 | if ((ELFRefKind == AArch64MCExpr::VK_LO12 || |
5087 | ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 || |
5088 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || |
5089 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || |
5090 | ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 || |
5091 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || |
5092 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || |
5093 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || |
5094 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || |
5095 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) && |
5096 | (Inst.getOpcode() == AArch64::ADDXri || |
5097 | Inst.getOpcode() == AArch64::ADDWri)) |
5098 | return false; |
5099 | |
5100 | // Don't allow symbol refs in the immediate field otherwise |
5101 | // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of |
5102 | // operands of the original instruction (i.e. 'add w0, w1, borked' vs |
5103 | // 'cmp w0, 'borked') |
5104 | return Error(Loc.back(), "invalid immediate expression"); |
5105 | } |
5106 | // We don't validate more complex expressions here |
5107 | } |
5108 | return false; |
5109 | } |
5110 | default: |
5111 | return false; |
5112 | } |
5113 | } |
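// Editor's note (illustrative): e.g. "movprfx z0, z1" followed by
// "add z0.s, p0/m, z0.s, z2.s" passes these checks, while a following
// instruction that writes a different destination than the prefix is
// diagnosed as unpredictable.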
5114 | |
5115 | static std::string AArch64MnemonicSpellCheck(StringRef S, |
5116 | const FeatureBitset &FBS, |
5117 | unsigned VariantID = 0); |
5118 | |
5119 | bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, |
5120 | uint64_t ErrorInfo, |
5121 | OperandVector &Operands) { |
5122 | switch (ErrCode) { |
5123 | case Match_InvalidTiedOperand: { |
5124 | RegConstraintEqualityTy EqTy = |
5125 | static_cast<const AArch64Operand &>(*Operands[ErrorInfo]) |
5126 | .getRegEqualityTy(); |
5127 | switch (EqTy) { |
5128 | case RegConstraintEqualityTy::EqualsSubReg: |
5129 | return Error(Loc, "operand must be 64-bit form of destination register"); |
5130 | case RegConstraintEqualityTy::EqualsSuperReg: |
5131 | return Error(Loc, "operand must be 32-bit form of destination register"); |
5132 | case RegConstraintEqualityTy::EqualsReg: |
5133 | return Error(Loc, "operand must match destination register"); |
5134 | } |
5135 | llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5135 ); |
5136 | } |
5137 | case Match_MissingFeature: |
5138 | return Error(Loc, |
5139 | "instruction requires a CPU feature not currently enabled"); |
5140 | case Match_InvalidOperand: |
5141 | return Error(Loc, "invalid operand for instruction"); |
5142 | case Match_InvalidSuffix: |
5143 | return Error(Loc, "invalid type suffix for instruction"); |
5144 | case Match_InvalidCondCode: |
5145 | return Error(Loc, "expected AArch64 condition code"); |
5146 | case Match_AddSubRegExtendSmall: |
5147 | return Error(Loc, |
5148 | "expected '[su]xt[bhw]' with optional integer in range [0, 4]"); |
5149 | case Match_AddSubRegExtendLarge: |
5150 | return Error(Loc, |
5151 | "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]"); |
5152 | case Match_AddSubSecondSource: |
5153 | return Error(Loc, |
5154 | "expected compatible register, symbol or integer in range [0, 4095]"); |
5155 | case Match_LogicalSecondSource: |
5156 | return Error(Loc, "expected compatible register or logical immediate"); |
5157 | case Match_InvalidMovImm32Shift: |
5158 | return Error(Loc, "expected 'lsl' with optional integer 0 or 16"); |
5159 | case Match_InvalidMovImm64Shift: |
5160 | return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48"); |
5161 | case Match_AddSubRegShift32: |
5162 | return Error(Loc, |
5163 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]"); |
5164 | case Match_AddSubRegShift64: |
5165 | return Error(Loc, |
5166 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]"); |
5167 | case Match_InvalidFPImm: |
5168 | return Error(Loc, |
5169 | "expected compatible register or floating-point constant"); |
5170 | case Match_InvalidMemoryIndexedSImm6: |
5171 | return Error(Loc, "index must be an integer in range [-32, 31]."); |
5172 | case Match_InvalidMemoryIndexedSImm5: |
5173 | return Error(Loc, "index must be an integer in range [-16, 15]."); |
5174 | case Match_InvalidMemoryIndexed1SImm4: |
5175 | return Error(Loc, "index must be an integer in range [-8, 7]."); |
5176 | case Match_InvalidMemoryIndexed2SImm4: |
5177 | return Error(Loc, "index must be a multiple of 2 in range [-16, 14]."); |
5178 | case Match_InvalidMemoryIndexed3SImm4: |
5179 | return Error(Loc, "index must be a multiple of 3 in range [-24, 21]."); |
5180 | case Match_InvalidMemoryIndexed4SImm4: |
5181 | return Error(Loc, "index must be a multiple of 4 in range [-32, 28]."); |
5182 | case Match_InvalidMemoryIndexed16SImm4: |
5183 | return Error(Loc, "index must be a multiple of 16 in range [-128, 112]."); |
5184 | case Match_InvalidMemoryIndexed32SImm4: |
5185 | return Error(Loc, "index must be a multiple of 32 in range [-256, 224]."); |
5186 | case Match_InvalidMemoryIndexed1SImm6: |
5187 | return Error(Loc, "index must be an integer in range [-32, 31]."); |
5188 | case Match_InvalidMemoryIndexedSImm8: |
5189 | return Error(Loc, "index must be an integer in range [-128, 127]."); |
5190 | case Match_InvalidMemoryIndexedSImm9: |
5191 | return Error(Loc, "index must be an integer in range [-256, 255]."); |
5192 | case Match_InvalidMemoryIndexed16SImm9: |
5193 | return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080]."); |
5194 | case Match_InvalidMemoryIndexed8SImm10: |
5195 | return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088]."); |
5196 | case Match_InvalidMemoryIndexed4SImm7: |
5197 | return Error(Loc, "index must be a multiple of 4 in range [-256, 252]."); |
5198 | case Match_InvalidMemoryIndexed8SImm7: |
5199 | return Error(Loc, "index must be a multiple of 8 in range [-512, 504]."); |
5200 | case Match_InvalidMemoryIndexed16SImm7: |
5201 | return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008]."); |
5202 | case Match_InvalidMemoryIndexed8UImm5: |
5203 | return Error(Loc, "index must be a multiple of 8 in range [0, 248]."); |
5204 | case Match_InvalidMemoryIndexed4UImm5: |
5205 | return Error(Loc, "index must be a multiple of 4 in range [0, 124]."); |
5206 | case Match_InvalidMemoryIndexed2UImm5: |
5207 | return Error(Loc, "index must be a multiple of 2 in range [0, 62]."); |
5208 | case Match_InvalidMemoryIndexed8UImm6: |
5209 | return Error(Loc, "index must be a multiple of 8 in range [0, 504]."); |
5210 | case Match_InvalidMemoryIndexed16UImm6: |
5211 | return Error(Loc, "index must be a multiple of 16 in range [0, 1008]."); |
5212 | case Match_InvalidMemoryIndexed4UImm6: |
5213 | return Error(Loc, "index must be a multiple of 4 in range [0, 252]."); |
5214 | case Match_InvalidMemoryIndexed2UImm6: |
5215 | return Error(Loc, "index must be a multiple of 2 in range [0, 126]."); |
5216 | case Match_InvalidMemoryIndexed1UImm6: |
5217 | return Error(Loc, "index must be in range [0, 63]."); |
5218 | case Match_InvalidMemoryWExtend8: |
5219 | return Error(Loc, |
5220 | "expected 'uxtw' or 'sxtw' with optional shift of #0"); |
5221 | case Match_InvalidMemoryWExtend16: |
5222 | return Error(Loc, |
5223 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1"); |
5224 | case Match_InvalidMemoryWExtend32: |
5225 | return Error(Loc, |
5226 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2"); |
5227 | case Match_InvalidMemoryWExtend64: |
5228 | return Error(Loc, |
5229 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3"); |
5230 | case Match_InvalidMemoryWExtend128: |
5231 | return Error(Loc, |
5232 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4"); |
5233 | case Match_InvalidMemoryXExtend8: |
5234 | return Error(Loc, |
5235 | "expected 'lsl' or 'sxtx' with optional shift of #0"); |
5236 | case Match_InvalidMemoryXExtend16: |
5237 | return Error(Loc, |
5238 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #1"); |
5239 | case Match_InvalidMemoryXExtend32: |
5240 | return Error(Loc, |
5241 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #2"); |
5242 | case Match_InvalidMemoryXExtend64: |
5243 | return Error(Loc, |
5244 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #3"); |
5245 | case Match_InvalidMemoryXExtend128: |
5246 | return Error(Loc, |
5247 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #4"); |
5248 | case Match_InvalidMemoryIndexed1: |
5249 | return Error(Loc, "index must be an integer in range [0, 4095]."); |
5250 | case Match_InvalidMemoryIndexed2: |
5251 | return Error(Loc, "index must be a multiple of 2 in range [0, 8190]."); |
5252 | case Match_InvalidMemoryIndexed4: |
5253 | return Error(Loc, "index must be a multiple of 4 in range [0, 16380]."); |
5254 | case Match_InvalidMemoryIndexed8: |
5255 | return Error(Loc, "index must be a multiple of 8 in range [0, 32760]."); |
5256 | case Match_InvalidMemoryIndexed16: |
5257 | return Error(Loc, "index must be a multiple of 16 in range [0, 65520]."); |
5258 | case Match_InvalidImm0_0: |
5259 | return Error(Loc, "immediate must be 0."); |
5260 | case Match_InvalidImm0_1: |
5261 | return Error(Loc, "immediate must be an integer in range [0, 1]."); |
5262 | case Match_InvalidImm0_3: |
5263 | return Error(Loc, "immediate must be an integer in range [0, 3]."); |
5264 | case Match_InvalidImm0_7: |
5265 | return Error(Loc, "immediate must be an integer in range [0, 7]."); |
5266 | case Match_InvalidImm0_15: |
5267 | return Error(Loc, "immediate must be an integer in range [0, 15]."); |
5268 | case Match_InvalidImm0_31: |
5269 | return Error(Loc, "immediate must be an integer in range [0, 31]."); |
5270 | case Match_InvalidImm0_63: |
5271 | return Error(Loc, "immediate must be an integer in range [0, 63]."); |
5272 | case Match_InvalidImm0_127: |
5273 | return Error(Loc, "immediate must be an integer in range [0, 127]."); |
5274 | case Match_InvalidImm0_255: |
5275 | return Error(Loc, "immediate must be an integer in range [0, 255]."); |
5276 | case Match_InvalidImm0_65535: |
5277 | return Error(Loc, "immediate must be an integer in range [0, 65535]."); |
5278 | case Match_InvalidImm1_8: |
5279 | return Error(Loc, "immediate must be an integer in range [1, 8]."); |
5280 | case Match_InvalidImm1_16: |
5281 | return Error(Loc, "immediate must be an integer in range [1, 16]."); |
5282 | case Match_InvalidImm1_32: |
5283 | return Error(Loc, "immediate must be an integer in range [1, 32]."); |
5284 | case Match_InvalidImm1_64: |
5285 | return Error(Loc, "immediate must be an integer in range [1, 64]."); |
5286 | case Match_InvalidSVEAddSubImm8: |
5287 | return Error(Loc, "immediate must be an integer in range [0, 255]" |
5288 | " with a shift amount of 0"); |
5289 | case Match_InvalidSVEAddSubImm16: |
5290 | case Match_InvalidSVEAddSubImm32: |
5291 | case Match_InvalidSVEAddSubImm64: |
5292 | return Error(Loc, "immediate must be an integer in range [0, 255] or a " |
5293 | "multiple of 256 in range [256, 65280]"); |
5294 | case Match_InvalidSVECpyImm8: |
5295 | return Error(Loc, "immediate must be an integer in range [-128, 255]" |
5296 | " with a shift amount of 0"); |
5297 | case Match_InvalidSVECpyImm16: |
5298 | return Error(Loc, "immediate must be an integer in range [-128, 127] or a " |
5299 | "multiple of 256 in range [-32768, 65280]"); |
5300 | case Match_InvalidSVECpyImm32: |
5301 | case Match_InvalidSVECpyImm64: |
5302 | return Error(Loc, "immediate must be an integer in range [-128, 127] or a " |
5303 | "multiple of 256 in range [-32768, 32512]"); |
5304 | case Match_InvalidIndexRange0_0: |
5305 | return Error(Loc, "expected lane specifier '[0]'"); |
5306 | case Match_InvalidIndexRange1_1: |
5307 | return Error(Loc, "expected lane specifier '[1]'"); |
5308 | case Match_InvalidIndexRange0_15: |
5309 | return Error(Loc, "vector lane must be an integer in range [0, 15]."); |
5310 | case Match_InvalidIndexRange0_7: |
5311 | return Error(Loc, "vector lane must be an integer in range [0, 7]."); |
5312 | case Match_InvalidIndexRange0_3: |
5313 | return Error(Loc, "vector lane must be an integer in range [0, 3]."); |
5314 | case Match_InvalidIndexRange0_1: |
5315 | return Error(Loc, "vector lane must be an integer in range [0, 1]."); |
5316 | case Match_InvalidSVEIndexRange0_63: |
5317 | return Error(Loc, "vector lane must be an integer in range [0, 63]."); |
5318 | case Match_InvalidSVEIndexRange0_31: |
5319 | return Error(Loc, "vector lane must be an integer in range [0, 31]."); |
5320 | case Match_InvalidSVEIndexRange0_15: |
5321 | return Error(Loc, "vector lane must be an integer in range [0, 15]."); |
5322 | case Match_InvalidSVEIndexRange0_7: |
5323 | return Error(Loc, "vector lane must be an integer in range [0, 7]."); |
5324 | case Match_InvalidSVEIndexRange0_3: |
5325 | return Error(Loc, "vector lane must be an integer in range [0, 3]."); |
5326 | case Match_InvalidLabel: |
5327 | return Error(Loc, "expected label or encodable integer pc offset"); |
5328 | case Match_MRS: |
5329 | return Error(Loc, "expected readable system register"); |
5330 | case Match_MSR: |
5331 | case Match_InvalidSVCR: |
5332 | return Error(Loc, "expected writable system register or pstate"); |
5333 | case Match_InvalidComplexRotationEven: |
5334 | return Error(Loc, "complex rotation must be 0, 90, 180 or 270."); |
5335 | case Match_InvalidComplexRotationOdd: |
5336 | return Error(Loc, "complex rotation must be 90 or 270."); |
5337 | case Match_MnemonicFail: { |
5338 | std::string Suggestion = AArch64MnemonicSpellCheck( |
5339 | ((AArch64Operand &)*Operands[0]).getToken(), |
5340 | ComputeAvailableFeatures(STI->getFeatureBits())); |
5341 | return Error(Loc, "unrecognized instruction mnemonic" + Suggestion); |
5342 | } |
5343 | case Match_InvalidGPR64shifted8: |
5344 | return Error(Loc, "register must be x0..x30 or xzr, without shift"); |
5345 | case Match_InvalidGPR64shifted16: |
5346 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'"); |
5347 | case Match_InvalidGPR64shifted32: |
5348 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'"); |
5349 | case Match_InvalidGPR64shifted64: |
5350 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'"); |
5351 | case Match_InvalidGPR64shifted128: |
5352 | return Error( |
5353 | Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'"); |
5354 | case Match_InvalidGPR64NoXZRshifted8: |
5355 | return Error(Loc, "register must be x0..x30 without shift"); |
5356 | case Match_InvalidGPR64NoXZRshifted16: |
5357 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'"); |
5358 | case Match_InvalidGPR64NoXZRshifted32: |
5359 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'"); |
5360 | case Match_InvalidGPR64NoXZRshifted64: |
5361 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'"); |
5362 | case Match_InvalidGPR64NoXZRshifted128: |
5363 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'"); |
5364 | case Match_InvalidZPR32UXTW8: |
5365 | case Match_InvalidZPR32SXTW8: |
5366 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'"); |
5367 | case Match_InvalidZPR32UXTW16: |
5368 | case Match_InvalidZPR32SXTW16: |
5369 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'"); |
5370 | case Match_InvalidZPR32UXTW32: |
5371 | case Match_InvalidZPR32SXTW32: |
5372 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'"); |
5373 | case Match_InvalidZPR32UXTW64: |
5374 | case Match_InvalidZPR32SXTW64: |
5375 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'"); |
5376 | case Match_InvalidZPR64UXTW8: |
5377 | case Match_InvalidZPR64SXTW8: |
5378 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'"); |
5379 | case Match_InvalidZPR64UXTW16: |
5380 | case Match_InvalidZPR64SXTW16: |
5381 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'"); |
5382 | case Match_InvalidZPR64UXTW32: |
5383 | case Match_InvalidZPR64SXTW32: |
5384 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'"); |
5385 | case Match_InvalidZPR64UXTW64: |
5386 | case Match_InvalidZPR64SXTW64: |
5387 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'"); |
5388 | case Match_InvalidZPR32LSL8: |
5389 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'"); |
5390 | case Match_InvalidZPR32LSL16: |
5391 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'"); |
5392 | case Match_InvalidZPR32LSL32: |
5393 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'"); |
5394 | case Match_InvalidZPR32LSL64: |
5395 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'"); |
5396 | case Match_InvalidZPR64LSL8: |
5397 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'"); |
5398 | case Match_InvalidZPR64LSL16: |
5399 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'"); |
5400 | case Match_InvalidZPR64LSL32: |
5401 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'"); |
5402 | case Match_InvalidZPR64LSL64: |
5403 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'"); |
5404 | case Match_InvalidZPR0: |
5405 | return Error(Loc, "expected register without element width suffix"); |
5406 | case Match_InvalidZPR8: |
5407 | case Match_InvalidZPR16: |
5408 | case Match_InvalidZPR32: |
5409 | case Match_InvalidZPR64: |
5410 | case Match_InvalidZPR128: |
5411 | return Error(Loc, "invalid element width"); |
5412 | case Match_InvalidZPR_3b8:
5413 | return Error(Loc, "invalid restricted vector register, expected z0.b..z7.b");
5414 | case Match_InvalidZPR_3b16:
5415 | return Error(Loc, "invalid restricted vector register, expected z0.h..z7.h");
5416 | case Match_InvalidZPR_3b32:
5417 | return Error(Loc, "invalid restricted vector register, expected z0.s..z7.s");
5418 | case Match_InvalidZPR_4b16:
5419 | return Error(Loc, "invalid restricted vector register, expected z0.h..z15.h");
5420 | case Match_InvalidZPR_4b32:
5421 | return Error(Loc, "invalid restricted vector register, expected z0.s..z15.s");
5422 | case Match_InvalidZPR_4b64:
5423 | return Error(Loc, "invalid restricted vector register, expected z0.d..z15.d");
5424 | case Match_InvalidSVEPattern: |
5425 | return Error(Loc, "invalid predicate pattern"); |
5426 | case Match_InvalidSVEPredicateAnyReg: |
5427 | case Match_InvalidSVEPredicateBReg: |
5428 | case Match_InvalidSVEPredicateHReg: |
5429 | case Match_InvalidSVEPredicateSReg: |
5430 | case Match_InvalidSVEPredicateDReg: |
5431 | return Error(Loc, "invalid predicate register."); |
5432 | case Match_InvalidSVEPredicate3bAnyReg: |
5433 | return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)"); |
5434 | case Match_InvalidSVEExactFPImmOperandHalfOne:
5435 | return Error(Loc, "invalid floating-point constant, expected 0.5 or 1.0.");
5436 | case Match_InvalidSVEExactFPImmOperandHalfTwo:
5437 | return Error(Loc, "invalid floating-point constant, expected 0.5 or 2.0.");
5438 | case Match_InvalidSVEExactFPImmOperandZeroOne:
5439 | return Error(Loc, "invalid floating-point constant, expected 0.0 or 1.0.");
5440 | case Match_InvalidMatrixTileVectorH8: |
5441 | case Match_InvalidMatrixTileVectorV8: |
5442 | return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b"); |
5443 | case Match_InvalidMatrixTileVectorH16: |
5444 | case Match_InvalidMatrixTileVectorV16: |
5445 | return Error(Loc, |
5446 | "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h"); |
5447 | case Match_InvalidMatrixTileVectorH32: |
5448 | case Match_InvalidMatrixTileVectorV32: |
5449 | return Error(Loc, |
5450 | "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s"); |
5451 | case Match_InvalidMatrixTileVectorH64: |
5452 | case Match_InvalidMatrixTileVectorV64: |
5453 | return Error(Loc, |
5454 | "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d"); |
5455 | case Match_InvalidMatrixTileVectorH128: |
5456 | case Match_InvalidMatrixTileVectorV128: |
5457 | return Error(Loc, |
5458 | "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q"); |
5459 | case Match_InvalidMatrixTile32: |
5460 | return Error(Loc, "invalid matrix operand, expected za[0-3].s"); |
5461 | case Match_InvalidMatrixTile64: |
5462 | return Error(Loc, "invalid matrix operand, expected za[0-7].d"); |
5463 | case Match_InvalidMatrix: |
5464 | return Error(Loc, "invalid matrix operand, expected za"); |
5465 | case Match_InvalidMatrixIndexGPR32_12_15: |
5466 | return Error(Loc, "operand must be a register in range [w12, w15]"); |
5467 | default: |
5468 | llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 5468); |
5469 | } |
5470 | } |
5471 | |
5472 | static const char *getSubtargetFeatureName(uint64_t Val); |
5473 | |
5474 | bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, |
5475 | OperandVector &Operands, |
5476 | MCStreamer &Out, |
5477 | uint64_t &ErrorInfo, |
5478 | bool MatchingInlineAsm) { |
5479 | assert(!Operands.empty() && "Unexpected empty operand list!");
5480 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]); |
5481 | assert(Op.isToken() && "Leading operand should always be a mnemonic!");
5482 | |
5483 | StringRef Tok = Op.getToken(); |
5484 | unsigned NumOperands = Operands.size(); |
5485 | |
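// 'lsl Rd, Rn, #shift' is an alias of 'ubfm Rd, Rn, #(-shift mod width),
// #(width - 1 - shift)'; rewrite the operands here so the UBFM patterns can
// match. Worked example (illustrative): 'lsl x0, x1, #8' becomes
// 'ubfm x0, x1, #56, #55'.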
5486 | if (NumOperands == 4 && Tok == "lsl") { |
5487 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); |
5488 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
5489 | if (Op2.isScalarReg() && Op3.isImm()) { |
5490 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); |
5491 | if (Op3CE) { |
5492 | uint64_t Op3Val = Op3CE->getValue(); |
5493 | uint64_t NewOp3Val = 0; |
5494 | uint64_t NewOp4Val = 0; |
5495 | if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains( |
5496 | Op2.getReg())) { |
5497 | NewOp3Val = (32 - Op3Val) & 0x1f; |
5498 | NewOp4Val = 31 - Op3Val; |
5499 | } else { |
5500 | NewOp3Val = (64 - Op3Val) & 0x3f; |
5501 | NewOp4Val = 63 - Op3Val; |
5502 | } |
5503 | |
5504 | const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext()); |
5505 | const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext()); |
5506 | |
5507 | Operands[0] = |
5508 | AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext()); |
5509 | Operands.push_back(AArch64Operand::CreateImm( |
5510 | NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext())); |
5511 | Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(), |
5512 | Op3.getEndLoc(), getContext()); |
5513 | } |
5514 | } |
5515 | } else if (NumOperands == 4 && Tok == "bfc") { |
5516 | // FIXME: Horrible hack to handle BFC->BFM alias. |
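// Worked example (illustrative): 'bfc w0, #3, #4' becomes
// 'bfm w0, wzr, #29, #3' (ImmR = (32 - 3) & 0x1f = 29, ImmS = 4 - 1 = 3).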
5517 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
5518 | AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]); |
5519 | AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]); |
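// Note: LSBOp and WidthOp are deliberately copies, not references, because
// Operands[2] and Operands[3] are overwritten below while the original
// operands' locations are still needed for diagnostics.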
5520 | |
5521 | if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) { |
5522 | const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm()); |
5523 | const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm()); |
5524 | |
5525 | if (LSBCE && WidthCE) { |
5526 | uint64_t LSB = LSBCE->getValue(); |
5527 | uint64_t Width = WidthCE->getValue(); |
5528 | |
5529 | uint64_t RegWidth = 0; |
5530 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
5531 | Op1.getReg())) |
5532 | RegWidth = 64; |
5533 | else |
5534 | RegWidth = 32; |
5535 | |
5536 | if (LSB >= RegWidth) |
5537 | return Error(LSBOp.getStartLoc(), |
5538 | "expected integer in range [0, 31]"); |
5539 | if (Width < 1 || Width > RegWidth) |
5540 | return Error(WidthOp.getStartLoc(), |
5541 | "expected integer in range [1, 32]"); |
5542 | |
5543 | uint64_t ImmR = 0; |
5544 | if (RegWidth == 32) |
5545 | ImmR = (32 - LSB) & 0x1f; |
5546 | else |
5547 | ImmR = (64 - LSB) & 0x3f; |
5548 | |
5549 | uint64_t ImmS = Width - 1; |
5550 | |
5551 | if (ImmR != 0 && ImmS >= ImmR) |
5552 | return Error(WidthOp.getStartLoc(), |
5553 | "requested insert overflows register"); |
5554 | |
5555 | const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext()); |
5556 | const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext()); |
5557 | Operands[0] = |
5558 | AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext()); |
5559 | Operands[2] = AArch64Operand::CreateReg( |
5560 | RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar, |
5561 | SMLoc(), SMLoc(), getContext()); |
5562 | Operands[3] = AArch64Operand::CreateImm( |
5563 | ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext()); |
5564 | Operands.emplace_back( |
5565 | AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(), |
5566 | WidthOp.getEndLoc(), getContext())); |
5567 | } |
5568 | } |
5569 | } else if (NumOperands == 5) { |
5570 | // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and |
5571 | // UBFIZ -> UBFM aliases. |
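// Worked example (illustrative): 'bfi x0, x1, #8, #16' becomes
// 'bfm x0, x1, #56, #15' (ImmR = (64 - 8) & 0x3f = 56, ImmS = 16 - 1 = 15).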
5572 | if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") { |
5573 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
5574 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
5575 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); |
5576 | |
5577 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { |
5578 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); |
5579 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); |
5580 | |
5581 | if (Op3CE && Op4CE) { |
5582 | uint64_t Op3Val = Op3CE->getValue(); |
5583 | uint64_t Op4Val = Op4CE->getValue(); |
5584 | |
5585 | uint64_t RegWidth = 0; |
5586 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
5587 | Op1.getReg())) |
5588 | RegWidth = 64; |
5589 | else |
5590 | RegWidth = 32; |
5591 | |
5592 | if (Op3Val >= RegWidth) |
5593 | return Error(Op3.getStartLoc(), |
5594 | "expected integer in range [0, 31]"); |
5595 | if (Op4Val < 1 || Op4Val > RegWidth) |
5596 | return Error(Op4.getStartLoc(), |
5597 | "expected integer in range [1, 32]"); |
5598 | |
5599 | uint64_t NewOp3Val = 0; |
5600 | if (RegWidth == 32) |
5601 | NewOp3Val = (32 - Op3Val) & 0x1f; |
5602 | else |
5603 | NewOp3Val = (64 - Op3Val) & 0x3f; |
5604 | |
5605 | uint64_t NewOp4Val = Op4Val - 1; |
5606 | |
5607 | if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) |
5608 | return Error(Op4.getStartLoc(), |
5609 | "requested insert overflows register"); |
5610 | |
5611 | const MCExpr *NewOp3 = |
5612 | MCConstantExpr::create(NewOp3Val, getContext()); |
5613 | const MCExpr *NewOp4 = |
5614 | MCConstantExpr::create(NewOp4Val, getContext()); |
5615 | Operands[3] = AArch64Operand::CreateImm( |
5616 | NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext()); |
5617 | Operands[4] = AArch64Operand::CreateImm( |
5618 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); |
5619 | if (Tok == "bfi") |
5620 | Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), |
5621 | getContext()); |
5622 | else if (Tok == "sbfiz") |
5623 | Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(), |
5624 | getContext()); |
5625 | else if (Tok == "ubfiz") |
5626 | Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), |
5627 | getContext()); |
5628 | else |
5629 | llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5629 ); |
5630 | } |
5631 | } |
5632 | |
5633 | // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and |
5634 | // UBFX -> UBFM aliases. |
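// Worked example (illustrative): 'ubfx x0, x1, #8, #16' becomes
// 'ubfm x0, x1, #8, #23' (ImmS = lsb + width - 1 = 23; the lsb operand is
// unchanged).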
5635 | } else if (NumOperands == 5 && |
5636 | (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) { |
5637 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
5638 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
5639 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); |
5640 | |
5641 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { |
5642 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); |
5643 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); |
5644 | |
5645 | if (Op3CE && Op4CE) { |
5646 | uint64_t Op3Val = Op3CE->getValue(); |
5647 | uint64_t Op4Val = Op4CE->getValue(); |
5648 | |
5649 | uint64_t RegWidth = 0; |
5650 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
5651 | Op1.getReg())) |
5652 | RegWidth = 64; |
5653 | else |
5654 | RegWidth = 32; |
5655 | |
5656 | if (Op3Val >= RegWidth) |
5657 | return Error(Op3.getStartLoc(), |
5658 | "expected integer in range [0, 31]"); |
5659 | if (Op4Val < 1 || Op4Val > RegWidth) |
5660 | return Error(Op4.getStartLoc(), |
5661 | "expected integer in range [1, 32]"); |
5662 | |
5663 | uint64_t NewOp4Val = Op3Val + Op4Val - 1; |
5664 | |
5665 | if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) |
5666 | return Error(Op4.getStartLoc(), |
5667 | "requested extract overflows register"); |
5668 | |
5669 | const MCExpr *NewOp4 = |
5670 | MCConstantExpr::create(NewOp4Val, getContext()); |
5671 | Operands[4] = AArch64Operand::CreateImm( |
5672 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); |
5673 | if (Tok == "bfxil") |
5674 | Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), |
5675 | getContext()); |
5676 | else if (Tok == "sbfx") |
5677 | Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(), |
5678 | getContext()); |
5679 | else if (Tok == "ubfx") |
5680 | Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), |
5681 | getContext()); |
5682 | else |
5683 | llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5683 ); |
5684 | } |
5685 | } |
5686 | } |
5687 | } |
5688 | |
5689 | // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing |
5690 | // instruction for FP registers correctly in some rare circumstances. Convert |
5691 | // it to a safe instruction and warn (because silently changing someone's |
5692 | // assembly is rude). |
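// (Illustrative: 'movi.2d v0, #0' is rewritten below to 'movi.16b v0, #0',
// which zeroes the same 128 bits.)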
5693 | if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] && |
5694 | NumOperands == 4 && Tok == "movi") { |
5695 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); |
5696 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); |
5697 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); |
5698 | if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) || |
5699 | (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) { |
5700 | StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken(); |
5701 | if (Suffix.lower() == ".2d" && |
5702 | cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) { |
5703 | Warning(IDLoc, "instruction movi.2d with immediate #0 may not function" |
5704 | " correctly on this CPU, converting to equivalent movi.16b"); |
5705 | // Switch the suffix to .16b. |
5706 | unsigned Idx = Op1.isToken() ? 1 : 2; |
5707 | Operands[Idx] = |
5708 | AArch64Operand::CreateToken(".16b", IDLoc, getContext()); |
5709 | } |
5710 | } |
5711 | } |
5712 | |
5713 | // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands. |
5714 | // InstAlias can't quite handle this since the reg classes aren't |
5715 | // subclasses. |
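// (Illustrative: for 'sxtw x0, w1' the source w1 is remapped to x1 below;
// both name the same register number, so only the matcher's view changes.)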
5716 | if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) { |
5717 | // The source register can be Wn here, but the matcher expects a |
5718 | // GPR64. Twiddle it here if necessary. |
5719 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); |
5720 | if (Op.isScalarReg()) { |
5721 | unsigned Reg = getXRegFromWReg(Op.getReg()); |
5722 | Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, |
5723 | Op.getStartLoc(), Op.getEndLoc(), |
5724 | getContext()); |
5725 | } |
5726 | } |
5727 | // FIXME: Likewise for sxt[bh] with a Xd dst operand |
5728 | else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) { |
5729 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
5730 | if (Op.isScalarReg() && |
5731 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
5732 | Op.getReg())) { |
5733 | // The source register can be Wn here, but the matcher expects a |
5734 | // GPR64. Twiddle it here if necessary. |
5735 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); |
5736 | if (Op.isScalarReg()) { |
5737 | unsigned Reg = getXRegFromWReg(Op.getReg()); |
5738 | Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, |
5739 | Op.getStartLoc(), |
5740 | Op.getEndLoc(), getContext()); |
5741 | } |
5742 | } |
5743 | } |
5744 | // FIXME: Likewise for uxt[bh] with a Xd dst operand |
5745 | else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) { |
5746 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
5747 | if (Op.isScalarReg() && |
5748 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( |
5749 | Op.getReg())) { |
5750 | // The destination register can be Xd here, but the matcher expects a
5751 | // GPR32. Twiddle it here if necessary.
5752 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); |
5753 | if (Op.isScalarReg()) { |
5754 | unsigned Reg = getWRegFromXReg(Op.getReg()); |
5755 | Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, |
5756 | Op.getStartLoc(), |
5757 | Op.getEndLoc(), getContext()); |
5758 | } |
5759 | } |
5760 | } |
5761 | |
5762 | MCInst Inst; |
5763 | FeatureBitset MissingFeatures; |
5764 | // First try to match against the secondary set of tables containing the |
5765 | // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2"). |
5766 | unsigned MatchResult = |
5767 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, |
5768 | MatchingInlineAsm, 1); |
5769 | |
5770 | // If that fails, try against the alternate table containing long-form NEON: |
5771 | // "fadd v0.2s, v1.2s, v2.2s" |
5772 | if (MatchResult != Match_Success) { |
5773 | // But first, save the short-form match result: we can use it in case the |
5774 | // long-form match also fails. |
5775 | auto ShortFormNEONErrorInfo = ErrorInfo; |
5776 | auto ShortFormNEONMatchResult = MatchResult; |
5777 | auto ShortFormNEONMissingFeatures = MissingFeatures; |
5778 | |
5779 | MatchResult = |
5780 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, |
5781 | MatchingInlineAsm, 0); |
5782 | |
5783 | // If both matches failed and the long-form match failed on the mnemonic
5784 | // suffix token operand, the short-form match failure is probably more
5785 | // relevant: use it instead.
5786 | if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 && |
5787 | Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() && |
5788 | ((AArch64Operand &)*Operands[1]).isTokenSuffix()) { |
5789 | MatchResult = ShortFormNEONMatchResult; |
5790 | ErrorInfo = ShortFormNEONErrorInfo; |
5791 | MissingFeatures = ShortFormNEONMissingFeatures; |
5792 | } |
5793 | } |
5794 | |
5795 | switch (MatchResult) { |
5796 | case Match_Success: { |
5797 | // Perform range checking and other semantic validations |
5798 | SmallVector<SMLoc, 8> OperandLocs; |
5799 | NumOperands = Operands.size(); |
5800 | for (unsigned i = 1; i < NumOperands; ++i) |
5801 | OperandLocs.push_back(Operands[i]->getStartLoc()); |
5802 | if (validateInstruction(Inst, IDLoc, OperandLocs)) |
5803 | return true; |
5804 | |
5805 | Inst.setLoc(IDLoc); |
5806 | Out.emitInstruction(Inst, getSTI()); |
5807 | return false; |
5808 | } |
5809 | case Match_MissingFeature: { |
5810 | assert(MissingFeatures.any() && "Unknown missing feature!");
5811 | // Special case the error message for the very common case where only
5812 | // a single subtarget feature is missing (e.g. neon).
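// (Illustrative: with a single missing feature the message reads, e.g.,
// "instruction requires: neon".)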
5813 | std::string Msg = "instruction requires:"; |
5814 | for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) { |
5815 | if (MissingFeatures[i]) { |
5816 | Msg += " "; |
5817 | Msg += getSubtargetFeatureName(i); |
5818 | } |
5819 | } |
5820 | return Error(IDLoc, Msg); |
5821 | } |
5822 | case Match_MnemonicFail: |
5823 | return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands); |
5824 | case Match_InvalidOperand: { |
5825 | SMLoc ErrorLoc = IDLoc; |
5826 | |
5827 | if (ErrorInfo != ~0ULL) { |