// File: build/source/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
// Static-analyzer finding: line 4971, column 9 — "Called C++ object pointer is null"
1 | //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | ||||
9 | #include "AArch64InstrInfo.h" | |||
10 | #include "MCTargetDesc/AArch64AddressingModes.h" | |||
11 | #include "MCTargetDesc/AArch64InstPrinter.h" | |||
12 | #include "MCTargetDesc/AArch64MCExpr.h" | |||
13 | #include "MCTargetDesc/AArch64MCTargetDesc.h" | |||
14 | #include "MCTargetDesc/AArch64TargetStreamer.h" | |||
15 | #include "TargetInfo/AArch64TargetInfo.h" | |||
16 | #include "Utils/AArch64BaseInfo.h" | |||
17 | #include "llvm/ADT/APFloat.h" | |||
18 | #include "llvm/ADT/APInt.h" | |||
19 | #include "llvm/ADT/ArrayRef.h" | |||
20 | #include "llvm/ADT/STLExtras.h" | |||
21 | #include "llvm/ADT/SmallSet.h" | |||
22 | #include "llvm/ADT/SmallVector.h" | |||
23 | #include "llvm/ADT/StringExtras.h" | |||
24 | #include "llvm/ADT/StringMap.h" | |||
25 | #include "llvm/ADT/StringRef.h" | |||
26 | #include "llvm/ADT/StringSwitch.h" | |||
27 | #include "llvm/ADT/Twine.h" | |||
28 | #include "llvm/MC/MCContext.h" | |||
29 | #include "llvm/MC/MCExpr.h" | |||
30 | #include "llvm/MC/MCInst.h" | |||
31 | #include "llvm/MC/MCLinkerOptimizationHint.h" | |||
32 | #include "llvm/MC/MCObjectFileInfo.h" | |||
33 | #include "llvm/MC/MCParser/MCAsmLexer.h" | |||
34 | #include "llvm/MC/MCParser/MCAsmParser.h" | |||
35 | #include "llvm/MC/MCParser/MCAsmParserExtension.h" | |||
36 | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" | |||
37 | #include "llvm/MC/MCParser/MCTargetAsmParser.h" | |||
38 | #include "llvm/MC/MCRegisterInfo.h" | |||
39 | #include "llvm/MC/MCStreamer.h" | |||
40 | #include "llvm/MC/MCSubtargetInfo.h" | |||
41 | #include "llvm/MC/MCSymbol.h" | |||
42 | #include "llvm/MC/MCTargetOptions.h" | |||
43 | #include "llvm/MC/MCValue.h" | |||
44 | #include "llvm/MC/SubtargetFeature.h" | |||
45 | #include "llvm/MC/TargetRegistry.h" | |||
46 | #include "llvm/Support/Casting.h" | |||
47 | #include "llvm/Support/Compiler.h" | |||
48 | #include "llvm/Support/ErrorHandling.h" | |||
49 | #include "llvm/Support/MathExtras.h" | |||
50 | #include "llvm/Support/SMLoc.h" | |||
51 | #include "llvm/Support/raw_ostream.h" | |||
52 | #include "llvm/TargetParser/AArch64TargetParser.h" | |||
53 | #include <cassert> | |||
54 | #include <cctype> | |||
55 | #include <cstdint> | |||
56 | #include <cstdio> | |||
57 | #include <optional> | |||
58 | #include <string> | |||
59 | #include <tuple> | |||
60 | #include <utility> | |||
61 | #include <vector> | |||
62 | ||||
63 | using namespace llvm; | |||
64 | ||||
65 | namespace { | |||
66 | ||||
/// The broad categories of register operand the parser distinguishes.
enum class RegKind {
  Scalar,                // GPR / FPR scalar registers (x0, w0, s0, ...)
  NeonVector,            // NEON vector registers (v0.8b, ...)
  SVEDataVector,         // SVE Z registers (z0.b, ...)
  SVEPredicateAsCounter, // SVE predicate-as-counter registers (pn0, ...)
  SVEPredicateVector,    // SVE P registers (p0.b, ...)
  Matrix,                // SME ZA tile registers
  LookupTable            // SME2 ZT0 lookup-table register
};
76 | ||||
77 | enum class MatrixKind { Array, Tile, Row, Col }; | |||
78 | ||||
/// How a tied register operand must relate to the register it is tied to.
enum RegConstraintEqualityTy {
  EqualsReg,      // Must be the same register.
  EqualsSuperReg, // Must be a super-register of the tied register.
  EqualsSubReg    // Must be a sub-register of the tied register.
};
84 | ||||
85 | class AArch64AsmParser : public MCTargetAsmParser { | |||
86 | private: | |||
87 | StringRef Mnemonic; ///< Instruction mnemonic. | |||
88 | ||||
89 | // Map of register aliases registers via the .req directive. | |||
90 | StringMap<std::pair<RegKind, unsigned>> RegisterReqs; | |||
91 | ||||
92 | class PrefixInfo { | |||
93 | public: | |||
94 | static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) { | |||
95 | PrefixInfo Prefix; | |||
96 | switch (Inst.getOpcode()) { | |||
97 | case AArch64::MOVPRFX_ZZ: | |||
98 | Prefix.Active = true; | |||
99 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
100 | break; | |||
101 | case AArch64::MOVPRFX_ZPmZ_B: | |||
102 | case AArch64::MOVPRFX_ZPmZ_H: | |||
103 | case AArch64::MOVPRFX_ZPmZ_S: | |||
104 | case AArch64::MOVPRFX_ZPmZ_D: | |||
105 | Prefix.Active = true; | |||
106 | Prefix.Predicated = true; | |||
107 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; | |||
108 | assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx") ? void (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 109 , __extension__ __PRETTY_FUNCTION__)) | |||
109 | "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx") ? void (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 109 , __extension__ __PRETTY_FUNCTION__)); | |||
110 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
111 | Prefix.Pg = Inst.getOperand(2).getReg(); | |||
112 | break; | |||
113 | case AArch64::MOVPRFX_ZPzZ_B: | |||
114 | case AArch64::MOVPRFX_ZPzZ_H: | |||
115 | case AArch64::MOVPRFX_ZPzZ_S: | |||
116 | case AArch64::MOVPRFX_ZPzZ_D: | |||
117 | Prefix.Active = true; | |||
118 | Prefix.Predicated = true; | |||
119 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; | |||
120 | assert(Prefix.ElementSize != AArch64::ElementSizeNone &&(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx") ? void (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 121 , __extension__ __PRETTY_FUNCTION__)) | |||
121 | "No destructive element size set for movprfx")(static_cast <bool> (Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx") ? void (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 121 , __extension__ __PRETTY_FUNCTION__)); | |||
122 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
123 | Prefix.Pg = Inst.getOperand(1).getReg(); | |||
124 | break; | |||
125 | default: | |||
126 | break; | |||
127 | } | |||
128 | ||||
129 | return Prefix; | |||
130 | } | |||
131 | ||||
132 | PrefixInfo() = default; | |||
133 | bool isActive() const { return Active; } | |||
134 | bool isPredicated() const { return Predicated; } | |||
135 | unsigned getElementSize() const { | |||
136 | assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail ("Predicated", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 136, __extension__ __PRETTY_FUNCTION__)); | |||
137 | return ElementSize; | |||
138 | } | |||
139 | unsigned getDstReg() const { return Dst; } | |||
140 | unsigned getPgReg() const { | |||
141 | assert(Predicated)(static_cast <bool> (Predicated) ? void (0) : __assert_fail ("Predicated", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 141, __extension__ __PRETTY_FUNCTION__)); | |||
142 | return Pg; | |||
143 | } | |||
144 | ||||
145 | private: | |||
146 | bool Active = false; | |||
147 | bool Predicated = false; | |||
148 | unsigned ElementSize; | |||
149 | unsigned Dst; | |||
150 | unsigned Pg; | |||
151 | } NextPrefix; | |||
152 | ||||
153 | AArch64TargetStreamer &getTargetStreamer() { | |||
154 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); | |||
155 | return static_cast<AArch64TargetStreamer &>(TS); | |||
156 | } | |||
157 | ||||
158 | SMLoc getLoc() const { return getParser().getTok().getLoc(); } | |||
159 | ||||
160 | bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); | |||
161 | bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); | |||
162 | void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S); | |||
163 | AArch64CC::CondCode parseCondCodeString(StringRef Cond, | |||
164 | std::string &Suggestion); | |||
165 | bool parseCondCode(OperandVector &Operands, bool invertCondCode); | |||
166 | unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind); | |||
167 | bool parseRegister(OperandVector &Operands); | |||
168 | bool parseSymbolicImmVal(const MCExpr *&ImmVal); | |||
169 | bool parseNeonVectorList(OperandVector &Operands); | |||
170 | bool parseOptionalMulOperand(OperandVector &Operands); | |||
171 | bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup); | |||
172 | bool parseKeywordOperand(OperandVector &Operands); | |||
173 | bool parseOperand(OperandVector &Operands, bool isCondCode, | |||
174 | bool invertCondCode); | |||
175 | bool parseImmExpr(int64_t &Out); | |||
176 | bool parseComma(); | |||
177 | bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First, | |||
178 | unsigned Last); | |||
179 | ||||
180 | bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, | |||
181 | OperandVector &Operands); | |||
182 | ||||
183 | bool parseDirectiveArch(SMLoc L); | |||
184 | bool parseDirectiveArchExtension(SMLoc L); | |||
185 | bool parseDirectiveCPU(SMLoc L); | |||
186 | bool parseDirectiveInst(SMLoc L); | |||
187 | ||||
188 | bool parseDirectiveTLSDescCall(SMLoc L); | |||
189 | ||||
190 | bool parseDirectiveLOH(StringRef LOH, SMLoc L); | |||
191 | bool parseDirectiveLtorg(SMLoc L); | |||
192 | ||||
193 | bool parseDirectiveReq(StringRef Name, SMLoc L); | |||
194 | bool parseDirectiveUnreq(SMLoc L); | |||
195 | bool parseDirectiveCFINegateRAState(); | |||
196 | bool parseDirectiveCFIBKeyFrame(); | |||
197 | bool parseDirectiveCFIMTETaggedFrame(); | |||
198 | ||||
199 | bool parseDirectiveVariantPCS(SMLoc L); | |||
200 | ||||
201 | bool parseDirectiveSEHAllocStack(SMLoc L); | |||
202 | bool parseDirectiveSEHPrologEnd(SMLoc L); | |||
203 | bool parseDirectiveSEHSaveR19R20X(SMLoc L); | |||
204 | bool parseDirectiveSEHSaveFPLR(SMLoc L); | |||
205 | bool parseDirectiveSEHSaveFPLRX(SMLoc L); | |||
206 | bool parseDirectiveSEHSaveReg(SMLoc L); | |||
207 | bool parseDirectiveSEHSaveRegX(SMLoc L); | |||
208 | bool parseDirectiveSEHSaveRegP(SMLoc L); | |||
209 | bool parseDirectiveSEHSaveRegPX(SMLoc L); | |||
210 | bool parseDirectiveSEHSaveLRPair(SMLoc L); | |||
211 | bool parseDirectiveSEHSaveFReg(SMLoc L); | |||
212 | bool parseDirectiveSEHSaveFRegX(SMLoc L); | |||
213 | bool parseDirectiveSEHSaveFRegP(SMLoc L); | |||
214 | bool parseDirectiveSEHSaveFRegPX(SMLoc L); | |||
215 | bool parseDirectiveSEHSetFP(SMLoc L); | |||
216 | bool parseDirectiveSEHAddFP(SMLoc L); | |||
217 | bool parseDirectiveSEHNop(SMLoc L); | |||
218 | bool parseDirectiveSEHSaveNext(SMLoc L); | |||
219 | bool parseDirectiveSEHEpilogStart(SMLoc L); | |||
220 | bool parseDirectiveSEHEpilogEnd(SMLoc L); | |||
221 | bool parseDirectiveSEHTrapFrame(SMLoc L); | |||
222 | bool parseDirectiveSEHMachineFrame(SMLoc L); | |||
223 | bool parseDirectiveSEHContext(SMLoc L); | |||
224 | bool parseDirectiveSEHClearUnwoundToCall(SMLoc L); | |||
225 | bool parseDirectiveSEHPACSignLR(SMLoc L); | |||
226 | bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback); | |||
227 | ||||
228 | bool validateInstruction(MCInst &Inst, SMLoc &IDLoc, | |||
229 | SmallVectorImpl<SMLoc> &Loc); | |||
230 | unsigned getNumRegsForRegKind(RegKind K); | |||
231 | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, | |||
232 | OperandVector &Operands, MCStreamer &Out, | |||
233 | uint64_t &ErrorInfo, | |||
234 | bool MatchingInlineAsm) override; | |||
235 | /// @name Auto-generated Match Functions | |||
236 | /// { | |||
237 | ||||
238 | #define GET_ASSEMBLER_HEADER | |||
239 | #include "AArch64GenAsmMatcher.inc" | |||
240 | ||||
241 | /// } | |||
242 | ||||
243 | OperandMatchResultTy tryParseScalarRegister(MCRegister &Reg); | |||
244 | OperandMatchResultTy tryParseVectorRegister(MCRegister &Reg, StringRef &Kind, | |||
245 | RegKind MatchKind); | |||
246 | OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands); | |||
247 | OperandMatchResultTy tryParseSVCR(OperandVector &Operands); | |||
248 | OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands); | |||
249 | OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands); | |||
250 | OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands); | |||
251 | OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands); | |||
252 | OperandMatchResultTy tryParseSysReg(OperandVector &Operands); | |||
253 | OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands); | |||
254 | template <bool IsSVEPrefetch = false> | |||
255 | OperandMatchResultTy tryParsePrefetch(OperandVector &Operands); | |||
256 | OperandMatchResultTy tryParseRPRFMOperand(OperandVector &Operands); | |||
257 | OperandMatchResultTy tryParsePSBHint(OperandVector &Operands); | |||
258 | OperandMatchResultTy tryParseBTIHint(OperandVector &Operands); | |||
259 | OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands); | |||
260 | OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands); | |||
261 | template<bool AddFPZeroAsLiteral> | |||
262 | OperandMatchResultTy tryParseFPImm(OperandVector &Operands); | |||
263 | OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands); | |||
264 | OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands); | |||
265 | bool tryParseNeonVectorRegister(OperandVector &Operands); | |||
266 | OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands); | |||
267 | OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands); | |||
268 | OperandMatchResultTy tryParseSyspXzrPair(OperandVector &Operands); | |||
269 | template <bool ParseShiftExtend, | |||
270 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg> | |||
271 | OperandMatchResultTy tryParseGPROperand(OperandVector &Operands); | |||
272 | OperandMatchResultTy tryParseZTOperand(OperandVector &Operands); | |||
273 | template <bool ParseShiftExtend, bool ParseSuffix> | |||
274 | OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands); | |||
275 | template <RegKind RK> | |||
276 | OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands); | |||
277 | template <RegKind VectorKind> | |||
278 | OperandMatchResultTy tryParseVectorList(OperandVector &Operands, | |||
279 | bool ExpectMatch = false); | |||
280 | OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands); | |||
281 | OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands); | |||
282 | OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands); | |||
283 | OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands); | |||
284 | OperandMatchResultTy tryParseImmRange(OperandVector &Operands); | |||
285 | ||||
286 | public: | |||
287 | enum AArch64MatchResultTy { | |||
288 | Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, | |||
289 | #define GET_OPERAND_DIAGNOSTIC_TYPES | |||
290 | #include "AArch64GenAsmMatcher.inc" | |||
291 | }; | |||
292 | bool IsILP32; | |||
293 | ||||
294 | AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, | |||
295 | const MCInstrInfo &MII, const MCTargetOptions &Options) | |||
296 | : MCTargetAsmParser(Options, STI, MII) { | |||
297 | IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; | |||
298 | MCAsmParserExtension::Initialize(Parser); | |||
299 | MCStreamer &S = getParser().getStreamer(); | |||
300 | if (S.getTargetStreamer() == nullptr) | |||
301 | new AArch64TargetStreamer(S); | |||
302 | ||||
303 | // Alias .hword/.word/.[dx]word to the target-independent | |||
304 | // .2byte/.4byte/.8byte directives as they have the same form and | |||
305 | // semantics: | |||
306 | /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ] | |||
307 | Parser.addAliasForDirective(".hword", ".2byte"); | |||
308 | Parser.addAliasForDirective(".word", ".4byte"); | |||
309 | Parser.addAliasForDirective(".dword", ".8byte"); | |||
310 | Parser.addAliasForDirective(".xword", ".8byte"); | |||
311 | ||||
312 | // Initialize the set of available features. | |||
313 | setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits())); | |||
314 | } | |||
315 | ||||
316 | bool areEqualRegs(const MCParsedAsmOperand &Op1, | |||
317 | const MCParsedAsmOperand &Op2) const override; | |||
318 | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, | |||
319 | SMLoc NameLoc, OperandVector &Operands) override; | |||
320 | bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc, | |||
321 | SMLoc &EndLoc) override; | |||
322 | OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc, | |||
323 | SMLoc &EndLoc) override; | |||
324 | bool ParseDirective(AsmToken DirectiveID) override; | |||
325 | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, | |||
326 | unsigned Kind) override; | |||
327 | ||||
328 | static bool classifySymbolRef(const MCExpr *Expr, | |||
329 | AArch64MCExpr::VariantKind &ELFRefKind, | |||
330 | MCSymbolRefExpr::VariantKind &DarwinRefKind, | |||
331 | int64_t &Addend); | |||
332 | }; | |||
333 | ||||
334 | /// AArch64Operand - Instances of this class represent a parsed AArch64 machine | |||
335 | /// instruction. | |||
336 | class AArch64Operand : public MCParsedAsmOperand { | |||
337 | private: | |||
338 | enum KindTy { | |||
339 | k_Immediate, | |||
340 | k_ShiftedImm, | |||
341 | k_ImmRange, | |||
342 | k_CondCode, | |||
343 | k_Register, | |||
344 | k_MatrixRegister, | |||
345 | k_MatrixTileList, | |||
346 | k_SVCR, | |||
347 | k_VectorList, | |||
348 | k_VectorIndex, | |||
349 | k_Token, | |||
350 | k_SysReg, | |||
351 | k_SysCR, | |||
352 | k_Prefetch, | |||
353 | k_ShiftExtend, | |||
354 | k_FPImm, | |||
355 | k_Barrier, | |||
356 | k_PSBHint, | |||
357 | k_BTIHint, | |||
358 | } Kind; | |||
359 | ||||
360 | SMLoc StartLoc, EndLoc; | |||
361 | ||||
362 | struct TokOp { | |||
363 | const char *Data; | |||
364 | unsigned Length; | |||
365 | bool IsSuffix; // Is the operand actually a suffix on the mnemonic. | |||
366 | }; | |||
367 | ||||
368 | // Separate shift/extend operand. | |||
369 | struct ShiftExtendOp { | |||
370 | AArch64_AM::ShiftExtendType Type; | |||
371 | unsigned Amount; | |||
372 | bool HasExplicitAmount; | |||
373 | }; | |||
374 | ||||
375 | struct RegOp { | |||
376 | unsigned RegNum; | |||
377 | RegKind Kind; | |||
378 | int ElementWidth; | |||
379 | ||||
380 | // The register may be allowed as a different register class, | |||
381 | // e.g. for GPR64as32 or GPR32as64. | |||
382 | RegConstraintEqualityTy EqualityTy; | |||
383 | ||||
384 | // In some cases the shift/extend needs to be explicitly parsed together | |||
385 | // with the register, rather than as a separate operand. This is needed | |||
386 | // for addressing modes where the instruction as a whole dictates the | |||
387 | // scaling/extend, rather than specific bits in the instruction. | |||
388 | // By parsing them as a single operand, we avoid the need to pass an | |||
389 | // extra operand in all CodeGen patterns (because all operands need to | |||
390 | // have an associated value), and we avoid the need to update TableGen to | |||
391 | // accept operands that have no associated bits in the instruction. | |||
392 | // | |||
393 | // An added benefit of parsing them together is that the assembler | |||
394 | // can give a sensible diagnostic if the scaling is not correct. | |||
395 | // | |||
396 | // The default is 'lsl #0' (HasExplicitAmount = false) if no | |||
397 | // ShiftExtend is specified. | |||
398 | ShiftExtendOp ShiftExtend; | |||
399 | }; | |||
400 | ||||
401 | struct MatrixRegOp { | |||
402 | unsigned RegNum; | |||
403 | unsigned ElementWidth; | |||
404 | MatrixKind Kind; | |||
405 | }; | |||
406 | ||||
407 | struct MatrixTileListOp { | |||
408 | unsigned RegMask = 0; | |||
409 | }; | |||
410 | ||||
411 | struct VectorListOp { | |||
412 | unsigned RegNum; | |||
413 | unsigned Count; | |||
414 | unsigned Stride; | |||
415 | unsigned NumElements; | |||
416 | unsigned ElementWidth; | |||
417 | RegKind RegisterKind; | |||
418 | }; | |||
419 | ||||
420 | struct VectorIndexOp { | |||
421 | int Val; | |||
422 | }; | |||
423 | ||||
424 | struct ImmOp { | |||
425 | const MCExpr *Val; | |||
426 | }; | |||
427 | ||||
428 | struct ShiftedImmOp { | |||
429 | const MCExpr *Val; | |||
430 | unsigned ShiftAmount; | |||
431 | }; | |||
432 | ||||
433 | struct ImmRangeOp { | |||
434 | unsigned First; | |||
435 | unsigned Last; | |||
436 | }; | |||
437 | ||||
438 | struct CondCodeOp { | |||
439 | AArch64CC::CondCode Code; | |||
440 | }; | |||
441 | ||||
442 | struct FPImmOp { | |||
443 | uint64_t Val; // APFloat value bitcasted to uint64_t. | |||
444 | bool IsExact; // describes whether parsed value was exact. | |||
445 | }; | |||
446 | ||||
447 | struct BarrierOp { | |||
448 | const char *Data; | |||
449 | unsigned Length; | |||
450 | unsigned Val; // Not the enum since not all values have names. | |||
451 | bool HasnXSModifier; | |||
452 | }; | |||
453 | ||||
454 | struct SysRegOp { | |||
455 | const char *Data; | |||
456 | unsigned Length; | |||
457 | uint32_t MRSReg; | |||
458 | uint32_t MSRReg; | |||
459 | uint32_t PStateField; | |||
460 | }; | |||
461 | ||||
462 | struct SysCRImmOp { | |||
463 | unsigned Val; | |||
464 | }; | |||
465 | ||||
466 | struct PrefetchOp { | |||
467 | const char *Data; | |||
468 | unsigned Length; | |||
469 | unsigned Val; | |||
470 | }; | |||
471 | ||||
472 | struct PSBHintOp { | |||
473 | const char *Data; | |||
474 | unsigned Length; | |||
475 | unsigned Val; | |||
476 | }; | |||
477 | ||||
478 | struct BTIHintOp { | |||
479 | const char *Data; | |||
480 | unsigned Length; | |||
481 | unsigned Val; | |||
482 | }; | |||
483 | ||||
484 | struct SVCROp { | |||
485 | const char *Data; | |||
486 | unsigned Length; | |||
487 | unsigned PStateField; | |||
488 | }; | |||
489 | ||||
490 | union { | |||
491 | struct TokOp Tok; | |||
492 | struct RegOp Reg; | |||
493 | struct MatrixRegOp MatrixReg; | |||
494 | struct MatrixTileListOp MatrixTileList; | |||
495 | struct VectorListOp VectorList; | |||
496 | struct VectorIndexOp VectorIndex; | |||
497 | struct ImmOp Imm; | |||
498 | struct ShiftedImmOp ShiftedImm; | |||
499 | struct ImmRangeOp ImmRange; | |||
500 | struct CondCodeOp CondCode; | |||
501 | struct FPImmOp FPImm; | |||
502 | struct BarrierOp Barrier; | |||
503 | struct SysRegOp SysReg; | |||
504 | struct SysCRImmOp SysCRImm; | |||
505 | struct PrefetchOp Prefetch; | |||
506 | struct PSBHintOp PSBHint; | |||
507 | struct BTIHintOp BTIHint; | |||
508 | struct ShiftExtendOp ShiftExtend; | |||
509 | struct SVCROp SVCR; | |||
510 | }; | |||
511 | ||||
512 | // Keep the MCContext around as the MCExprs may need manipulated during | |||
513 | // the add<>Operands() calls. | |||
514 | MCContext &Ctx; | |||
515 | ||||
516 | public: | |||
517 | AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {} | |||
518 | ||||
519 | AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) { | |||
520 | Kind = o.Kind; | |||
521 | StartLoc = o.StartLoc; | |||
522 | EndLoc = o.EndLoc; | |||
523 | switch (Kind) { | |||
524 | case k_Token: | |||
525 | Tok = o.Tok; | |||
526 | break; | |||
527 | case k_Immediate: | |||
528 | Imm = o.Imm; | |||
529 | break; | |||
530 | case k_ShiftedImm: | |||
531 | ShiftedImm = o.ShiftedImm; | |||
532 | break; | |||
533 | case k_ImmRange: | |||
534 | ImmRange = o.ImmRange; | |||
535 | break; | |||
536 | case k_CondCode: | |||
537 | CondCode = o.CondCode; | |||
538 | break; | |||
539 | case k_FPImm: | |||
540 | FPImm = o.FPImm; | |||
541 | break; | |||
542 | case k_Barrier: | |||
543 | Barrier = o.Barrier; | |||
544 | break; | |||
545 | case k_Register: | |||
546 | Reg = o.Reg; | |||
547 | break; | |||
548 | case k_MatrixRegister: | |||
549 | MatrixReg = o.MatrixReg; | |||
550 | break; | |||
551 | case k_MatrixTileList: | |||
552 | MatrixTileList = o.MatrixTileList; | |||
553 | break; | |||
554 | case k_VectorList: | |||
555 | VectorList = o.VectorList; | |||
556 | break; | |||
557 | case k_VectorIndex: | |||
558 | VectorIndex = o.VectorIndex; | |||
559 | break; | |||
560 | case k_SysReg: | |||
561 | SysReg = o.SysReg; | |||
562 | break; | |||
563 | case k_SysCR: | |||
564 | SysCRImm = o.SysCRImm; | |||
565 | break; | |||
566 | case k_Prefetch: | |||
567 | Prefetch = o.Prefetch; | |||
568 | break; | |||
569 | case k_PSBHint: | |||
570 | PSBHint = o.PSBHint; | |||
571 | break; | |||
572 | case k_BTIHint: | |||
573 | BTIHint = o.BTIHint; | |||
574 | break; | |||
575 | case k_ShiftExtend: | |||
576 | ShiftExtend = o.ShiftExtend; | |||
577 | break; | |||
578 | case k_SVCR: | |||
579 | SVCR = o.SVCR; | |||
580 | break; | |||
581 | } | |||
582 | } | |||
583 | ||||
584 | /// getStartLoc - Get the location of the first token of this operand. | |||
585 | SMLoc getStartLoc() const override { return StartLoc; } | |||
586 | /// getEndLoc - Get the location of the last token of this operand. | |||
587 | SMLoc getEndLoc() const override { return EndLoc; } | |||
588 | ||||
589 | StringRef getToken() const { | |||
590 | assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 590 , __extension__ __PRETTY_FUNCTION__)); | |||
591 | return StringRef(Tok.Data, Tok.Length); | |||
592 | } | |||
593 | ||||
594 | bool isTokenSuffix() const { | |||
595 | assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 595 , __extension__ __PRETTY_FUNCTION__)); | |||
596 | return Tok.IsSuffix; | |||
597 | } | |||
598 | ||||
599 | const MCExpr *getImm() const { | |||
600 | assert(Kind == k_Immediate && "Invalid access!")(static_cast <bool> (Kind == k_Immediate && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Immediate && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 600 , __extension__ __PRETTY_FUNCTION__)); | |||
601 | return Imm.Val; | |||
602 | } | |||
603 | ||||
604 | const MCExpr *getShiftedImmVal() const { | |||
605 | assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 605 , __extension__ __PRETTY_FUNCTION__)); | |||
606 | return ShiftedImm.Val; | |||
607 | } | |||
608 | ||||
609 | unsigned getShiftedImmShift() const { | |||
610 | assert(Kind == k_ShiftedImm && "Invalid access!")(static_cast <bool> (Kind == k_ShiftedImm && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_ShiftedImm && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 610 , __extension__ __PRETTY_FUNCTION__)); | |||
611 | return ShiftedImm.ShiftAmount; | |||
612 | } | |||
613 | ||||
614 | unsigned getFirstImmVal() const { | |||
615 | assert(Kind == k_ImmRange && "Invalid access!")(static_cast <bool> (Kind == k_ImmRange && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_ImmRange && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 615 , __extension__ __PRETTY_FUNCTION__)); | |||
616 | return ImmRange.First; | |||
617 | } | |||
618 | ||||
619 | unsigned getLastImmVal() const { | |||
620 | assert(Kind == k_ImmRange && "Invalid access!")(static_cast <bool> (Kind == k_ImmRange && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_ImmRange && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 620 , __extension__ __PRETTY_FUNCTION__)); | |||
621 | return ImmRange.Last; | |||
622 | } | |||
623 | ||||
624 | AArch64CC::CondCode getCondCode() const { | |||
625 | assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 625 , __extension__ __PRETTY_FUNCTION__)); | |||
626 | return CondCode.Code; | |||
627 | } | |||
628 | ||||
629 | APFloat getFPImm() const { | |||
630 | assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 630 , __extension__ __PRETTY_FUNCTION__)); | |||
631 | return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true)); | |||
632 | } | |||
633 | ||||
634 | bool getFPImmIsExact() const { | |||
635 | assert (Kind == k_FPImm && "Invalid access!")(static_cast <bool> (Kind == k_FPImm && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_FPImm && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 635 , __extension__ __PRETTY_FUNCTION__)); | |||
636 | return FPImm.IsExact; | |||
637 | } | |||
638 | ||||
639 | unsigned getBarrier() const { | |||
640 | assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 640 , __extension__ __PRETTY_FUNCTION__)); | |||
641 | return Barrier.Val; | |||
642 | } | |||
643 | ||||
644 | StringRef getBarrierName() const { | |||
645 | assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 645 , __extension__ __PRETTY_FUNCTION__)); | |||
646 | return StringRef(Barrier.Data, Barrier.Length); | |||
647 | } | |||
648 | ||||
649 | bool getBarriernXSModifier() const { | |||
650 | assert(Kind == k_Barrier && "Invalid access!")(static_cast <bool> (Kind == k_Barrier && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Barrier && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 650 , __extension__ __PRETTY_FUNCTION__)); | |||
651 | return Barrier.HasnXSModifier; | |||
652 | } | |||
653 | ||||
654 | unsigned getReg() const override { | |||
655 | assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 655 , __extension__ __PRETTY_FUNCTION__)); | |||
656 | return Reg.RegNum; | |||
657 | } | |||
658 | ||||
659 | unsigned getMatrixReg() const { | |||
660 | assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister && "Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 660 , __extension__ __PRETTY_FUNCTION__)); | |||
661 | return MatrixReg.RegNum; | |||
662 | } | |||
663 | ||||
664 | unsigned getMatrixElementWidth() const { | |||
665 | assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister && "Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 665 , __extension__ __PRETTY_FUNCTION__)); | |||
666 | return MatrixReg.ElementWidth; | |||
667 | } | |||
668 | ||||
669 | MatrixKind getMatrixKind() const { | |||
670 | assert(Kind == k_MatrixRegister && "Invalid access!")(static_cast <bool> (Kind == k_MatrixRegister && "Invalid access!") ? void (0) : __assert_fail ("Kind == k_MatrixRegister && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 670 , __extension__ __PRETTY_FUNCTION__)); | |||
671 | return MatrixReg.Kind; | |||
672 | } | |||
673 | ||||
674 | unsigned getMatrixTileListRegMask() const { | |||
675 | assert(isMatrixTileList() && "Invalid access!")(static_cast <bool> (isMatrixTileList() && "Invalid access!" ) ? void (0) : __assert_fail ("isMatrixTileList() && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 675 , __extension__ __PRETTY_FUNCTION__)); | |||
676 | return MatrixTileList.RegMask; | |||
677 | } | |||
678 | ||||
679 | RegConstraintEqualityTy getRegEqualityTy() const { | |||
680 | assert(Kind == k_Register && "Invalid access!")(static_cast <bool> (Kind == k_Register && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Register && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 680 , __extension__ __PRETTY_FUNCTION__)); | |||
681 | return Reg.EqualityTy; | |||
682 | } | |||
683 | ||||
684 | unsigned getVectorListStart() const { | |||
685 | assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 685 , __extension__ __PRETTY_FUNCTION__)); | |||
686 | return VectorList.RegNum; | |||
687 | } | |||
688 | ||||
689 | unsigned getVectorListCount() const { | |||
690 | assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 690 , __extension__ __PRETTY_FUNCTION__)); | |||
691 | return VectorList.Count; | |||
692 | } | |||
693 | ||||
694 | unsigned getVectorListStride() const { | |||
695 | assert(Kind == k_VectorList && "Invalid access!")(static_cast <bool> (Kind == k_VectorList && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_VectorList && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 695 , __extension__ __PRETTY_FUNCTION__)); | |||
696 | return VectorList.Stride; | |||
697 | } | |||
698 | ||||
699 | int getVectorIndex() const { | |||
700 | assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 700 , __extension__ __PRETTY_FUNCTION__)); | |||
701 | return VectorIndex.Val; | |||
702 | } | |||
703 | ||||
704 | StringRef getSysReg() const { | |||
705 | assert(Kind == k_SysReg && "Invalid access!")(static_cast <bool> (Kind == k_SysReg && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_SysReg && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 705 , __extension__ __PRETTY_FUNCTION__)); | |||
706 | return StringRef(SysReg.Data, SysReg.Length); | |||
707 | } | |||
708 | ||||
709 | unsigned getSysCR() const { | |||
710 | assert(Kind == k_SysCR && "Invalid access!")(static_cast <bool> (Kind == k_SysCR && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_SysCR && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 710 , __extension__ __PRETTY_FUNCTION__)); | |||
711 | return SysCRImm.Val; | |||
712 | } | |||
713 | ||||
714 | unsigned getPrefetch() const { | |||
715 | assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 715 , __extension__ __PRETTY_FUNCTION__)); | |||
716 | return Prefetch.Val; | |||
717 | } | |||
718 | ||||
719 | unsigned getPSBHint() const { | |||
720 | assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 720 , __extension__ __PRETTY_FUNCTION__)); | |||
721 | return PSBHint.Val; | |||
722 | } | |||
723 | ||||
724 | StringRef getPSBHintName() const { | |||
725 | assert(Kind == k_PSBHint && "Invalid access!")(static_cast <bool> (Kind == k_PSBHint && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_PSBHint && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 725 , __extension__ __PRETTY_FUNCTION__)); | |||
726 | return StringRef(PSBHint.Data, PSBHint.Length); | |||
727 | } | |||
728 | ||||
729 | unsigned getBTIHint() const { | |||
730 | assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 730 , __extension__ __PRETTY_FUNCTION__)); | |||
731 | return BTIHint.Val; | |||
732 | } | |||
733 | ||||
734 | StringRef getBTIHintName() const { | |||
735 | assert(Kind == k_BTIHint && "Invalid access!")(static_cast <bool> (Kind == k_BTIHint && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_BTIHint && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 735 , __extension__ __PRETTY_FUNCTION__)); | |||
736 | return StringRef(BTIHint.Data, BTIHint.Length); | |||
737 | } | |||
738 | ||||
739 | StringRef getSVCR() const { | |||
740 | assert(Kind == k_SVCR && "Invalid access!")(static_cast <bool> (Kind == k_SVCR && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_SVCR && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 740 , __extension__ __PRETTY_FUNCTION__)); | |||
741 | return StringRef(SVCR.Data, SVCR.Length); | |||
742 | } | |||
743 | ||||
744 | StringRef getPrefetchName() const { | |||
745 | assert(Kind == k_Prefetch && "Invalid access!")(static_cast <bool> (Kind == k_Prefetch && "Invalid access!" ) ? void (0) : __assert_fail ("Kind == k_Prefetch && \"Invalid access!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 745 , __extension__ __PRETTY_FUNCTION__)); | |||
746 | return StringRef(Prefetch.Data, Prefetch.Length); | |||
747 | } | |||
748 | ||||
749 | AArch64_AM::ShiftExtendType getShiftExtendType() const { | |||
750 | if (Kind == k_ShiftExtend) | |||
751 | return ShiftExtend.Type; | |||
752 | if (Kind == k_Register) | |||
753 | return Reg.ShiftExtend.Type; | |||
754 | llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 754); | |||
755 | } | |||
756 | ||||
757 | unsigned getShiftExtendAmount() const { | |||
758 | if (Kind == k_ShiftExtend) | |||
759 | return ShiftExtend.Amount; | |||
760 | if (Kind == k_Register) | |||
761 | return Reg.ShiftExtend.Amount; | |||
762 | llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 762); | |||
763 | } | |||
764 | ||||
765 | bool hasShiftExtendAmount() const { | |||
766 | if (Kind == k_ShiftExtend) | |||
767 | return ShiftExtend.HasExplicitAmount; | |||
768 | if (Kind == k_Register) | |||
769 | return Reg.ShiftExtend.HasExplicitAmount; | |||
770 | llvm_unreachable("Invalid access!")::llvm::llvm_unreachable_internal("Invalid access!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 770); | |||
771 | } | |||
772 | ||||
773 | bool isImm() const override { return Kind == k_Immediate; } | |||
774 | bool isMem() const override { return false; } | |||
775 | ||||
776 | bool isUImm6() const { | |||
777 | if (!isImm()) | |||
778 | return false; | |||
779 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
780 | if (!MCE) | |||
781 | return false; | |||
782 | int64_t Val = MCE->getValue(); | |||
783 | return (Val >= 0 && Val < 64); | |||
784 | } | |||
785 | ||||
786 | template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); } | |||
787 | ||||
788 | template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const { | |||
789 | return isImmScaled<Bits, Scale>(true); | |||
790 | } | |||
791 | ||||
792 | template <int Bits, int Scale, int Offset = 0, bool IsRange = false> | |||
793 | DiagnosticPredicate isUImmScaled() const { | |||
794 | if (IsRange && isImmRange() && | |||
795 | (getLastImmVal() != getFirstImmVal() + Offset)) | |||
796 | return DiagnosticPredicateTy::NoMatch; | |||
797 | ||||
798 | return isImmScaled<Bits, Scale, IsRange>(false); | |||
799 | } | |||
800 | ||||
801 | template <int Bits, int Scale, bool IsRange = false> | |||
802 | DiagnosticPredicate isImmScaled(bool Signed) const { | |||
803 | if ((!isImm() && !isImmRange()) || (isImm() && IsRange) || | |||
804 | (isImmRange() && !IsRange)) | |||
805 | return DiagnosticPredicateTy::NoMatch; | |||
806 | ||||
807 | int64_t Val; | |||
808 | if (isImmRange()) | |||
809 | Val = getFirstImmVal(); | |||
810 | else { | |||
811 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
812 | if (!MCE) | |||
813 | return DiagnosticPredicateTy::NoMatch; | |||
814 | Val = MCE->getValue(); | |||
815 | } | |||
816 | ||||
817 | int64_t MinVal, MaxVal; | |||
818 | if (Signed) { | |||
819 | int64_t Shift = Bits - 1; | |||
820 | MinVal = (int64_t(1) << Shift) * -Scale; | |||
821 | MaxVal = ((int64_t(1) << Shift) - 1) * Scale; | |||
822 | } else { | |||
823 | MinVal = 0; | |||
824 | MaxVal = ((int64_t(1) << Bits) - 1) * Scale; | |||
825 | } | |||
826 | ||||
827 | if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0) | |||
828 | return DiagnosticPredicateTy::Match; | |||
829 | ||||
830 | return DiagnosticPredicateTy::NearMatch; | |||
831 | } | |||
832 | ||||
833 | DiagnosticPredicate isSVEPattern() const { | |||
834 | if (!isImm()) | |||
835 | return DiagnosticPredicateTy::NoMatch; | |||
836 | auto *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
837 | if (!MCE) | |||
838 | return DiagnosticPredicateTy::NoMatch; | |||
839 | int64_t Val = MCE->getValue(); | |||
840 | if (Val >= 0 && Val < 32) | |||
841 | return DiagnosticPredicateTy::Match; | |||
842 | return DiagnosticPredicateTy::NearMatch; | |||
843 | } | |||
844 | ||||
845 | DiagnosticPredicate isSVEVecLenSpecifier() const { | |||
846 | if (!isImm()) | |||
847 | return DiagnosticPredicateTy::NoMatch; | |||
848 | auto *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
849 | if (!MCE) | |||
850 | return DiagnosticPredicateTy::NoMatch; | |||
851 | int64_t Val = MCE->getValue(); | |||
852 | if (Val >= 0 && Val <= 1) | |||
853 | return DiagnosticPredicateTy::Match; | |||
854 | return DiagnosticPredicateTy::NearMatch; | |||
855 | } | |||
856 | ||||
857 | bool isSymbolicUImm12Offset(const MCExpr *Expr) const { | |||
858 | AArch64MCExpr::VariantKind ELFRefKind; | |||
859 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
860 | int64_t Addend; | |||
861 | if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, | |||
862 | Addend)) { | |||
863 | // If we don't understand the expression, assume the best and | |||
864 | // let the fixup and relocation code deal with it. | |||
865 | return true; | |||
866 | } | |||
867 | ||||
868 | if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || | |||
869 | ELFRefKind == AArch64MCExpr::VK_LO12 || | |||
870 | ELFRefKind == AArch64MCExpr::VK_GOT_LO12 || | |||
871 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || | |||
872 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || | |||
873 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || | |||
874 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || | |||
875 | ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC || | |||
876 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || | |||
877 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || | |||
878 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 || | |||
879 | ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) { | |||
880 | // Note that we don't range-check the addend. It's adjusted modulo page | |||
881 | // size when converted, so there is no "out of range" condition when using | |||
882 | // @pageoff. | |||
883 | return true; | |||
884 | } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF || | |||
885 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) { | |||
886 | // @gotpageoff/@tlvppageoff can only be used directly, not with an addend. | |||
887 | return Addend == 0; | |||
888 | } | |||
889 | ||||
890 | return false; | |||
891 | } | |||
892 | ||||
893 | template <int Scale> bool isUImm12Offset() const { | |||
894 | if (!isImm()) | |||
895 | return false; | |||
896 | ||||
897 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
898 | if (!MCE) | |||
899 | return isSymbolicUImm12Offset(getImm()); | |||
900 | ||||
901 | int64_t Val = MCE->getValue(); | |||
902 | return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000; | |||
903 | } | |||
904 | ||||
905 | template <int N, int M> | |||
906 | bool isImmInRange() const { | |||
907 | if (!isImm()) | |||
908 | return false; | |||
909 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
910 | if (!MCE) | |||
911 | return false; | |||
912 | int64_t Val = MCE->getValue(); | |||
913 | return (Val >= N && Val <= M); | |||
914 | } | |||
915 | ||||
916 | // NOTE: Also used for isLogicalImmNot as anything that can be represented as | |||
917 | // a logical immediate can always be represented when inverted. | |||
918 | template <typename T> | |||
919 | bool isLogicalImm() const { | |||
920 | if (!isImm()) | |||
921 | return false; | |||
922 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
923 | if (!MCE) | |||
924 | return false; | |||
925 | ||||
926 | int64_t Val = MCE->getValue(); | |||
927 | // Avoid left shift by 64 directly. | |||
928 | uint64_t Upper = UINT64_C(-1)-1UL << (sizeof(T) * 4) << (sizeof(T) * 4); | |||
929 | // Allow all-0 or all-1 in top bits to permit bitwise NOT. | |||
930 | if ((Val & Upper) && (Val & Upper) != Upper) | |||
931 | return false; | |||
932 | ||||
933 | return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8); | |||
934 | } | |||
935 | ||||
936 | bool isShiftedImm() const { return Kind == k_ShiftedImm; } | |||
937 | ||||
938 | bool isImmRange() const { return Kind == k_ImmRange; } | |||
939 | ||||
940 | /// Returns the immediate value as a pair of (imm, shift) if the immediate is | |||
941 | /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted | |||
942 | /// immediate that can be shifted by 'Shift'. | |||
943 | template <unsigned Width> | |||
944 | std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const { | |||
945 | if (isShiftedImm() && Width == getShiftedImmShift()) | |||
946 | if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal())) | |||
947 | return std::make_pair(CE->getValue(), Width); | |||
948 | ||||
949 | if (isImm()) | |||
950 | if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) { | |||
951 | int64_t Val = CE->getValue(); | |||
952 | if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val)) | |||
953 | return std::make_pair(Val >> Width, Width); | |||
954 | else | |||
955 | return std::make_pair(Val, 0u); | |||
956 | } | |||
957 | ||||
958 | return {}; | |||
959 | } | |||
960 | ||||
961 | bool isAddSubImm() const { | |||
962 | if (!isShiftedImm() && !isImm()) | |||
963 | return false; | |||
964 | ||||
965 | const MCExpr *Expr; | |||
966 | ||||
967 | // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'. | |||
968 | if (isShiftedImm()) { | |||
969 | unsigned Shift = ShiftedImm.ShiftAmount; | |||
970 | Expr = ShiftedImm.Val; | |||
971 | if (Shift != 0 && Shift != 12) | |||
972 | return false; | |||
973 | } else { | |||
974 | Expr = getImm(); | |||
975 | } | |||
976 | ||||
977 | AArch64MCExpr::VariantKind ELFRefKind; | |||
978 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
979 | int64_t Addend; | |||
980 | if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, | |||
981 | DarwinRefKind, Addend)) { | |||
982 | return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF | |||
983 | || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF | |||
984 | || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) | |||
985 | || ELFRefKind == AArch64MCExpr::VK_LO12 | |||
986 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 | |||
987 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 | |||
988 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC | |||
989 | || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 | |||
990 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 | |||
991 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC | |||
992 | || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 | |||
993 | || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 | |||
994 | || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12; | |||
995 | } | |||
996 | ||||
997 | // If it's a constant, it should be a real immediate in range. | |||
998 | if (auto ShiftedVal = getShiftedVal<12>()) | |||
999 | return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff; | |||
1000 | ||||
1001 | // If it's an expression, we hope for the best and let the fixup/relocation | |||
1002 | // code deal with it. | |||
1003 | return true; | |||
1004 | } | |||
1005 | ||||
1006 | bool isAddSubImmNeg() const { | |||
1007 | if (!isShiftedImm() && !isImm()) | |||
1008 | return false; | |||
1009 | ||||
1010 | // Otherwise it should be a real negative immediate in range. | |||
1011 | if (auto ShiftedVal = getShiftedVal<12>()) | |||
1012 | return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff; | |||
1013 | ||||
1014 | return false; | |||
1015 | } | |||
1016 | ||||
1017 | // Signed value in the range -128 to +127. For element widths of | |||
1018 | // 16 bits or higher it may also be a signed multiple of 256 in the | |||
1019 | // range -32768 to +32512. | |||
1020 | // For element-width of 8 bits a range of -128 to 255 is accepted, | |||
1021 | // since a copy of a byte can be either signed/unsigned. | |||
1022 | template <typename T> | |||
1023 | DiagnosticPredicate isSVECpyImm() const { | |||
1024 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) | |||
1025 | return DiagnosticPredicateTy::NoMatch; | |||
1026 | ||||
1027 | bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value || | |||
1028 | std::is_same<int8_t, T>::value; | |||
1029 | if (auto ShiftedImm = getShiftedVal<8>()) | |||
1030 | if (!(IsByte && ShiftedImm->second) && | |||
1031 | AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first) | |||
1032 | << ShiftedImm->second)) | |||
1033 | return DiagnosticPredicateTy::Match; | |||
1034 | ||||
1035 | return DiagnosticPredicateTy::NearMatch; | |||
1036 | } | |||
1037 | ||||
1038 | // Unsigned value in the range 0 to 255. For element widths of | |||
1039 | // 16 bits or higher it may also be a signed multiple of 256 in the | |||
1040 | // range 0 to 65280. | |||
1041 | template <typename T> DiagnosticPredicate isSVEAddSubImm() const { | |||
1042 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) | |||
1043 | return DiagnosticPredicateTy::NoMatch; | |||
1044 | ||||
1045 | bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value || | |||
1046 | std::is_same<int8_t, T>::value; | |||
1047 | if (auto ShiftedImm = getShiftedVal<8>()) | |||
1048 | if (!(IsByte && ShiftedImm->second) && | |||
1049 | AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first | |||
1050 | << ShiftedImm->second)) | |||
1051 | return DiagnosticPredicateTy::Match; | |||
1052 | ||||
1053 | return DiagnosticPredicateTy::NearMatch; | |||
1054 | } | |||
1055 | ||||
1056 | template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const { | |||
1057 | if (isLogicalImm<T>() && !isSVECpyImm<T>()) | |||
1058 | return DiagnosticPredicateTy::Match; | |||
1059 | return DiagnosticPredicateTy::NoMatch; | |||
1060 | } | |||
1061 | ||||
1062 | bool isCondCode() const { return Kind == k_CondCode; } | |||
1063 | ||||
1064 | bool isSIMDImmType10() const { | |||
1065 | if (!isImm()) | |||
1066 | return false; | |||
1067 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1068 | if (!MCE) | |||
1069 | return false; | |||
1070 | return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue()); | |||
1071 | } | |||
1072 | ||||
1073 | template<int N> | |||
1074 | bool isBranchTarget() const { | |||
1075 | if (!isImm()) | |||
1076 | return false; | |||
1077 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1078 | if (!MCE) | |||
1079 | return true; | |||
1080 | int64_t Val = MCE->getValue(); | |||
1081 | if (Val & 0x3) | |||
1082 | return false; | |||
1083 | assert(N > 0 && "Branch target immediate cannot be 0 bits!")(static_cast <bool> (N > 0 && "Branch target immediate cannot be 0 bits!" ) ? void (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1083 , __extension__ __PRETTY_FUNCTION__)); | |||
1084 | return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2)); | |||
1085 | } | |||
1086 | ||||
1087 | bool | |||
1088 | isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const { | |||
1089 | if (!isImm()) | |||
1090 | return false; | |||
1091 | ||||
1092 | AArch64MCExpr::VariantKind ELFRefKind; | |||
1093 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
1094 | int64_t Addend; | |||
1095 | if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind, | |||
1096 | DarwinRefKind, Addend)) { | |||
1097 | return false; | |||
1098 | } | |||
1099 | if (DarwinRefKind != MCSymbolRefExpr::VK_None) | |||
1100 | return false; | |||
1101 | ||||
1102 | return llvm::is_contained(AllowedModifiers, ELFRefKind); | |||
1103 | } | |||
1104 | ||||
1105 | bool isMovWSymbolG3() const { | |||
1106 | return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3}); | |||
1107 | } | |||
1108 | ||||
1109 | bool isMovWSymbolG2() const { | |||
1110 | return isMovWSymbol( | |||
1111 | {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S, | |||
1112 | AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2, | |||
1113 | AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2, | |||
1114 | AArch64MCExpr::VK_DTPREL_G2}); | |||
1115 | } | |||
1116 | ||||
1117 | bool isMovWSymbolG1() const { | |||
1118 | return isMovWSymbol( | |||
1119 | {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S, | |||
1120 | AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1, | |||
1121 | AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1, | |||
1122 | AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC, | |||
1123 | AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC}); | |||
1124 | } | |||
1125 | ||||
1126 | bool isMovWSymbolG0() const { | |||
1127 | return isMovWSymbol( | |||
1128 | {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S, | |||
1129 | AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0, | |||
1130 | AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC, | |||
1131 | AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC, | |||
1132 | AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC}); | |||
1133 | } | |||
1134 | ||||
1135 | template<int RegWidth, int Shift> | |||
1136 | bool isMOVZMovAlias() const { | |||
1137 | if (!isImm()) return false; | |||
1138 | ||||
1139 | const MCExpr *E = getImm(); | |||
1140 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) { | |||
1141 | uint64_t Value = CE->getValue(); | |||
1142 | ||||
1143 | return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth); | |||
1144 | } | |||
1145 | // Only supports the case of Shift being 0 if an expression is used as an | |||
1146 | // operand | |||
1147 | return !Shift && E; | |||
1148 | } | |||
1149 | ||||
1150 | template<int RegWidth, int Shift> | |||
1151 | bool isMOVNMovAlias() const { | |||
1152 | if (!isImm()) return false; | |||
1153 | ||||
1154 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
1155 | if (!CE) return false; | |||
1156 | uint64_t Value = CE->getValue(); | |||
1157 | ||||
1158 | return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth); | |||
1159 | } | |||
1160 | ||||
1161 | bool isFPImm() const { | |||
1162 | return Kind == k_FPImm && | |||
1163 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1; | |||
1164 | } | |||
1165 | ||||
1166 | bool isBarrier() const { | |||
1167 | return Kind == k_Barrier && !getBarriernXSModifier(); | |||
1168 | } | |||
1169 | bool isBarriernXS() const { | |||
1170 | return Kind == k_Barrier && getBarriernXSModifier(); | |||
1171 | } | |||
1172 | bool isSysReg() const { return Kind == k_SysReg; } | |||
1173 | ||||
1174 | bool isMRSSystemRegister() const { | |||
1175 | if (!isSysReg()) return false; | |||
1176 | ||||
1177 | return SysReg.MRSReg != -1U; | |||
1178 | } | |||
1179 | ||||
1180 | bool isMSRSystemRegister() const { | |||
1181 | if (!isSysReg()) return false; | |||
1182 | return SysReg.MSRReg != -1U; | |||
1183 | } | |||
1184 | ||||
1185 | bool isSystemPStateFieldWithImm0_1() const { | |||
1186 | if (!isSysReg()) return false; | |||
1187 | return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField); | |||
1188 | } | |||
1189 | ||||
1190 | bool isSystemPStateFieldWithImm0_15() const { | |||
1191 | if (!isSysReg()) | |||
1192 | return false; | |||
1193 | return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField); | |||
1194 | } | |||
1195 | ||||
1196 | bool isSVCR() const { | |||
1197 | if (Kind != k_SVCR) | |||
1198 | return false; | |||
1199 | return SVCR.PStateField != -1U; | |||
1200 | } | |||
1201 | ||||
1202 | bool isReg() const override { | |||
1203 | return Kind == k_Register; | |||
1204 | } | |||
1205 | ||||
1206 | bool isVectorList() const { return Kind == k_VectorList; } | |||
1207 | ||||
1208 | bool isScalarReg() const { | |||
1209 | return Kind == k_Register && Reg.Kind == RegKind::Scalar; | |||
1210 | } | |||
1211 | ||||
1212 | bool isNeonVectorReg() const { | |||
1213 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector; | |||
1214 | } | |||
1215 | ||||
1216 | bool isNeonVectorRegLo() const { | |||
1217 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector && | |||
1218 | (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains( | |||
1219 | Reg.RegNum) || | |||
1220 | AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains( | |||
1221 | Reg.RegNum)); | |||
1222 | } | |||
1223 | ||||
1224 | bool isMatrix() const { return Kind == k_MatrixRegister; } | |||
1225 | bool isMatrixTileList() const { return Kind == k_MatrixTileList; } | |||
1226 | ||||
1227 | template <unsigned Class> bool isSVEPredicateAsCounterReg() const { | |||
1228 | RegKind RK; | |||
1229 | switch (Class) { | |||
1230 | case AArch64::PPRRegClassID: | |||
1231 | case AArch64::PPR_3bRegClassID: | |||
1232 | case AArch64::PPR_p8to15RegClassID: | |||
1233 | RK = RegKind::SVEPredicateAsCounter; | |||
1234 | break; | |||
1235 | default: | |||
1236 | llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1236 ); | |||
1237 | } | |||
1238 | ||||
1239 | return (Kind == k_Register && Reg.Kind == RK) && | |||
1240 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
1241 | } | |||
1242 | ||||
1243 | template <unsigned Class> bool isSVEVectorReg() const { | |||
1244 | RegKind RK; | |||
1245 | switch (Class) { | |||
1246 | case AArch64::ZPRRegClassID: | |||
1247 | case AArch64::ZPR_3bRegClassID: | |||
1248 | case AArch64::ZPR_4bRegClassID: | |||
1249 | RK = RegKind::SVEDataVector; | |||
1250 | break; | |||
1251 | case AArch64::PPRRegClassID: | |||
1252 | case AArch64::PPR_3bRegClassID: | |||
1253 | RK = RegKind::SVEPredicateVector; | |||
1254 | break; | |||
1255 | default: | |||
1256 | llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1256 ); | |||
1257 | } | |||
1258 | ||||
1259 | return (Kind == k_Register && Reg.Kind == RK) && | |||
1260 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
1261 | } | |||
1262 | ||||
1263 | template <unsigned Class> bool isFPRasZPR() const { | |||
1264 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1265 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
1266 | } | |||
1267 | ||||
1268 | template <int ElementWidth, unsigned Class> | |||
1269 | DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const { | |||
1270 | if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector) | |||
1271 | return DiagnosticPredicateTy::NoMatch; | |||
1272 | ||||
1273 | if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth)) | |||
1274 | return DiagnosticPredicateTy::Match; | |||
1275 | ||||
1276 | return DiagnosticPredicateTy::NearMatch; | |||
1277 | } | |||
1278 | ||||
1279 | template <int ElementWidth, unsigned Class> | |||
1280 | DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const { | |||
1281 | if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter) | |||
1282 | return DiagnosticPredicateTy::NoMatch; | |||
1283 | ||||
1284 | if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth)) | |||
1285 | return DiagnosticPredicateTy::Match; | |||
1286 | ||||
1287 | return DiagnosticPredicateTy::NearMatch; | |||
1288 | } | |||
1289 | ||||
1290 | template <int ElementWidth, unsigned Class> | |||
1291 | DiagnosticPredicate isSVEDataVectorRegOfWidth() const { | |||
1292 | if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector) | |||
1293 | return DiagnosticPredicateTy::NoMatch; | |||
1294 | ||||
1295 | if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth) | |||
1296 | return DiagnosticPredicateTy::Match; | |||
1297 | ||||
1298 | return DiagnosticPredicateTy::NearMatch; | |||
1299 | } | |||
1300 | ||||
1301 | template <int ElementWidth, unsigned Class, | |||
1302 | AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth, | |||
1303 | bool ShiftWidthAlwaysSame> | |||
1304 | DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const { | |||
1305 | auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>(); | |||
1306 | if (!VectorMatch.isMatch()) | |||
1307 | return DiagnosticPredicateTy::NoMatch; | |||
1308 | ||||
1309 | // Give a more specific diagnostic when the user has explicitly typed in | |||
1310 | // a shift-amount that does not match what is expected, but for which | |||
1311 | // there is also an unscaled addressing mode (e.g. sxtw/uxtw). | |||
1312 | bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8); | |||
1313 | if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW || | |||
1314 | ShiftExtendTy == AArch64_AM::SXTW) && | |||
1315 | !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8) | |||
1316 | return DiagnosticPredicateTy::NoMatch; | |||
1317 | ||||
1318 | if (MatchShift && ShiftExtendTy == getShiftExtendType()) | |||
1319 | return DiagnosticPredicateTy::Match; | |||
1320 | ||||
1321 | return DiagnosticPredicateTy::NearMatch; | |||
1322 | } | |||
1323 | ||||
1324 | bool isGPR32as64() const { | |||
1325 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1326 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum); | |||
1327 | } | |||
1328 | ||||
1329 | bool isGPR64as32() const { | |||
1330 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1331 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum); | |||
1332 | } | |||
1333 | ||||
1334 | bool isGPR64x8() const { | |||
1335 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1336 | AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains( | |||
1337 | Reg.RegNum); | |||
1338 | } | |||
1339 | ||||
1340 | bool isWSeqPair() const { | |||
1341 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1342 | AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains( | |||
1343 | Reg.RegNum); | |||
1344 | } | |||
1345 | ||||
1346 | bool isXSeqPair() const { | |||
1347 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1348 | AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains( | |||
1349 | Reg.RegNum); | |||
1350 | } | |||
1351 | ||||
1352 | bool isSyspXzrPair() const { | |||
1353 | return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR; | |||
1354 | } | |||
1355 | ||||
1356 | template<int64_t Angle, int64_t Remainder> | |||
1357 | DiagnosticPredicate isComplexRotation() const { | |||
1358 | if (!isImm()) return DiagnosticPredicateTy::NoMatch; | |||
1359 | ||||
1360 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
1361 | if (!CE) return DiagnosticPredicateTy::NoMatch; | |||
1362 | uint64_t Value = CE->getValue(); | |||
1363 | ||||
1364 | if (Value % Angle == Remainder && Value <= 270) | |||
1365 | return DiagnosticPredicateTy::Match; | |||
1366 | return DiagnosticPredicateTy::NearMatch; | |||
1367 | } | |||
1368 | ||||
1369 | template <unsigned RegClassID> bool isGPR64() const { | |||
1370 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1371 | AArch64MCRegisterClasses[RegClassID].contains(getReg()); | |||
1372 | } | |||
1373 | ||||
1374 | template <unsigned RegClassID, int ExtWidth> | |||
1375 | DiagnosticPredicate isGPR64WithShiftExtend() const { | |||
1376 | if (Kind != k_Register || Reg.Kind != RegKind::Scalar) | |||
1377 | return DiagnosticPredicateTy::NoMatch; | |||
1378 | ||||
1379 | if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL && | |||
1380 | getShiftExtendAmount() == Log2_32(ExtWidth / 8)) | |||
1381 | return DiagnosticPredicateTy::Match; | |||
1382 | return DiagnosticPredicateTy::NearMatch; | |||
1383 | } | |||
1384 | ||||
1385 | /// Is this a vector list with the type implicit (presumably attached to the | |||
1386 | /// instruction itself)? | |||
1387 | template <RegKind VectorKind, unsigned NumRegs> | |||
1388 | bool isImplicitlyTypedVectorList() const { | |||
1389 | return Kind == k_VectorList && VectorList.Count == NumRegs && | |||
1390 | VectorList.NumElements == 0 && | |||
1391 | VectorList.RegisterKind == VectorKind; | |||
1392 | } | |||
1393 | ||||
1394 | template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements, | |||
1395 | unsigned ElementWidth, unsigned Stride = 1> | |||
1396 | bool isTypedVectorList() const { | |||
1397 | if (Kind != k_VectorList) | |||
1398 | return false; | |||
1399 | if (VectorList.Count != NumRegs) | |||
1400 | return false; | |||
1401 | if (VectorList.RegisterKind != VectorKind) | |||
1402 | return false; | |||
1403 | if (VectorList.ElementWidth != ElementWidth) | |||
1404 | return false; | |||
1405 | if (VectorList.Stride != Stride) | |||
1406 | return false; | |||
1407 | return VectorList.NumElements == NumElements; | |||
1408 | } | |||
1409 | ||||
1410 | template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements, | |||
1411 | unsigned ElementWidth> | |||
1412 | DiagnosticPredicate isTypedVectorListMultiple() const { | |||
1413 | bool Res = | |||
1414 | isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>(); | |||
1415 | if (!Res) | |||
1416 | return DiagnosticPredicateTy::NoMatch; | |||
1417 | if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0) | |||
1418 | return DiagnosticPredicateTy::NearMatch; | |||
1419 | return DiagnosticPredicateTy::Match; | |||
1420 | } | |||
1421 | ||||
  /// Predicate for a strided vector list: the first register of the tuple
  /// must lie in Z0..Z(Stride-1) or Z16..Z(16+Stride-1).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    // The arithmetic below relies on Z0..Z31 being numbered consecutively
    // in the generated register enum.
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1435 | ||||
1436 | template <int Min, int Max> | |||
1437 | DiagnosticPredicate isVectorIndex() const { | |||
1438 | if (Kind != k_VectorIndex) | |||
1439 | return DiagnosticPredicateTy::NoMatch; | |||
1440 | if (VectorIndex.Val >= Min && VectorIndex.Val <= Max) | |||
1441 | return DiagnosticPredicateTy::Match; | |||
1442 | return DiagnosticPredicateTy::NearMatch; | |||
1443 | } | |||
1444 | ||||
  // Simple discriminator checks on the operand kind.
  bool isToken() const override { return Kind == k_Token; }

  // True when this operand is exactly the token \p Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }       // System control reg.
  bool isPrefetch() const { return Kind == k_Prefetch; } // PRFM operand.
  bool isPSBHint() const { return Kind == k_PSBHint; }   // PSB hint alias.
  bool isBTIHint() const { return Kind == k_BTIHint; }   // BTI hint alias.
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1455 | bool isShifter() const { | |||
1456 | if (!isShiftExtend()) | |||
1457 | return false; | |||
1458 | ||||
1459 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
1460 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
1461 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR || | |||
1462 | ST == AArch64_AM::MSL); | |||
1463 | } | |||
1464 | ||||
  /// Predicate for an FP immediate that exactly equals the table entry
  /// identified by ImmEnum (no rounding allowed).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value")(static_cast <bool> (Desc && "Unknown enum value" ) ? void (0) : __assert_fail ("Desc && \"Unknown enum value\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1472 , __extension__ __PRETTY_FUNCTION__));

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1479 );

      // Bitwise comparison so e.g. -0.0 and +0.0 stay distinct.
      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1487 | ||||
1488 | template <unsigned ImmA, unsigned ImmB> | |||
1489 | DiagnosticPredicate isExactFPImm() const { | |||
1490 | DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch; | |||
1491 | if ((Res = isExactFPImm<ImmA>())) | |||
1492 | return DiagnosticPredicateTy::Match; | |||
1493 | if ((Res = isExactFPImm<ImmB>())) | |||
1494 | return DiagnosticPredicateTy::Match; | |||
1495 | return Res; | |||
1496 | } | |||
1497 | ||||
1498 | bool isExtend() const { | |||
1499 | if (!isShiftExtend()) | |||
1500 | return false; | |||
1501 | ||||
1502 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1503 | return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || | |||
1504 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || | |||
1505 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW || | |||
1506 | ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || | |||
1507 | ET == AArch64_AM::LSL) && | |||
1508 | getShiftExtendAmount() <= 4; | |||
1509 | } | |||
1510 | ||||
1511 | bool isExtend64() const { | |||
1512 | if (!isExtend()) | |||
1513 | return false; | |||
1514 | // Make sure the extend expects a 32-bit source register. | |||
1515 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1516 | return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || | |||
1517 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || | |||
1518 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW; | |||
1519 | } | |||
1520 | ||||
1521 | bool isExtendLSL64() const { | |||
1522 | if (!isExtend()) | |||
1523 | return false; | |||
1524 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1525 | return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || | |||
1526 | ET == AArch64_AM::LSL) && | |||
1527 | getShiftExtendAmount() <= 4; | |||
1528 | } | |||
1529 | ||||
1530 | template<int Width> bool isMemXExtend() const { | |||
1531 | if (!isExtend()) | |||
1532 | return false; | |||
1533 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1534 | return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) && | |||
1535 | (getShiftExtendAmount() == Log2_32(Width / 8) || | |||
1536 | getShiftExtendAmount() == 0); | |||
1537 | } | |||
1538 | ||||
1539 | template<int Width> bool isMemWExtend() const { | |||
1540 | if (!isExtend()) | |||
1541 | return false; | |||
1542 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1543 | return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) && | |||
1544 | (getShiftExtendAmount() == Log2_32(Width / 8) || | |||
1545 | getShiftExtendAmount() == 0); | |||
1546 | } | |||
1547 | ||||
1548 | template <unsigned width> | |||
1549 | bool isArithmeticShifter() const { | |||
1550 | if (!isShifter()) | |||
1551 | return false; | |||
1552 | ||||
1553 | // An arithmetic shifter is LSL, LSR, or ASR. | |||
1554 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
1555 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
1556 | ST == AArch64_AM::ASR) && getShiftExtendAmount() < width; | |||
1557 | } | |||
1558 | ||||
1559 | template <unsigned width> | |||
1560 | bool isLogicalShifter() const { | |||
1561 | if (!isShifter()) | |||
1562 | return false; | |||
1563 | ||||
1564 | // A logical shifter is LSL, LSR, ASR or ROR. | |||
1565 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
1566 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
1567 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) && | |||
1568 | getShiftExtendAmount() < width; | |||
1569 | } | |||
1570 | ||||
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is an LSL of 0 or 16 (the comment previously
    // listed the 64-bit amounts; the code below is the authoritative check).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1582 | ||||
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is an LSL of 0, 16, 32 or 48 (the comment
    // previously listed the 32-bit amounts; the code below is authoritative).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1594 | ||||
1595 | bool isLogicalVecShifter() const { | |||
1596 | if (!isShifter()) | |||
1597 | return false; | |||
1598 | ||||
1599 | // A logical vector shifter is a left shift by 0, 8, 16, or 24. | |||
1600 | unsigned Shift = getShiftExtendAmount(); | |||
1601 | return getShiftExtendType() == AArch64_AM::LSL && | |||
1602 | (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24); | |||
1603 | } | |||
1604 | ||||
1605 | bool isLogicalVecHalfWordShifter() const { | |||
1606 | if (!isLogicalVecShifter()) | |||
1607 | return false; | |||
1608 | ||||
1609 | // A logical vector shifter is a left shift by 0 or 8. | |||
1610 | unsigned Shift = getShiftExtendAmount(); | |||
1611 | return getShiftExtendType() == AArch64_AM::LSL && | |||
1612 | (Shift == 0 || Shift == 8); | |||
1613 | } | |||
1614 | ||||
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16; the old
    // comment incorrectly called this a "logical" shifter.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1624 | ||||
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    // Width is the access size in bits; a scaled offset steps in Width/8
    // byte increments, so exclude any offset the scaled form could encode.
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1634 | ||||
1635 | bool isAdrpLabel() const { | |||
1636 | // Validation was handled during parsing, so we just verify that | |||
1637 | // something didn't go haywire. | |||
1638 | if (!isImm()) | |||
1639 | return false; | |||
1640 | ||||
1641 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1642 | int64_t Val = CE->getValue(); | |||
1643 | int64_t Min = - (4096 * (1LL << (21 - 1))); | |||
1644 | int64_t Max = 4096 * ((1LL << (21 - 1)) - 1); | |||
1645 | return (Val % 4096) == 0 && Val >= Min && Val <= Max; | |||
1646 | } | |||
1647 | ||||
1648 | return true; | |||
1649 | } | |||
1650 | ||||
1651 | bool isAdrLabel() const { | |||
1652 | // Validation was handled during parsing, so we just verify that | |||
1653 | // something didn't go haywire. | |||
1654 | if (!isImm()) | |||
1655 | return false; | |||
1656 | ||||
1657 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1658 | int64_t Val = CE->getValue(); | |||
1659 | int64_t Min = - (1LL << (21 - 1)); | |||
1660 | int64_t Max = ((1LL << (21 - 1)) - 1); | |||
1661 | return Val >= Min && Val <= Max; | |||
1662 | } | |||
1663 | ||||
1664 | return true; | |||
1665 | } | |||
1666 | ||||
1667 | template <MatrixKind Kind, unsigned EltSize, unsigned RegClass> | |||
1668 | DiagnosticPredicate isMatrixRegOperand() const { | |||
1669 | if (!isMatrix()) | |||
1670 | return DiagnosticPredicateTy::NoMatch; | |||
1671 | if (getMatrixKind() != Kind || | |||
1672 | !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) || | |||
1673 | EltSize != getMatrixElementWidth()) | |||
1674 | return DiagnosticPredicateTy::NearMatch; | |||
1675 | return DiagnosticPredicateTy::Match; | |||
1676 | } | |||
1677 | ||||
1678 | void addExpr(MCInst &Inst, const MCExpr *Expr) const { | |||
1679 | // Add as immediates when possible. Null MCExpr = 0. | |||
1680 | if (!Expr) | |||
1681 | Inst.addOperand(MCOperand::createImm(0)); | |||
1682 | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) | |||
1683 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
1684 | else | |||
1685 | Inst.addOperand(MCOperand::createExpr(Expr)); | |||
1686 | } | |||
1687 | ||||
  /// Append the plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1689 , __extension__ __PRETTY_FUNCTION__));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Append the SME matrix register operand.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1694 , __extension__ __PRETTY_FUNCTION__));
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }

  /// A parsed 64-bit GPR is emitted as the 32-bit register with the same
  /// hardware encoding.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1699 , __extension__ __PRETTY_FUNCTION__));
    assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64:: GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1701 , __extension__ __PRETTY_FUNCTION__))
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64:: GPR64RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1701 , __extension__ __PRETTY_FUNCTION__));

    // NOTE(review): Ctx.getRegisterInfo() can in principle return null (the
    // static analyzer flags this pattern elsewhere in the file) — confirm the
    // MCContext used by the asm parser always carries register info.
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  /// A parsed 32-bit GPR is emitted as the 64-bit register with the same
  /// hardware encoding.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1711 , __extension__ __PRETTY_FUNCTION__));
    assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64:: GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1713 , __extension__ __PRETTY_FUNCTION__))
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64:: GPR32RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1713 , __extension__ __PRETTY_FUNCTION__));

    // NOTE(review): same possibly-null Ctx.getRegisterInfo() concern as above.
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1721 | ||||
  /// Emit an FPR (B/H/S/D/Q) operand as the SVE Z register with the same
  /// index; Width selects which FPR bank the parsed register came from.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8: Base = AArch64::B0; break;
    case 16: Base = AArch64::H0; break;
    case 32: Base = AArch64::S0; break;
    case 64: Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1732);
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  /// Emit a Q register operand as the overlapping 64-bit D register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1738 , __extension__ __PRETTY_FUNCTION__));
    assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64:: FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1740 , __extension__ __PRETTY_FUNCTION__))
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64:: FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1740 , __extension__ __PRETTY_FUNCTION__));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  /// Emit a Q register operand unchanged (128-bit form).
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1745 , __extension__ __PRETTY_FUNCTION__));
    assert((static_cast <bool> (AArch64MCRegisterClasses[AArch64:: FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1747 , __extension__ __PRETTY_FUNCTION__))
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))(static_cast <bool> (AArch64MCRegisterClasses[AArch64:: FPR128RegClassID].contains(getReg())) ? void (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1747 , __extension__ __PRETTY_FUNCTION__));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Emit the register unchanged for the "lo" vector register classes.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1752 , __extension__ __PRETTY_FUNCTION__));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1755 | ||||
  // Row index into the FirstRegs table below, selecting which register bank
  // a vector list is emitted in.
  enum VecListIndexType {
    VecListIdx_DReg = 0, // 64-bit NEON D registers.
    VecListIdx_QReg = 1, // 128-bit NEON Q registers.
    VecListIdx_ZReg = 2, // SVE Z registers.
    VecListIdx_PReg = 3, // SVE predicate registers.
  };

  /// Emit a vector list as the single tuple register covering its members.
  /// Column 0 of FirstRegs holds the bank the list was parsed in; column
  /// NumRegs holds the first tuple register of that size.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1765 , __extension__ __PRETTY_FUNCTION__));
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0, AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0, AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0, AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0, AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs") ? void (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1781 , __extension__ __PRETTY_FUNCTION__))
           " NumRegs must be <= 4 for ZRegs")(static_cast <bool> ((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs") ? void (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1781 , __extension__ __PRETTY_FUNCTION__));

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&(static_cast <bool> ((RegTy != VecListIdx_PReg || NumRegs <= 2) && " NumRegs must be <= 2 for PRegs") ? void (0) : __assert_fail ("(RegTy != VecListIdx_PReg || NumRegs <= 2) && \" NumRegs must be <= 2 for PRegs\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1784 , __extension__ __PRETTY_FUNCTION__))
           " NumRegs must be <= 2 for PRegs")(static_cast <bool> ((RegTy != VecListIdx_PReg || NumRegs <= 2) && " NumRegs must be <= 2 for PRegs") ? void (0) : __assert_fail ("(RegTy != VecListIdx_PReg || NumRegs <= 2) && \" NumRegs must be <= 2 for PRegs\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1784 , __extension__ __PRETTY_FUNCTION__));

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1790 | ||||
  /// Emit a strided (SME2) vector list as the corresponding tuple register.
  /// Two-register lists use the Z0_Z8 / Z16_Z24 tuples; four-register lists
  /// use Z0_Z4_Z8_Z12 / Z16_Z20_Z24_Z28, selected by which half of the Z
  /// file the first register lives in.
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1793 , __extension__ __PRETTY_FUNCTION__));
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4")(static_cast <bool> ((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4") ? void (0) : __assert_fail ("(NumRegs == 2 || NumRegs == 4) && \" NumRegs must be 2 or 4\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1794 , __extension__ __PRETTY_FUNCTION__));

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&(static_cast <bool> ((getVectorListStart() < AArch64 ::Z8) && (getVectorListStart() >= AArch64::Z0) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z8) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1800 , __extension__ __PRETTY_FUNCTION__))
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64 ::Z8) && (getVectorListStart() >= AArch64::Z0) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z8) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1800 , __extension__ __PRETTY_FUNCTION__));
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&(static_cast <bool> ((getVectorListStart() < AArch64 ::Z24) && (getVectorListStart() >= AArch64::Z16) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z24) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1805 , __extension__ __PRETTY_FUNCTION__))
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64 ::Z24) && (getVectorListStart() >= AArch64::Z16) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z24) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1805 , __extension__ __PRETTY_FUNCTION__));
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&(static_cast <bool> ((getVectorListStart() < AArch64 ::Z4) && (getVectorListStart() >= AArch64::Z0) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z4) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1813 , __extension__ __PRETTY_FUNCTION__))
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64 ::Z4) && (getVectorListStart() >= AArch64::Z0) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z4) && (getVectorListStart() >= AArch64::Z0) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1813 , __extension__ __PRETTY_FUNCTION__));
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&(static_cast <bool> ((getVectorListStart() < AArch64 ::Z20) && (getVectorListStart() >= AArch64::Z16) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z20) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1818 , __extension__ __PRETTY_FUNCTION__))
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register")(static_cast <bool> ((getVectorListStart() < AArch64 ::Z20) && (getVectorListStart() >= AArch64::Z16) && "Invalid Register") ? void (0) : __assert_fail ("(getVectorListStart() < AArch64::Z20) && (getVectorListStart() >= AArch64::Z16) && \"Invalid Register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1818 , __extension__ __PRETTY_FUNCTION__));
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list")::llvm::llvm_unreachable_internal("Unsupported number of registers for strided vec list" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1824 );
    }
  }
1827 | ||||
1828 | void addMatrixTileListOperands(MCInst &Inst, unsigned N) const { | |||
1829 | assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!" ) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1829 , __extension__ __PRETTY_FUNCTION__)); | |||
1830 | unsigned RegMask = getMatrixTileListRegMask(); | |||
1831 | assert(RegMask <= 0xFF && "Invalid mask!")(static_cast <bool> (RegMask <= 0xFF && "Invalid mask!" ) ? void (0) : __assert_fail ("RegMask <= 0xFF && \"Invalid mask!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1831 , __extension__ __PRETTY_FUNCTION__)); | |||
1832 | Inst.addOperand(MCOperand::createImm(RegMask)); | |||
1833 | } | |||
1834 | ||||
  // Emit a vector element index (e.g. the "[3]" in "v0.s[3]") as an immediate.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1839 | ||||
  // Emit an "exact FP immediate" operand that may only be one of two values
  // (ImmIs0 / ImmIs1); the encoded immediate is 1 iff the value matches ImmIs1.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1846 | ||||
  // Emit a generic immediate operand (constant or relocatable expression).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1854 | ||||
  // Emit an immediate plus an optional left-shift as two operands
  // (value, shift amount).  Prefers the canonicalized shifted form when the
  // value can be represented with a 'Shift'-bit shift; falls back to the
  // explicit shifted-imm form, and finally to (expr, 0).
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }
1869 | ||||
  // Emit the negation of a shiftable immediate as (value, shift) operands.
  // Used for aliases such as SUB-from-ADD; the operand must already have been
  // validated as a representable shifted immediate.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1879 | ||||
  // Emit a condition code (EQ, NE, ...) as an immediate operand.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }
1884 | ||||
  // Emit an ADRP label operand.  A constant is scaled down to its 4KiB page
  // number (>> 12); a symbolic expression is emitted unchanged for later
  // relocation.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }
1893 | ||||
  // ADR labels are byte-granular, so the immediate needs no adjustment.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1897 | ||||
  // Emit an unsigned 12-bit scaled load/store offset.  Constants are divided
  // by the access size 'Scale'; non-constant expressions are passed through
  // for fixup resolution.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1909 | ||||
  // Emit a 6-bit unsigned immediate.  The operand was validated during
  // matching, so a non-constant expression here is a programming error
  // (hence cast<> rather than dyn_cast<>).
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1915 | ||||
  // Emit a constant immediate divided by the compile-time factor 'Scale'.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1922 | ||||
  // Emit the first value of an immediate range (e.g. "0:1"), scaled down by
  // 'Scale'; the range width itself is implied by the instruction.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
  }
1928 | ||||
  // Emit a logical (bitmask) immediate for a T-sized operation, encoded into
  // the N:immr:imms form used by AND/ORR/EOR.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1937 | ||||
  // Emit the bitwise complement of the immediate as a logical (bitmask)
  // immediate; used for aliases like BIC that invert the written value.
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1946 | ||||
  // Emit an AdvSIMD modified-immediate (type 10: 64-bit byte mask) operand.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1953 | ||||
  // Emit a 26-bit branch target (B/BL).
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // NOTE(review): trivially true after the early return above; kept for
    // symmetry with the sibling branch-target emitters.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1967 | ||||
  // Emit a 19-bit PC-relative label (CBZ/CBNZ, conditional branches, LDR lit).
  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // NOTE(review): trivially true after the early return above.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1981 | ||||
  // Emit a 14-bit branch target (TBZ/TBNZ).
  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // NOTE(review): trivially true after the early return above.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1995 | ||||
  // Emit a floating-point immediate in the 8-bit FMOV encoding.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
2001 | ||||
  // Emit a memory barrier option (DSB/DMB/ISB) as an immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2006 | ||||
  // Emit a barrier option for the nXS (FEAT_XS) barrier variants.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2011 | ||||
  // Emit the system-register encoding used for reads (MRS).
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
2017 | ||||
  // Emit the system-register encoding used for writes (MSR).
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
2023 | ||||
  // Emit a PSTATE field that accepts a 0/1 immediate (e.g. via MSR alias).
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2029 | ||||
  // Emit the SME streaming-mode control (SVCR) pstate field.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
2035 | ||||
  // Emit a PSTATE field that accepts a 0-15 immediate.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2041 | ||||
  // Emit a system instruction Cn/Cm field.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
2046 | ||||
  // Emit a PRFM/PRFUM prefetch operation operand.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
2051 | ||||
  // Emit a PSB hint operand (e.g. "csync").
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
2056 | ||||
  // Emit a BTI hint operand (c/j/jc).
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
2061 | ||||
  // Emit a shifter operand with type and amount packed into one immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2068 | ||||
  // Emit the XZR pair operand of SYSP.  Only XZR is legal here; any other
  // scalar register indicates a matcher bug.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (!isScalarReg())
      return;

    // NOTE(review): getRegisterInfo() is dereferenced unchecked; assumes the
    // MCContext was constructed with register info — confirm (same family as
    // the analyzer's null-pointer warning on this file).
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    // Normalize through the GPR64 class so W/X aliases map to one register.
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
                       .getRegister(RI->getEncodingValue(getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2083 | ||||
  // Emit a 32-bit arithmetic extend operand; plain LSL is canonicalized to
  // UXTW for the encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2091 | ||||
  // Emit a 64-bit arithmetic extend operand; plain LSL is canonicalized to
  // UXTX for the encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2099 | ||||
  // Emit a load/store register-offset extend as two flags:
  // (sign-extended?, shifted?).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }
2107 | ||||
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // Second flag: was an amount written at all (explicit "#0" vs none)?
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
2119 | ||||
  // Emit the 16-bit chunk of a constant for a MOV alias of MOVZ, taking the
  // halfword at bit position 'Shift'.  Non-constant expressions pass through
  // unchanged for relocation.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (CE) {
      uint64_t Value = CE->getValue();
      Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
    } else {
      addExpr(Inst, getImm());
    }
  }
2132 | ||||
  // Emit the 16-bit chunk of the complemented constant for a MOV alias of
  // MOVN.  Only constants are accepted here (cast<>, not dyn_cast<>).
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
2141 | ||||
  // Emit a complex-number rotation that must be a multiple of 90 degrees
  // (0/90/180/270), encoded as value/90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
2147 | ||||
  // Emit a complex-number rotation that must be 90 or 270 degrees,
  // encoded as (value - 90) / 180 (i.e. 0 or 1).
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
2153 | ||||
2154 | void print(raw_ostream &OS) const override; | |||
2155 | ||||
  // Factory: build a token operand (mnemonic fragment, punctuation, or an
  // instruction suffix when IsSuffix is set).  The operand does not own the
  // string; 'Str' must outlive it.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S; // tokens are treated as zero-width for diagnostics
    return Op;
  }
2166 | ||||
  // Factory: build a register operand, optionally carrying a shift/extend
  // modifier and a register-equality constraint used for tied-operand checks.
  // ElementWidth defaults to 0; vector creators overwrite it.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2185 | ||||
  // Factory: build a vector register operand (NEON or SVE) with a known
  // element width in bits.  Delegates to CreateReg and then records the width.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2201 | ||||
  // Factory: build a vector register list operand ("{ v0.4s, v1.4s }"),
  // recording the first register, count, stride between registers, and the
  // per-register element shape.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
                   unsigned NumElements, unsigned ElementWidth,
                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.Stride = Stride;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2217 | ||||
  // Factory: build a vector element index operand (the "[n]" suffix).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2226 | ||||
  // Factory: build an SME ZA tile-list operand from a tile bit-mask.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2235 | ||||
2236 | static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs, | |||
2237 | const unsigned ElementWidth) { | |||
2238 | static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>> | |||
2239 | RegMap = { | |||
2240 | {{0, AArch64::ZAB0}, | |||
2241 | {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3, | |||
2242 | AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}}, | |||
2243 | {{8, AArch64::ZAB0}, | |||
2244 | {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3, | |||
2245 | AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}}, | |||
2246 | {{16, AArch64::ZAH0}, | |||
2247 | {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}}, | |||
2248 | {{16, AArch64::ZAH1}, | |||
2249 | {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}}, | |||
2250 | {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}}, | |||
2251 | {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}}, | |||
2252 | {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}}, | |||
2253 | {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}}, | |||
2254 | }; | |||
2255 | ||||
2256 | if (ElementWidth == 64) | |||
2257 | OutRegs.insert(Reg); | |||
2258 | else { | |||
2259 | std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)]; | |||
2260 | assert(!Regs.empty() && "Invalid tile or element width!")(static_cast <bool> (!Regs.empty() && "Invalid tile or element width!" ) ? void (0) : __assert_fail ("!Regs.empty() && \"Invalid tile or element width!\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 2260 , __extension__ __PRETTY_FUNCTION__)); | |||
2261 | for (auto OutReg : Regs) | |||
2262 | OutRegs.insert(OutReg); | |||
2263 | } | |||
2264 | } | |||
2265 | ||||
2266 | static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S, | |||
2267 | SMLoc E, MCContext &Ctx) { | |||
2268 | auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx); | |||
2269 | Op->Imm.Val = Val; | |||
2270 | Op->StartLoc = S; | |||
2271 | Op->EndLoc = E; | |||
2272 | return Op; | |||
2273 | } | |||
2274 | ||||
2275 | static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, | |||
2276 | unsigned ShiftAmount, | |||
2277 | SMLoc S, SMLoc E, | |||
2278 | MCContext &Ctx) { | |||
2279 | auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx); | |||
2280 | Op->ShiftedImm .Val = Val; | |||
2281 | Op->ShiftedImm.ShiftAmount = ShiftAmount; | |||
2282 | Op->StartLoc = S; | |||
2283 | Op->EndLoc = E; | |||
2284 | return Op; | |||
2285 | } | |||
2286 | ||||
2287 | static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First, | |||
2288 | unsigned Last, SMLoc S, | |||
2289 | SMLoc E, | |||
2290 | MCContext &Ctx) { | |||
2291 | auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx); | |||
2292 | Op->ImmRange.First = First; | |||
2293 | Op->ImmRange.Last = Last; | |||
2294 | Op->EndLoc = E; | |||
2295 | return Op; | |||
2296 | } | |||
2297 | ||||
2298 | static std::unique_ptr<AArch64Operand> | |||
2299 | CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
2300 | auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx); | |||
2301 | Op->CondCode.Code = Code; | |||
2302 | Op->StartLoc = S; | |||
2303 | Op->EndLoc = E; | |||
2304 | return Op; | |||
2305 | } | |||
2306 | ||||
2307 | static std::unique_ptr<AArch64Operand> | |||
2308 | CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) { | |||
2309 | auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx); | |||
2310 | Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue(); | |||
2311 | Op->FPImm.IsExact = IsExact; | |||
2312 | Op->StartLoc = S; | |||
2313 | Op->EndLoc = S; | |||
2314 | return Op; | |||
2315 | } | |||
2316 | ||||
2317 | static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, | |||
2318 | StringRef Str, | |||
2319 | SMLoc S, | |||
2320 | MCContext &Ctx, | |||
2321 | bool HasnXSModifier) { | |||
2322 | auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx); | |||
2323 | Op->Barrier.Val = Val; | |||
2324 | Op->Barrier.Data = Str.data(); | |||
2325 | Op->Barrier.Length = Str.size(); | |||
2326 | Op->Barrier.HasnXSModifier = HasnXSModifier; | |||
2327 | Op->StartLoc = S; | |||
2328 | Op->EndLoc = S; | |||
2329 | return Op; | |||
2330 | } | |||
2331 | ||||
2332 | static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S, | |||
2333 | uint32_t MRSReg, | |||
2334 | uint32_t MSRReg, | |||
2335 | uint32_t PStateField, | |||
2336 | MCContext &Ctx) { | |||
2337 | auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx); | |||
2338 | Op->SysReg.Data = Str.data(); | |||
2339 | Op->SysReg.Length = Str.size(); | |||
2340 | Op->SysReg.MRSReg = MRSReg; | |||
2341 | Op->SysReg.MSRReg = MSRReg; | |||
2342 | Op->SysReg.PStateField = PStateField; | |||
2343 | Op->StartLoc = S; | |||
2344 | Op->EndLoc = S; | |||
2345 | return Op; | |||
2346 | } | |||
2347 | ||||
2348 | static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S, | |||
2349 | SMLoc E, MCContext &Ctx) { | |||
2350 | auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx); | |||
2351 | Op->SysCRImm.Val = Val; | |||
2352 | Op->StartLoc = S; | |||
2353 | Op->EndLoc = E; | |||
2354 | return Op; | |||
2355 | } | |||
2356 | ||||
2357 | static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, | |||
2358 | StringRef Str, | |||
2359 | SMLoc S, | |||
2360 | MCContext &Ctx) { | |||
2361 | auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx); | |||
2362 | Op->Prefetch.Val = Val; | |||
2363 | Op->Barrier.Data = Str.data(); | |||
2364 | Op->Barrier.Length = Str.size(); | |||
2365 | Op->StartLoc = S; | |||
2366 | Op->EndLoc = S; | |||
2367 | return Op; | |||
2368 | } | |||
2369 | ||||
2370 | static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, | |||
2371 | StringRef Str, | |||
2372 | SMLoc S, | |||
2373 | MCContext &Ctx) { | |||
2374 | auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx); | |||
2375 | Op->PSBHint.Val = Val; | |||
2376 | Op->PSBHint.Data = Str.data(); | |||
2377 | Op->PSBHint.Length = Str.size(); | |||
2378 | Op->StartLoc = S; | |||
2379 | Op->EndLoc = S; | |||
2380 | return Op; | |||
2381 | } | |||
2382 | ||||
2383 | static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val, | |||
2384 | StringRef Str, | |||
2385 | SMLoc S, | |||
2386 | MCContext &Ctx) { | |||
2387 | auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx); | |||
2388 | Op->BTIHint.Val = Val | 32; | |||
2389 | Op->BTIHint.Data = Str.data(); | |||
2390 | Op->BTIHint.Length = Str.size(); | |||
2391 | Op->StartLoc = S; | |||
2392 | Op->EndLoc = S; | |||
2393 | return Op; | |||
2394 | } | |||
2395 | ||||
2396 | static std::unique_ptr<AArch64Operand> | |||
2397 | CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind, | |||
2398 | SMLoc S, SMLoc E, MCContext &Ctx) { | |||
2399 | auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx); | |||
2400 | Op->MatrixReg.RegNum = RegNum; | |||
2401 | Op->MatrixReg.ElementWidth = ElementWidth; | |||
2402 | Op->MatrixReg.Kind = Kind; | |||
2403 | Op->StartLoc = S; | |||
2404 | Op->EndLoc = E; | |||
2405 | return Op; | |||
2406 | } | |||
2407 | ||||
2408 | static std::unique_ptr<AArch64Operand> | |||
2409 | CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) { | |||
2410 | auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx); | |||
2411 | Op->SVCR.PStateField = PStateField; | |||
2412 | Op->SVCR.Data = Str.data(); | |||
2413 | Op->SVCR.Length = Str.size(); | |||
2414 | Op->StartLoc = S; | |||
2415 | Op->EndLoc = S; | |||
2416 | return Op; | |||
2417 | } | |||
2418 | ||||
2419 | static std::unique_ptr<AArch64Operand> | |||
2420 | CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, | |||
2421 | bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
2422 | auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx); | |||
2423 | Op->ShiftExtend.Type = ShOp; | |||
2424 | Op->ShiftExtend.Amount = Val; | |||
2425 | Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; | |||
2426 | Op->StartLoc = S; | |||
2427 | Op->EndLoc = E; | |||
2428 | return Op; | |||
2429 | } | |||
2430 | }; | |||
2431 | ||||
2432 | } // end anonymous namespace. | |||
2433 | ||||
/// Debug-print this parsed operand to \p OS; each kind emits a short tagged
/// form such as "<fpimm ...>" or "<barrier ...>".
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Print the raw bit pattern of the float; flag literals that were not
    // represented exactly.
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    // Prefer the symbolic barrier name; fall back to the raw encoding.
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Successive registers in the list advance by the list's stride.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    // Prefer the symbolic prfop name; fall back to the raw encoding.
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    // Emit the 8-bit tile mask MSB-first as a string of 0/1 digits.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    // A register with an attached shift/extend also prints that suffix via
    // the k_ShiftExtend case below - the fallthrough is deliberate.
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>"; // Amount was implicit in the source.
    OS << '>';
    break;
  }
}
2533 | ||||
2534 | /// @name Auto-generated Match Functions | |||
2535 | /// { | |||
2536 | ||||
2537 | static unsigned MatchRegisterName(StringRef Name); | |||
2538 | ||||
2539 | /// } | |||
2540 | ||||
/// Map a NEON vector register name ("v0".."v31", lower-cased before the
/// comparison) to the corresponding Q register; returns 0 (no register)
/// for anything else, including non-canonical spellings like "v07".
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2577 | ||||
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
/// Returns std::nullopt when the suffix is not valid for \p VectorKind.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the sentinel for "not a valid suffix".
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // SVE/SME suffixes name only the element width; the element count is
    // implicit (0).
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 2628);
  }

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2636 | ||||
2637 | static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) { | |||
2638 | return parseVectorKind(Suffix, VectorKind).has_value(); | |||
2639 | } | |||
2640 | ||||
/// Map an SVE data vector name ("z0".."z31", lower-cased before the
/// comparison) to the corresponding Z register; returns 0 (no register)
/// otherwise.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2677 | ||||
/// Map an SVE predicate name ("p0".."p15", lower-cased before the
/// comparison) to the corresponding P register; returns 0 (no register)
/// otherwise.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2698 | ||||
/// Map a predicate-as-counter name ("pn0".."pn15", lower-cased before the
/// comparison) to the underlying P register; returns 0 (no register)
/// otherwise.
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("pn0", AArch64::P0)
      .Case("pn1", AArch64::P1)
      .Case("pn2", AArch64::P2)
      .Case("pn3", AArch64::P3)
      .Case("pn4", AArch64::P4)
      .Case("pn5", AArch64::P5)
      .Case("pn6", AArch64::P6)
      .Case("pn7", AArch64::P7)
      .Case("pn8", AArch64::P8)
      .Case("pn9", AArch64::P9)
      .Case("pn10", AArch64::P10)
      .Case("pn11", AArch64::P11)
      .Case("pn12", AArch64::P12)
      .Case("pn13", AArch64::P13)
      .Case("pn14", AArch64::P14)
      .Case("pn15", AArch64::P15)
      .Default(0);
}
2719 | ||||
/// Map a ZA tile name with an element-size suffix ("zaN.d/.s/.h/.b",
/// lower-cased before the comparison) to the corresponding tile register
/// for use in a tile list; returns 0 (no register) otherwise.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2739 | ||||
/// Map an SME matrix register spelling (lower-cased before the comparison)
/// to its register. Accepts the whole array "za", tile names "zaN.<size>",
/// and the horizontal/vertical slice spellings "zaNh.<size>"/"zaNv.<size>"
/// (which map to the same tile registers). Returns 0 (no register)
/// otherwise.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      // Horizontal slices.
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      // Vertical slices.
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
2838 | ||||
2839 | bool AArch64AsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc, | |||
2840 | SMLoc &EndLoc) { | |||
2841 | return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success; | |||
2842 | } | |||
2843 | ||||
2844 | OperandMatchResultTy AArch64AsmParser::tryParseRegister(MCRegister &RegNo, | |||
2845 | SMLoc &StartLoc, | |||
2846 | SMLoc &EndLoc) { | |||
2847 | StartLoc = getLoc(); | |||
2848 | auto Res = tryParseScalarRegister(RegNo); | |||
2849 | EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
2850 | return Res; | |||
2851 | } | |||
2852 | ||||
2853 | // Matches a register name or register alias previously defined by '.req' | |||
2854 | unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, | |||
2855 | RegKind Kind) { | |||
2856 | unsigned RegNum = 0; | |||
2857 | if ((RegNum = matchSVEDataVectorRegName(Name))) | |||
2858 | return Kind == RegKind::SVEDataVector ? RegNum : 0; | |||
2859 | ||||
2860 | if ((RegNum = matchSVEPredicateVectorRegName(Name))) | |||
2861 | return Kind == RegKind::SVEPredicateVector ? RegNum : 0; | |||
2862 | ||||
2863 | if ((RegNum = matchSVEPredicateAsCounterRegName(Name))) | |||
2864 | return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0; | |||
2865 | ||||
2866 | if ((RegNum = MatchNeonVectorRegName(Name))) | |||
2867 | return Kind == RegKind::NeonVector ? RegNum : 0; | |||
2868 | ||||
2869 | if ((RegNum = matchMatrixRegName(Name))) | |||
2870 | return Kind == RegKind::Matrix ? RegNum : 0; | |||
2871 | ||||
2872 | if (Name.equals_insensitive("zt0")) | |||
2873 | return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0; | |||
2874 | ||||
2875 | // The parsed register must be of RegKind Scalar | |||
2876 | if ((RegNum = MatchRegisterName(Name))) | |||
2877 | return (Kind == RegKind::Scalar) ? RegNum : 0; | |||
2878 | ||||
2879 | if (!RegNum) { | |||
2880 | // Handle a few common aliases of registers. | |||
2881 | if (auto RegNum = StringSwitch<unsigned>(Name.lower()) | |||
2882 | .Case("fp", AArch64::FP) | |||
2883 | .Case("lr", AArch64::LR) | |||
2884 | .Case("x31", AArch64::XZR) | |||
2885 | .Case("w31", AArch64::WZR) | |||
2886 | .Default(0)) | |||
2887 | return Kind == RegKind::Scalar ? RegNum : 0; | |||
2888 | ||||
2889 | // Check for aliases registered via .req. Canonicalize to lower case. | |||
2890 | // That's more consistent since register names are case insensitive, and | |||
2891 | // it's how the original entry was passed in from MC/MCParser/AsmParser. | |||
2892 | auto Entry = RegisterReqs.find(Name.lower()); | |||
2893 | if (Entry == RegisterReqs.end()) | |||
2894 | return 0; | |||
2895 | ||||
2896 | // set RegNum if the match is the right kind of register | |||
2897 | if (Kind == Entry->getValue().first) | |||
2898 | RegNum = Entry->getValue().second; | |||
2899 | } | |||
2900 | return RegNum; | |||
2901 | } | |||
2902 | ||||
2903 | unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) { | |||
2904 | switch (K) { | |||
2905 | case RegKind::Scalar: | |||
2906 | case RegKind::NeonVector: | |||
2907 | case RegKind::SVEDataVector: | |||
2908 | return 32; | |||
2909 | case RegKind::Matrix: | |||
2910 | case RegKind::SVEPredicateVector: | |||
2911 | case RegKind::SVEPredicateAsCounter: | |||
2912 | return 16; | |||
2913 | case RegKind::LookupTable: | |||
2914 | return 1; | |||
2915 | } | |||
2916 | llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 2916); | |||
2917 | } | |||
2918 | ||||
2919 | /// tryParseScalarRegister - Try to parse a register name. The token must be an | |||
2920 | /// Identifier when called, and if it is a register name the token is eaten and | |||
2921 | /// the register is added to the operand list. | |||
2922 | OperandMatchResultTy | |||
2923 | AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) { | |||
2924 | const AsmToken &Tok = getTok(); | |||
2925 | if (Tok.isNot(AsmToken::Identifier)) | |||
2926 | return MatchOperand_NoMatch; | |||
2927 | ||||
2928 | std::string lowerCase = Tok.getString().lower(); | |||
2929 | unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar); | |||
2930 | if (Reg == 0) | |||
2931 | return MatchOperand_NoMatch; | |||
2932 | ||||
2933 | RegNum = Reg; | |||
2934 | Lex(); // Eat identifier token. | |||
2935 | return MatchOperand_Success; | |||
2936 | } | |||
2937 | ||||
2938 | /// tryParseSysCROperand - Try to parse a system instruction CR operand name. | |||
2939 | OperandMatchResultTy | |||
2940 | AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) { | |||
2941 | SMLoc S = getLoc(); | |||
2942 | ||||
2943 | if (getTok().isNot(AsmToken::Identifier)) { | |||
2944 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
2945 | return MatchOperand_ParseFail; | |||
2946 | } | |||
2947 | ||||
2948 | StringRef Tok = getTok().getIdentifier(); | |||
2949 | if (Tok[0] != 'c' && Tok[0] != 'C') { | |||
2950 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
2951 | return MatchOperand_ParseFail; | |||
2952 | } | |||
2953 | ||||
2954 | uint32_t CRNum; | |||
2955 | bool BadNum = Tok.drop_front().getAsInteger(10, CRNum); | |||
2956 | if (BadNum || CRNum > 15) { | |||
2957 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
2958 | return MatchOperand_ParseFail; | |||
2959 | } | |||
2960 | ||||
2961 | Lex(); // Eat identifier token. | |||
2962 | Operands.push_back( | |||
2963 | AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext())); | |||
2964 | return MatchOperand_Success; | |||
2965 | } | |||
2966 | ||||
// Either an identifier for named values or a 6-bit immediate.
// Parses an RPRFM prefetch operand and appends it to \p Operands.
OperandMatchResultTy
AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // RPRFM operands encode a 6-bit value.
  unsigned MaxVal = 63;

  // Immediate case, with optional leading hash:
  // (parseOptionalToken consumes the '#' token when present.)
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only plain constant expressions are acceptable here.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name when the encoding has one; otherwise the
    // operand carries an empty name.
    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  // Named case: the identifier must be a known RPRFM hint.
  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
  if (!RPRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePrefetch(
      RPRFM->Encoding, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
3016 | ||||
/// tryParsePrefetch - Try to parse a prefetch operand.
/// Instantiated for both PRFM (IsSVEPrefetch == false, 5-bit encoding) and
/// SVE PRF* (IsSVEPrefetch == true, 4-bit encoding) operands; the template
/// parameter selects which lookup tables and range limit apply.
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name -> encoding, for the identifier spelling.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Encoding -> canonical name, for the immediate spelling.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash. (parseOptionalToken consumes the '#' when present.)
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only plain constant expressions are acceptable here.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name when the encoding has one; otherwise the
    // operand carries an empty name.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
                                                      S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  // Named case: the identifier must be a known prefetch hint.
  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
3085 | ||||
3086 | /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command | |||
3087 | OperandMatchResultTy | |||
3088 | AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { | |||
3089 | SMLoc S = getLoc(); | |||
3090 | const AsmToken &Tok = getTok(); | |||
3091 | if (Tok.isNot(AsmToken::Identifier)) { | |||
3092 | TokError("invalid operand for instruction"); | |||
3093 | return MatchOperand_ParseFail; | |||
3094 | } | |||
3095 | ||||
3096 | auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString()); | |||
3097 | if (!PSB) { | |||
3098 | TokError("invalid operand for instruction"); | |||
3099 | return MatchOperand_ParseFail; | |||
3100 | } | |||
3101 | ||||
3102 | Operands.push_back(AArch64Operand::CreatePSBHint( | |||
3103 | PSB->Encoding, Tok.getString(), S, getContext())); | |||
3104 | Lex(); // Eat identifier token. | |||
3105 | return MatchOperand_Success; | |||
3106 | } | |||
3107 | ||||
3108 | OperandMatchResultTy | |||
3109 | AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) { | |||
3110 | SMLoc StartLoc = getLoc(); | |||
3111 | ||||
3112 | MCRegister RegNum; | |||
3113 | ||||
3114 | // The case where xzr, xzr is not present is handled by an InstAlias. | |||
3115 | ||||
3116 | auto RegTok = getTok(); // in case we need to backtrack | |||
3117 | if (tryParseScalarRegister(RegNum) != MatchOperand_Success) | |||
3118 | return MatchOperand_NoMatch; | |||
3119 | ||||
3120 | if (RegNum != AArch64::XZR) { | |||
3121 | getLexer().UnLex(RegTok); | |||
3122 | return MatchOperand_NoMatch; | |||
3123 | } | |||
3124 | ||||
3125 | if (parseComma()) | |||
3126 | return MatchOperand_ParseFail; | |||
3127 | ||||
3128 | if (tryParseScalarRegister(RegNum) != MatchOperand_Success) { | |||
3129 | TokError("expected register operand"); | |||
3130 | return MatchOperand_ParseFail; | |||
3131 | } | |||
3132 | ||||
3133 | if (RegNum != AArch64::XZR) { | |||
3134 | TokError("xzr must be followed by xzr"); | |||
3135 | return MatchOperand_ParseFail; | |||
3136 | } | |||
3137 | ||||
3138 | // We need to push something, since we claim this is an operand in .td. | |||
3139 | // See also AArch64AsmParser::parseKeywordOperand. | |||
3140 | Operands.push_back(AArch64Operand::CreateReg( | |||
3141 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
3142 | ||||
3143 | return MatchOperand_Success; | |||
3144 | } | |||
3145 | ||||
3146 | /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command | |||
3147 | OperandMatchResultTy | |||
3148 | AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) { | |||
3149 | SMLoc S = getLoc(); | |||
3150 | const AsmToken &Tok = getTok(); | |||
3151 | if (Tok.isNot(AsmToken::Identifier)) { | |||
3152 | TokError("invalid operand for instruction"); | |||
3153 | return MatchOperand_ParseFail; | |||
3154 | } | |||
3155 | ||||
3156 | auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString()); | |||
3157 | if (!BTI) { | |||
3158 | TokError("invalid operand for instruction"); | |||
3159 | return MatchOperand_ParseFail; | |||
3160 | } | |||
3161 | ||||
3162 | Operands.push_back(AArch64Operand::CreateBTIHint( | |||
3163 | BTI->Encoding, Tok.getString(), S, getContext())); | |||
3164 | Lex(); // Eat identifier token. | |||
3165 | return MatchOperand_Success; | |||
3166 | } | |||
3167 | ||||
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An immediate marker '#' is optional before the label expression.
  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Validate the relocation modifier (if any) against the set that is legal
  // on an ADRP. A plain constant (classifySymbolRef false) is accepted as-is.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // MachO GOT/TLVP page references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
3219 | ||||
3220 | /// tryParseAdrLabel - Parse and validate a source label for the ADR | |||
3221 | /// instruction. | |||
3222 | OperandMatchResultTy | |||
3223 | AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { | |||
3224 | SMLoc S = getLoc(); | |||
3225 | const MCExpr *Expr = nullptr; | |||
3226 | ||||
3227 | // Leave anything with a bracket to the default for SVE | |||
3228 | if (getTok().is(AsmToken::LBrac)) | |||
3229 | return MatchOperand_NoMatch; | |||
3230 | ||||
3231 | if (getTok().is(AsmToken::Hash)) | |||
3232 | Lex(); // Eat hash token. | |||
3233 | ||||
3234 | if (parseSymbolicImmVal(Expr)) | |||
3235 | return MatchOperand_ParseFail; | |||
3236 | ||||
3237 | AArch64MCExpr::VariantKind ELFRefKind; | |||
3238 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
3239 | int64_t Addend; | |||
3240 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
3241 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && | |||
3242 | ELFRefKind == AArch64MCExpr::VK_INVALID) { | |||
3243 | // No modifier was specified at all; this is the syntax for an ELF basic | |||
3244 | // ADR relocation (unfortunately). | |||
3245 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext()); | |||
3246 | } else { | |||
3247 | Error(S, "unexpected adr label"); | |||
3248 | return MatchOperand_ParseFail; | |||
3249 | } | |||
3250 | } | |||
3251 | ||||
3252 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
3253 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); | |||
3254 | return MatchOperand_Success; | |||
3255 | } | |||
3256 | ||||
/// tryParseFPImm - A floating point immediate expression operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other kind of operand, so
    // let another parser try; with '#' it must be an FP immediate.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The 8-bit encoded form already embeds the sign, so an explicit minus
    // (or a value wider than 8 bits) is rejected.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    // Decode the 8-bit immediate back to its floating-point value.
    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the literal tokens "#0" ".0" instead of an FP
      // immediate operand.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // The second argument records whether the conversion was exact
      // (StatusOrErr == opOK).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return MatchOperand_Success;
}
3311 | ||||
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  // "<int>:<int>" is an immediate range; hand it off to the range parser.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (getTok().isNot(AsmToken::Comma)) {
    // Plain immediate with no suffix.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // A vector-group specifier may follow the immediate (see
  // parseOptionalVGOperand); in that case emit the immediate plus a token
  // operand for the specifier.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl")) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return MatchOperand_Success;
}
3384 | ||||
/// parseCondCodeString - Parse a Condition Code string, optionally returning a
/// suggestion to help common typos.
AArch64CC::CondCode
AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
  // Standard AArch64 condition-code mnemonics; matching is case-insensitive
  // via lower().
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                     .Case("eq", AArch64CC::EQ)
                     .Case("ne", AArch64CC::NE)
                     .Case("cs", AArch64CC::HS)
                     .Case("hs", AArch64CC::HS)
                     .Case("cc", AArch64CC::LO)
                     .Case("lo", AArch64CC::LO)
                     .Case("mi", AArch64CC::MI)
                     .Case("pl", AArch64CC::PL)
                     .Case("vs", AArch64CC::VS)
                     .Case("vc", AArch64CC::VC)
                     .Case("hi", AArch64CC::HI)
                     .Case("ls", AArch64CC::LS)
                     .Case("ge", AArch64CC::GE)
                     .Case("lt", AArch64CC::LT)
                     .Case("gt", AArch64CC::GT)
                     .Case("le", AArch64CC::LE)
                     .Case("al", AArch64CC::AL)
                     .Case("nv", AArch64CC::NV)
                     .Default(AArch64CC::Invalid);

  // With SVE enabled, additional predicate-oriented names alias the base
  // condition codes.
  if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                    .Case("none",  AArch64CC::EQ)
                    .Case("any",   AArch64CC::NE)
                    .Case("nlast", AArch64CC::HS)
                    .Case("last",  AArch64CC::LO)
                    .Case("first", AArch64CC::MI)
                    .Case("nfrst", AArch64CC::PL)
                    .Case("pmore", AArch64CC::HI)
                    .Case("plast", AArch64CC::LS)
                    .Case("tcont", AArch64CC::GE)
                    .Case("tstop", AArch64CC::LT)
                    .Default(AArch64CC::Invalid);

    // "nfirst" is a common misspelling of the SVE alias "nfrst".
    if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
      Suggestion = "nfrst";
  }
  return CC;
}
3429 | ||||
/// parseCondCode - Parse a Condition Code operand.
bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")(static_cast <bool> (Tok.is(AsmToken::Identifier) && "Token is not an Identifier") ? void (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3435 , __extension__ __PRETTY_FUNCTION__));

  StringRef Cond = Tok.getString();
  std::string Suggestion;
  AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
  if (CC == AArch64CC::Invalid) {
    // Include a "did you mean" hint when parseCondCodeString produced one.
    std::string Msg = "invalid condition code";
    if (!Suggestion.empty())
      Msg += ", did you mean " + Suggestion + "?";
    return TokError(Msg);
  }
  Lex(); // Eat identifier token.

  if (invertCondCode) {
    // AL and NV have no meaningful inversion, so reject them here.
    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
      return TokError("condition codes AL and NV are invalid for this instruction");
    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
  }

  Operands.push_back(
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
  return false;
}
3458 | ||||
3459 | OperandMatchResultTy | |||
3460 | AArch64AsmParser::tryParseSVCR(OperandVector &Operands) { | |||
3461 | const AsmToken &Tok = getTok(); | |||
3462 | SMLoc S = getLoc(); | |||
3463 | ||||
3464 | if (Tok.isNot(AsmToken::Identifier)) { | |||
3465 | TokError("invalid operand for instruction"); | |||
3466 | return MatchOperand_ParseFail; | |||
3467 | } | |||
3468 | ||||
3469 | unsigned PStateImm = -1; | |||
3470 | const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString()); | |||
3471 | if (!SVCR) | |||
3472 | return MatchOperand_NoMatch; | |||
3473 | if (SVCR->haveFeatures(getSTI().getFeatureBits())) | |||
3474 | PStateImm = SVCR->Encoding; | |||
3475 | ||||
3476 | Operands.push_back( | |||
3477 | AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext())); | |||
3478 | Lex(); // Eat identifier token. | |||
3479 | return MatchOperand_Success; | |||
3480 | } | |||
3481 | ||||
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // "za", optionally with an element-width suffix ("za.b" etc.), names the
  // whole ZA array.
  if (Name.equals_insensitive("za") || Name.startswith_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes) {
        TokError(
            "Expected the register to be followed by element width suffix");
        return MatchOperand_ParseFail;
      }
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
    return MatchOperand_Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return MatchOperand_NoMatch;

  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register")(static_cast <bool> (DotPosition != StringRef::npos && "Unexpected register") ? void (0) : __assert_fail ("DotPosition != StringRef::npos && \"Unexpected register\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3521 , __extension__ __PRETTY_FUNCTION__));

  // Split the name at the dot into the register part and the element-width
  // suffix; the last character of the register part selects the access form.
  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
    return MatchOperand_ParseFail;
  }
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }
  return MatchOperand_Success;
}
3553 | ||||
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Shift/extend mnemonics are matched case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // No '#' and no integer follows: shift operations require an explicit
  // amount, extend operations default to #0.
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
3625 | ||||
// Maps architecture-extension names (as accepted by e.g. ".arch_extension")
// to the subtarget features they enable. An empty FeatureBitset marks names
// that are accepted but currently have no effect (see FIXME below).
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    {"gcs", {AArch64::FeatureGCS}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3686 | ||||
// Append a human-readable description of the required features in FBS to Str,
// for use in "<INSN> ... requires:" diagnostics. Architecture versions are
// reported via the mutually-exclusive else-if chain; if none match, the names
// of all matching entries in ExtensionMap are listed instead.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE: v8.0a is tested independently of the chain below, so it can be
  // emitted in addition to one of the later labels.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // Not an architecture version: list every extension whose features
    // intersect FBS.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}
3730 | ||||
3731 | void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands, | |||
3732 | SMLoc S) { | |||
3733 | const uint16_t Op2 = Encoding & 7; | |||
3734 | const uint16_t Cm = (Encoding & 0x78) >> 3; | |||
3735 | const uint16_t Cn = (Encoding & 0x780) >> 7; | |||
3736 | const uint16_t Op1 = (Encoding & 0x3800) >> 11; | |||
3737 | ||||
3738 | const MCExpr *Expr = MCConstantExpr::create(Op1, getContext()); | |||
3739 | ||||
3740 | Operands.push_back( | |||
3741 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); | |||
3742 | Operands.push_back( | |||
3743 | AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); | |||
3744 | Operands.push_back( | |||
3745 | AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); | |||
3746 | Expr = MCConstantExpr::create(Op2, getContext()); | |||
3747 | Operands.push_back( | |||
3748 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); | |||
3749 | } | |||
3750 | ||||
3751 | /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for | |||
3752 | /// the SYS instruction. Parse them specially so that we create a SYS MCInst. | |||
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  // None of the alias mnemonics take a '.'-qualified form.
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias lowers to a SYS instruction; emit its mnemonic token first.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Look the operand name up in the table for the given alias mnemonic,
  // diagnose missing CPU features, then expand to the SYS operand form.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {

    // Prediction restriction instructions only accept the 'rctx' operand.
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // op2 selects the particular prediction-restriction operation.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
                         : Mnemonic == "dvp" ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp" ? 0b111
                         : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  // Operand names containing "all" operate on everything and take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  // The register's presence must agree with what the chosen operand requires.
  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3854 | ||||
3855 | /// parseSyspAlias - The TLBIP instructions are simple aliases for | |||
3856 | /// the SYSP instruction. Parse them specially so that we create a SYSP MCInst. | |||
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  // The alias mnemonic never takes a '.'-qualified form.
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // TLBIP lowers to a SYSP instruction; emit its mnemonic token first.
  Operands.push_back(
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    // An optional trailing "nXS" qualifier selects the XS variant; strip it
    // before the table lookup and fold it back into the encoding below.
    bool HasnXSQualifier = Op.endswith_insensitive("nXS");
    if (HasnXSQualifier) {
      Op = Op.drop_back(3);
    }
    const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBIorig)
      return TokError("invalid operand for TLBIP instruction");
    // Build a local copy of the table entry with the nXS bit (bit 7) and the
    // FeatureXS requirement applied when the qualifier was present.
    const AArch64TLBI::TLBI TLBI(
        TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
        TLBIorig->NeedsReg,
        HasnXSQualifier
            ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
            : TLBIorig->FeaturesRequired);
    if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
      // NOTE: this local `Name` deliberately shadows the parameter; it is the
      // operand name (with qualifier restored) used in the diagnostic.
      std::string Name =
          std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
      std::string Str("TLBIP " + Name + " requires: ");
      setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI.Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  // SYSP always takes a register pair: try the XZR pair form first, then a
  // general sequential GPR pair.
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result == MatchOperand_NoMatch)
    Result = tryParseGPRSeqPair(Operands);
  if (Result != MatchOperand_Success)
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3913 | ||||
// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either a
// '#'-prefixed/bare integer immediate in [0, 15] or a named barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the symbolic name when the encoding has one; unnamed encodings
    // get an empty string.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named option: look it up in both the TSB and DB tables.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3986 | ||||
3987 | OperandMatchResultTy | |||
3988 | AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) { | |||
3989 | const AsmToken &Tok = getTok(); | |||
3990 | ||||
3991 | assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands")(static_cast <bool> (Mnemonic == "dsb" && "Instruction does not accept nXS operands" ) ? void (0) : __assert_fail ("Mnemonic == \"dsb\" && \"Instruction does not accept nXS operands\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3991 , __extension__ __PRETTY_FUNCTION__)); | |||
3992 | if (Mnemonic != "dsb") | |||
3993 | return MatchOperand_ParseFail; | |||
3994 | ||||
3995 | if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { | |||
3996 | // Immediate operand. | |||
3997 | const MCExpr *ImmVal; | |||
3998 | SMLoc ExprLoc = getLoc(); | |||
3999 | if (getParser().parseExpression(ImmVal)) | |||
4000 | return MatchOperand_ParseFail; | |||
4001 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
4002 | if (!MCE) { | |||
4003 | Error(ExprLoc, "immediate value expected for barrier operand"); | |||
4004 | return MatchOperand_ParseFail; | |||
4005 | } | |||
4006 | int64_t Value = MCE->getValue(); | |||
4007 | // v8.7-A DSB in the nXS variant accepts only the following immediate | |||
4008 | // values: 16, 20, 24, 28. | |||
4009 | if (Value != 16 && Value != 20 && Value != 24 && Value != 28) { | |||
4010 | Error(ExprLoc, "barrier operand out of range"); | |||
4011 | return MatchOperand_ParseFail; | |||
4012 | } | |||
4013 | auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value); | |||
4014 | Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name, | |||
4015 | ExprLoc, getContext(), | |||
4016 | true /*hasnXSModifier*/)); | |||
4017 | return MatchOperand_Success; | |||
4018 | } | |||
4019 | ||||
4020 | if (Tok.isNot(AsmToken::Identifier)) { | |||
4021 | TokError("invalid operand for instruction"); | |||
4022 | return MatchOperand_ParseFail; | |||
4023 | } | |||
4024 | ||||
4025 | StringRef Operand = Tok.getString(); | |||
4026 | auto DB = AArch64DBnXS::lookupDBnXSByName(Operand); | |||
4027 | ||||
4028 | if (!DB) { | |||
4029 | TokError("invalid barrier option name"); | |||
4030 | return MatchOperand_ParseFail; | |||
4031 | } | |||
4032 | ||||
4033 | Operands.push_back( | |||
4034 | AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(), | |||
4035 | getContext(), true /*hasnXSModifier*/)); | |||
4036 | Lex(); // Consume the option | |||
4037 | ||||
4038 | return MatchOperand_Success; | |||
4039 | } | |||
4040 | ||||
// Parse a system-register operand for MRS/MSR. Accepts named system
// registers (feature-gated), generic "S<op0>_..." register names, and PState
// field names; SVCR names are left for a different parser.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // SVCR is handled elsewhere; don't consume it here.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return MatchOperand_NoMatch;

  // -1 marks the direction (read/write) as invalid for this register.
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    // Fall back to the generic S<op0>_<op1>_<Cn>_<Cm>_<op2> form.
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // Also check whether the name is a PState field (imm0-15 first, then imm0-1).
  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
  if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
    if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Lex(); // Eat identifier

  return MatchOperand_Success;
}
4076 | ||||
4077 | /// tryParseNeonVectorRegister - Parse a vector register operand. | |||
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  // Returns true on failure (nothing consumed / parse error), false on
  // success, matching the parseRegister convention.
  if (getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  MCRegister Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Resolve the (possibly empty) ".<kind>" suffix to an element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  // An optional "[index]" may follow; only a hard parse failure is an error.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
4107 | ||||
4108 | OperandMatchResultTy | |||
4109 | AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) { | |||
4110 | SMLoc SIdx = getLoc(); | |||
4111 | if (parseOptionalToken(AsmToken::LBrac)) { | |||
4112 | const MCExpr *ImmVal; | |||
4113 | if (getParser().parseExpression(ImmVal)) | |||
4114 | return MatchOperand_NoMatch; | |||
4115 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
4116 | if (!MCE) { | |||
4117 | TokError("immediate value expected for vector index"); | |||
4118 | return MatchOperand_ParseFail;; | |||
4119 | } | |||
4120 | ||||
4121 | SMLoc E = getLoc(); | |||
4122 | ||||
4123 | if (parseToken(AsmToken::RBrac, "']' expected")) | |||
4124 | return MatchOperand_ParseFail;; | |||
4125 | ||||
4126 | Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx, | |||
4127 | E, getContext())); | |||
4128 | return MatchOperand_Success; | |||
4129 | } | |||
4130 | ||||
4131 | return MatchOperand_NoMatch; | |||
4132 | } | |||
4133 | ||||
4134 | // tryParseVectorRegister - Try to parse a vector register name with | |||
4135 | // optional kind specifier. If it is a register specifier, eat the token | |||
4136 | // and return it. | |||
4137 | OperandMatchResultTy | |||
4138 | AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg, StringRef &Kind, | |||
4139 | RegKind MatchKind) { | |||
4140 | const AsmToken &Tok = getTok(); | |||
4141 | ||||
4142 | if (Tok.isNot(AsmToken::Identifier)) | |||
4143 | return MatchOperand_NoMatch; | |||
4144 | ||||
4145 | StringRef Name = Tok.getString(); | |||
4146 | // If there is a kind specifier, it's separated from the register name by | |||
4147 | // a '.'. | |||
4148 | size_t Start = 0, Next = Name.find('.'); | |||
4149 | StringRef Head = Name.slice(Start, Next); | |||
4150 | unsigned RegNum = matchRegisterNameAlias(Head, MatchKind); | |||
4151 | ||||
4152 | if (RegNum) { | |||
4153 | if (Next != StringRef::npos) { | |||
4154 | Kind = Name.slice(Next, StringRef::npos); | |||
4155 | if (!isValidVectorKind(Kind, MatchKind)) { | |||
4156 | TokError("invalid vector kind qualifier"); | |||
4157 | return MatchOperand_ParseFail; | |||
4158 | } | |||
4159 | } | |||
4160 | Lex(); // Eat the register token. | |||
4161 | ||||
4162 | Reg = RegNum; | |||
4163 | return MatchOperand_Success; | |||
4164 | } | |||
4165 | ||||
4166 | return MatchOperand_NoMatch; | |||
4167 | } | |||
4168 | ||||
4169 | /// tryParseSVEPredicateVector - Parse a SVE predicate register operand. | |||
template <RegKind RK> OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (Res != MatchOperand_Success)
    return Res;

  // Resolve the (possibly empty) element-type suffix for this register kind.
  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex == MatchOperand_Success)
        return MatchOperand_Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  // Predicates-as-counters only permit zeroing predication.
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") {
    Error(getLoc(), "expecting 'z' predication");
    return MatchOperand_ParseFail;
  }

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
4236 | ||||
4237 | /// parseRegister - Parse a register operand. | |||
4238 | bool AArch64AsmParser::parseRegister(OperandVector &Operands) { | |||
4239 | // Try for a Neon vector register. | |||
4240 | if (!tryParseNeonVectorRegister(Operands)) | |||
4241 | return false; | |||
4242 | ||||
4243 | if (tryParseZTOperand(Operands) == MatchOperand_Success) | |||
4244 | return false; | |||
4245 | ||||
4246 | // Otherwise try for a scalar register. | |||
4247 | if (tryParseGPROperand<false>(Operands) == MatchOperand_Success) | |||
4248 | return false; | |||
4249 | ||||
4250 | return true; | |||
4251 | } | |||
4252 | ||||
// Parse an immediate expression with an optional leading ":specifier:"
// relocation modifier (e.g. ":lo12:sym"), wrapping the result in an
// AArch64MCExpr when a modifier was present. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  // Only meaningful (and only read) when HasELFModifier is set below.
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the parsed expression with the relocation kind, if one was given.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
4330 | ||||
// Parse a curly-braced list of SME matrix tiles (e.g. "{za0.d, za1.d}"),
// including the empty list "{}" and the "{za}" whole-array alias, and push a
// single MatrixTileList operand holding the resulting ZAD register mask.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse one "<tile>.<width>" element, returning its register and element
  // width; consumes the token only on success.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be un-lexed if this turns out not to match.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Put the '{' back so other list-operand parsers can try.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every tile seen so far.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles are accepted with a warning only.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Encode the accumulated ZAD registers as a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
4439 | ||||
/// Parse a curly-brace vector-register list such as "{ v0.4s, v1.4s }" or the
/// SVE/SME ranged form "{ z0.d - z3.d }". \p VectorKind selects which register
/// bank is accepted; \p ExpectMatch makes "no register" a hard error instead
/// of a soft NoMatch. On success a VectorList operand is pushed onto
/// \p Operands.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  // A vector list must start with '{'; anything else is not ours to parse.
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function: parses one vector register and validates
  // its ".<kind>" suffix, emitting diagnostics per NoMatchIsError.
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 4456 );
    }

    // "zt0" belongs to the lookup-table register parser; defer to it quietly.
    if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch &&
        RegTok.getString().equals_insensitive("zt0"))
      return MatchOperand_NoMatch;

    // "za"-prefixed identifiers may still be SME tile operands, so they are
    // not diagnosed here even when NoMatchIsError is set.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Ranged form "{ vA - vB }": the span implies the element count.
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance within the bank, with wraparound past the last register.
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: all gaps must share one stride, fixed by the
    // first pair of registers.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
        Error(Loc, "registers must have the same sequential stride");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Hardware lists never exceed four registers.
  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
4577 | ||||
4578 | /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions. | |||
4579 | bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) { | |||
4580 | auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true); | |||
4581 | if (ParseRes != MatchOperand_Success) | |||
4582 | return true; | |||
4583 | ||||
4584 | return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; | |||
4585 | } | |||
4586 | ||||
4587 | OperandMatchResultTy | |||
4588 | AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) { | |||
4589 | SMLoc StartLoc = getLoc(); | |||
4590 | ||||
4591 | MCRegister RegNum; | |||
4592 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); | |||
4593 | if (Res != MatchOperand_Success) | |||
4594 | return Res; | |||
4595 | ||||
4596 | if (!parseOptionalToken(AsmToken::Comma)) { | |||
4597 | Operands.push_back(AArch64Operand::CreateReg( | |||
4598 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
4599 | return MatchOperand_Success; | |||
4600 | } | |||
4601 | ||||
4602 | parseOptionalToken(AsmToken::Hash); | |||
4603 | ||||
4604 | if (getTok().isNot(AsmToken::Integer)) { | |||
4605 | Error(getLoc(), "index must be absent or #0"); | |||
4606 | return MatchOperand_ParseFail; | |||
4607 | } | |||
4608 | ||||
4609 | const MCExpr *ImmVal; | |||
4610 | if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) || | |||
4611 | cast<MCConstantExpr>(ImmVal)->getValue() != 0) { | |||
4612 | Error(getLoc(), "index must be absent or #0"); | |||
4613 | return MatchOperand_ParseFail; | |||
4614 | } | |||
4615 | ||||
4616 | Operands.push_back(AArch64Operand::CreateReg( | |||
4617 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
4618 | return MatchOperand_Success; | |||
4619 | } | |||
4620 | ||||
/// Parse an SME2 lookup-table register operand ("zt0"), optionally followed
/// by a constant index in square brackets, pushing the register (and index)
/// onto \p Operands.
OperandMatchResultTy
AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat identifier token.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    // NOTE(review): returning NoMatch here happens after both the register
    // operand was pushed and the '[' was consumed; a ParseFail would seem
    // more consistent with the diagnostics below — confirm intended behavior.
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return MatchOperand_ParseFail;
    }
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return MatchOperand_ParseFail;

    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
  }

  return MatchOperand_Success;
}
4656 | ||||
/// Parse a general-purpose register operand, optionally followed by a
/// shift/extend decoration when \p ParseShiftExtend is set (e.g.
/// "x1, lsl #3"). \p EqTy records the equality constraint used later when
/// checking tied operands. The shift/extend, when present, is folded into the
/// register operand rather than pushed separately.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Re-create the register operand with the shift/extend information
  // attached, so that the register and its decoration form one operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
4691 | ||||
4692 | bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) { | |||
4693 | MCAsmParser &Parser = getParser(); | |||
4694 | ||||
4695 | // Some SVE instructions have a decoration after the immediate, i.e. | |||
4696 | // "mul vl". We parse them here and add tokens, which must be present in the | |||
4697 | // asm string in the tablegen instruction. | |||
4698 | bool NextIsVL = | |||
4699 | Parser.getLexer().peekTok().getString().equals_insensitive("vl"); | |||
4700 | bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash); | |||
4701 | if (!getTok().getString().equals_insensitive("mul") || | |||
4702 | !(NextIsVL || NextIsHash)) | |||
4703 | return true; | |||
4704 | ||||
4705 | Operands.push_back( | |||
4706 | AArch64Operand::CreateToken("mul", getLoc(), getContext())); | |||
4707 | Lex(); // Eat the "mul" | |||
4708 | ||||
4709 | if (NextIsVL) { | |||
4710 | Operands.push_back( | |||
4711 | AArch64Operand::CreateToken("vl", getLoc(), getContext())); | |||
4712 | Lex(); // Eat the "vl" | |||
4713 | return false; | |||
4714 | } | |||
4715 | ||||
4716 | if (NextIsHash) { | |||
4717 | Lex(); // Eat the # | |||
4718 | SMLoc S = getLoc(); | |||
4719 | ||||
4720 | // Parse immediate operand. | |||
4721 | const MCExpr *ImmVal; | |||
4722 | if (!Parser.parseExpression(ImmVal)) | |||
4723 | if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) { | |||
4724 | Operands.push_back(AArch64Operand::CreateImm( | |||
4725 | MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(), | |||
4726 | getContext())); | |||
4727 | return MatchOperand_Success; | |||
4728 | } | |||
4729 | } | |||
4730 | ||||
4731 | return Error(getLoc(), "expected 'vl' or '#<imm>'"); | |||
4732 | } | |||
4733 | ||||
4734 | bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands, | |||
4735 | StringRef &VecGroup) { | |||
4736 | MCAsmParser &Parser = getParser(); | |||
4737 | auto Tok = Parser.getTok(); | |||
4738 | if (Tok.isNot(AsmToken::Identifier)) | |||
4739 | return true; | |||
4740 | ||||
4741 | StringRef VG = StringSwitch<StringRef>(Tok.getString().lower()) | |||
4742 | .Case("vgx2", "vgx2") | |||
4743 | .Case("vgx4", "vgx4") | |||
4744 | .Default(""); | |||
4745 | ||||
4746 | if (VG.empty()) | |||
4747 | return true; | |||
4748 | ||||
4749 | VecGroup = VG; | |||
4750 | Parser.Lex(); // Eat vgx[2|4] | |||
4751 | return false; | |||
4752 | } | |||
4753 | ||||
4754 | bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) { | |||
4755 | auto Tok = getTok(); | |||
4756 | if (Tok.isNot(AsmToken::Identifier)) | |||
4757 | return true; | |||
4758 | ||||
4759 | auto Keyword = Tok.getString(); | |||
4760 | Keyword = StringSwitch<StringRef>(Keyword.lower()) | |||
4761 | .Case("sm", "sm") | |||
4762 | .Case("za", "za") | |||
4763 | .Default(Keyword); | |||
4764 | Operands.push_back( | |||
4765 | AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext())); | |||
4766 | ||||
4767 | Lex(); | |||
4768 | return false; | |||
4769 | } | |||
4770 | ||||
/// parseOperand - Parse a arm instruction operand. For now this parses the
/// operand regardless of the mnemonic.
///
/// Returns true on error. \p isCondCode requests condition-code parsing for
/// identifier operands; \p invertCondCode is forwarded to parseCondCode for
/// the aliased conditional-select forms.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // NOTE(review): these locals shadow the outer S/E declared above; the
    // outer ones are only used by the other cases.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // First try a NEON vector list; if that declines (putting the '{' back),
    // fall through and treat the braces as SME ZA/tile grouping tokens.
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      // NOTE(review): implicit OperandMatchResultTy -> bool conversion;
      // relies on MatchOperand_Success converting to 0 (false) and
      // MatchOperand_ParseFail to nonzero (true) — confirm enum values.
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit the literal as the two raw tokens the matcher expects.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Normalise the constant into a 16-bit payload plus an LSL shift.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite "ldr reg, =imm" as "movz reg, #imm [, lsl #shift]".
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
              ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4962 | ||||
4963 | bool AArch64AsmParser::parseImmExpr(int64_t &Out) { | |||
4964 | const MCExpr *Expr = nullptr; | |||
4965 | SMLoc L = getLoc(); | |||
4966 | if (check(getParser().parseExpression(Expr), L, "expected expression")) | |||
4967 | return true; | |||
4968 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); | |||
4969 | if (check(!Value, L, "expected constant expression")) | |||
4970 | return true; | |||
4971 | Out = Value->getValue(); | |||
| ||||
4972 | return false; | |||
4973 | } | |||
4974 | ||||
4975 | bool AArch64AsmParser::parseComma() { | |||
4976 | if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma")) | |||
4977 | return true; | |||
4978 | // Eat the comma | |||
4979 | Lex(); | |||
4980 | return false; | |||
4981 | } | |||
4982 | ||||
4983 | bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base, | |||
4984 | unsigned First, unsigned Last) { | |||
4985 | MCRegister Reg; | |||
4986 | SMLoc Start, End; | |||
4987 | if (check(parseRegister(Reg, Start, End), getLoc(), "expected register")) | |||
4988 | return true; | |||
4989 | ||||
4990 | // Special handling for FP and LR; they aren't linearly after x28 in | |||
4991 | // the registers enum. | |||
4992 | unsigned RangeEnd = Last; | |||
4993 | if (Base == AArch64::X0) { | |||
4994 | if (Last == AArch64::FP) { | |||
4995 | RangeEnd = AArch64::X28; | |||
4996 | if (Reg == AArch64::FP) { | |||
4997 | Out = 29; | |||
4998 | return false; | |||
4999 | } | |||
5000 | } | |||
5001 | if (Last == AArch64::LR) { | |||
5002 | RangeEnd = AArch64::X28; | |||
5003 | if (Reg == AArch64::FP) { | |||
5004 | Out = 29; | |||
5005 | return false; | |||
5006 | } else if (Reg == AArch64::LR) { | |||
5007 | Out = 30; | |||
5008 | return false; | |||
5009 | } | |||
5010 | } | |||
5011 | } | |||
5012 | ||||
5013 | if (check(Reg < First || Reg > RangeEnd, Start, | |||
5014 | Twine("expected register in range ") + | |||
5015 | AArch64InstPrinter::getRegisterName(First) + " to " + | |||
5016 | AArch64InstPrinter::getRegisterName(Last))) | |||
5017 | return true; | |||
5018 | Out = Reg - Base; | |||
5019 | return false; | |||
5020 | } | |||
5021 | ||||
/// Decide whether two parsed operands name "equal" registers for tied-operand
/// checking: matching vector lists, identical scalar registers, or W/X
/// sub/super-register pairs when an operand's equality constraint requests
/// the sub or super class.
bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
                                    const MCParsedAsmOperand &Op2) const {
  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);

  // Vector lists are equal only when start register, length and stride all
  // match.
  if (AOp1.isVectorList() && AOp2.isVectorList())
    return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
           AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
           AOp1.getVectorListStride() == AOp2.getVectorListStride();

  if (!AOp1.isReg() || !AOp2.isReg())
    return false;

  // With no sub/super constraint on either side, plain register identity
  // (as implemented by the base class) decides.
  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
    return MCTargetAsmParser::areEqualRegs(Op1, Op2);

  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&(static_cast <bool> (AOp1.isScalarReg() && AOp2 .isScalarReg() && "Testing equality of non-scalar registers not supported" ) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5040 , __extension__ __PRETTY_FUNCTION__)) 
         "Testing equality of non-scalar registers not supported")(static_cast <bool> (AOp1.isScalarReg() && AOp2 .isScalarReg() && "Testing equality of non-scalar registers not supported" ) ? void (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5040 , __extension__ __PRETTY_FUNCTION__));

  // Check if a registers match their sub/super register classes.
  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
  if (AOp1.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();

  return false;
}
5054 | ||||
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Splits the mnemonic on '.' into a head token plus suffix tokens, routes
/// the SYS/SYSP alias mnemonics to their dedicated parsers, handles the
/// AArch64-specific ".req" directive, then parses the comma-separated operand
/// list. Returns true on error.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalise the legacy "b<cond>" spellings to "b.<cond>" so the suffix
  // handling below sees a single form.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
    return parseSysAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //  '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5203 | ||||
5204 | static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) { | |||
5205 | assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(static_cast <bool> ((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)) ? void (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5205 , __extension__ __PRETTY_FUNCTION__)); | |||
5206 | return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) || | |||
5207 | (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) || | |||
5208 | (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) || | |||
5209 | (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) || | |||
5210 | (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) || | |||
5211 | (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0)); | |||
5212 | } | |||
5213 | ||||
5214 | // FIXME: This entire function is a giant hack to provide us with decent | |||
5215 | // operand range validation/diagnostics until TableGen/MC can be extended | |||
5216 | // to support autogeneration of this kind of validation. | |||
5217 | bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc, | |||
5218 | SmallVectorImpl<SMLoc> &Loc) { | |||
5219 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); | |||
5220 | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); | |||
5221 | ||||
5222 | // A prefix only applies to the instruction following it. Here we extract | |||
5223 | // prefix information for the next instruction before validating the current | |||
5224 | // one so that in the case of failure we don't erronously continue using the | |||
5225 | // current prefix. | |||
5226 | PrefixInfo Prefix = NextPrefix; | |||
5227 | NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags); | |||
5228 | ||||
5229 | // Before validating the instruction in isolation we run through the rules | |||
5230 | // applicable when it follows a prefix instruction. | |||
5231 | // NOTE: brk & hlt can be prefixed but require no additional validation. | |||
5232 | if (Prefix.isActive() && | |||
5233 | (Inst.getOpcode() != AArch64::BRK) && | |||
5234 | (Inst.getOpcode() != AArch64::HLT)) { | |||
5235 | ||||
5236 | // Prefixed intructions must have a destructive operand. | |||
5237 | if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) == | |||
5238 | AArch64::NotDestructive) | |||
5239 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
5240 | " movprfx, suggest replacing movprfx with mov"); | |||
5241 | ||||
5242 | // Destination operands must match. | |||
5243 | if (Inst.getOperand(0).getReg() != Prefix.getDstReg()) | |||
5244 | return Error(Loc[0], "instruction is unpredictable when following a" | |||
5245 | " movprfx writing to a different destination"); | |||
5246 | ||||
5247 | // Destination operand must not be used in any other location. | |||
5248 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) { | |||
5249 | if (Inst.getOperand(i).isReg() && | |||
5250 | (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) && | |||
5251 | isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg())) | |||
5252 | return Error(Loc[0], "instruction is unpredictable when following a" | |||
5253 | " movprfx and destination also used as non-destructive" | |||
5254 | " source"); | |||
5255 | } | |||
5256 | ||||
5257 | auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID]; | |||
5258 | if (Prefix.isPredicated()) { | |||
5259 | int PgIdx = -1; | |||
5260 | ||||
5261 | // Find the instructions general predicate. | |||
5262 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) | |||
5263 | if (Inst.getOperand(i).isReg() && | |||
5264 | PPRRegClass.contains(Inst.getOperand(i).getReg())) { | |||
5265 | PgIdx = i; | |||
5266 | break; | |||
5267 | } | |||
5268 | ||||
5269 | // Instruction must be predicated if the movprfx is predicated. | |||
5270 | if (PgIdx == -1 || | |||
5271 | (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone) | |||
5272 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
5273 | " predicated movprfx, suggest using unpredicated movprfx"); | |||
5274 | ||||
5275 | // Instruction must use same general predicate as the movprfx. | |||
5276 | if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg()) | |||
5277 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
5278 | " predicated movprfx using a different general predicate"); | |||
5279 | ||||
5280 | // Instruction element type must match the movprfx. | |||
5281 | if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize()) | |||
5282 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
5283 | " predicated movprfx with a different element size"); | |||
5284 | } | |||
5285 | } | |||
5286 | ||||
5287 | // Check for indexed addressing modes w/ the base register being the | |||
5288 | // same as a destination/source register or pair load where | |||
5289 | // the Rt == Rt2. All of those are undefined behaviour. | |||
5290 | switch (Inst.getOpcode()) { | |||
5291 | case AArch64::LDPSWpre: | |||
5292 | case AArch64::LDPWpost: | |||
5293 | case AArch64::LDPWpre: | |||
5294 | case AArch64::LDPXpost: | |||
5295 | case AArch64::LDPXpre: { | |||
5296 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
5297 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
5298 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
5299 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
5300 | return Error(Loc[0], "unpredictable LDP instruction, writeback base " | |||
5301 | "is also a destination"); | |||
5302 | if (RI->isSubRegisterEq(Rn, Rt2)) | |||
5303 | return Error(Loc[1], "unpredictable LDP instruction, writeback base " | |||
5304 | "is also a destination"); | |||
5305 | [[fallthrough]]; | |||
5306 | } | |||
5307 | case AArch64::LDR_ZA: | |||
5308 | case AArch64::STR_ZA: { | |||
5309 | if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() && | |||
5310 | Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm()) | |||
5311 | return Error(Loc[1], | |||
5312 | "unpredictable instruction, immediate and offset mismatch."); | |||
5313 | break; | |||
5314 | } | |||
5315 | case AArch64::LDPDi: | |||
5316 | case AArch64::LDPQi: | |||
5317 | case AArch64::LDPSi: | |||
5318 | case AArch64::LDPSWi: | |||
5319 | case AArch64::LDPWi: | |||
5320 | case AArch64::LDPXi: { | |||
5321 | unsigned Rt = Inst.getOperand(0).getReg(); | |||
5322 | unsigned Rt2 = Inst.getOperand(1).getReg(); | |||
5323 | if (Rt == Rt2) | |||
5324 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); | |||
5325 | break; | |||
5326 | } | |||
5327 | case AArch64::LDPDpost: | |||
5328 | case AArch64::LDPDpre: | |||
5329 | case AArch64::LDPQpost: | |||
5330 | case AArch64::LDPQpre: | |||
5331 | case AArch64::LDPSpost: | |||
5332 | case AArch64::LDPSpre: | |||
5333 | case AArch64::LDPSWpost: { | |||
5334 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
5335 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
5336 | if (Rt == Rt2) | |||
5337 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); | |||
5338 | break; | |||
5339 | } | |||
5340 | case AArch64::STPDpost: | |||
5341 | case AArch64::STPDpre: | |||
5342 | case AArch64::STPQpost: | |||
5343 | case AArch64::STPQpre: | |||
5344 | case AArch64::STPSpost: | |||
5345 | case AArch64::STPSpre: | |||
5346 | case AArch64::STPWpost: | |||
5347 | case AArch64::STPWpre: | |||
5348 | case AArch64::STPXpost: | |||
5349 | case AArch64::STPXpre: { | |||
5350 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
5351 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
5352 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
5353 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
5354 | return Error(Loc[0], "unpredictable STP instruction, writeback base " | |||
5355 | "is also a source"); | |||
5356 | if (RI->isSubRegisterEq(Rn, Rt2)) | |||
5357 | return Error(Loc[1], "unpredictable STP instruction, writeback base " | |||
5358 | "is also a source"); | |||
5359 | break; | |||
5360 | } | |||
5361 | case AArch64::LDRBBpre: | |||
5362 | case AArch64::LDRBpre: | |||
5363 | case AArch64::LDRHHpre: | |||
5364 | case AArch64::LDRHpre: | |||
5365 | case AArch64::LDRSBWpre: | |||
5366 | case AArch64::LDRSBXpre: | |||
5367 | case AArch64::LDRSHWpre: | |||
5368 | case AArch64::LDRSHXpre: | |||
5369 | case AArch64::LDRSWpre: | |||
5370 | case AArch64::LDRWpre: | |||
5371 | case AArch64::LDRXpre: | |||
5372 | case AArch64::LDRBBpost: | |||
5373 | case AArch64::LDRBpost: | |||
5374 | case AArch64::LDRHHpost: | |||
5375 | case AArch64::LDRHpost: | |||
5376 | case AArch64::LDRSBWpost: | |||
5377 | case AArch64::LDRSBXpost: | |||
5378 | case AArch64::LDRSHWpost: | |||
5379 | case AArch64::LDRSHXpost: | |||
5380 | case AArch64::LDRSWpost: | |||
5381 | case AArch64::LDRWpost: | |||
5382 | case AArch64::LDRXpost: { | |||
5383 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
5384 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
5385 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
5386 | return Error(Loc[0], "unpredictable LDR instruction, writeback base " | |||
5387 | "is also a source"); | |||
5388 | break; | |||
5389 | } | |||
5390 | case AArch64::STRBBpost: | |||
5391 | case AArch64::STRBpost: | |||
5392 | case AArch64::STRHHpost: | |||
5393 | case AArch64::STRHpost: | |||
5394 | case AArch64::STRWpost: | |||
5395 | case AArch64::STRXpost: | |||
5396 | case AArch64::STRBBpre: | |||
5397 | case AArch64::STRBpre: | |||
5398 | case AArch64::STRHHpre: | |||
5399 | case AArch64::STRHpre: | |||
5400 | case AArch64::STRWpre: | |||
5401 | case AArch64::STRXpre: { | |||
5402 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
5403 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
5404 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
5405 | return Error(Loc[0], "unpredictable STR instruction, writeback base " | |||
5406 | "is also a source"); | |||
5407 | break; | |||
5408 | } | |||
5409 | case AArch64::STXRB: | |||
5410 | case AArch64::STXRH: | |||
5411 | case AArch64::STXRW: | |||
5412 | case AArch64::STXRX: | |||
5413 | case AArch64::STLXRB: | |||
5414 | case AArch64::STLXRH: | |||
5415 | case AArch64::STLXRW: | |||
5416 | case AArch64::STLXRX: { | |||
5417 | unsigned Rs = Inst.getOperand(0).getReg(); | |||
5418 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
5419 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
5420 | if (RI->isSubRegisterEq(Rt, Rs) || | |||
5421 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) | |||
5422 | return Error(Loc[0], | |||
5423 | "unpredictable STXR instruction, status is also a source"); | |||
5424 | break; | |||
5425 | } | |||
5426 | case AArch64::STXPW: | |||
5427 | case AArch64::STXPX: | |||
5428 | case AArch64::STLXPW: | |||
5429 | case AArch64::STLXPX: { | |||
5430 | unsigned Rs = Inst.getOperand(0).getReg(); | |||
5431 | unsigned Rt1 = Inst.getOperand(1).getReg(); | |||
5432 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
5433 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
5434 | if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) || | |||
5435 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) | |||
5436 | return Error(Loc[0], | |||
5437 | "unpredictable STXP instruction, status is also a source"); | |||
5438 | break; | |||
5439 | } | |||
5440 | case AArch64::LDRABwriteback: | |||
5441 | case AArch64::LDRAAwriteback: { | |||
5442 | unsigned Xt = Inst.getOperand(0).getReg(); | |||
5443 | unsigned Xn = Inst.getOperand(1).getReg(); | |||
5444 | if (Xt == Xn) | |||
5445 | return Error(Loc[0], | |||
5446 | "unpredictable LDRA instruction, writeback base" | |||
5447 | " is also a destination"); | |||
5448 | break; | |||
5449 | } | |||
5450 | } | |||
5451 | ||||
5452 | // Check v8.8-A memops instructions. | |||
5453 | switch (Inst.getOpcode()) { | |||
5454 | case AArch64::CPYFP: | |||
5455 | case AArch64::CPYFPWN: | |||
5456 | case AArch64::CPYFPRN: | |||
5457 | case AArch64::CPYFPN: | |||
5458 | case AArch64::CPYFPWT: | |||
5459 | case AArch64::CPYFPWTWN: | |||
5460 | case AArch64::CPYFPWTRN: | |||
5461 | case AArch64::CPYFPWTN: | |||
5462 | case AArch64::CPYFPRT: | |||
5463 | case AArch64::CPYFPRTWN: | |||
5464 | case AArch64::CPYFPRTRN: | |||
5465 | case AArch64::CPYFPRTN: | |||
5466 | case AArch64::CPYFPT: | |||
5467 | case AArch64::CPYFPTWN: | |||
5468 | case AArch64::CPYFPTRN: | |||
5469 | case AArch64::CPYFPTN: | |||
5470 | case AArch64::CPYFM: | |||
5471 | case AArch64::CPYFMWN: | |||
5472 | case AArch64::CPYFMRN: | |||
5473 | case AArch64::CPYFMN: | |||
5474 | case AArch64::CPYFMWT: | |||
5475 | case AArch64::CPYFMWTWN: | |||
5476 | case AArch64::CPYFMWTRN: | |||
5477 | case AArch64::CPYFMWTN: | |||
5478 | case AArch64::CPYFMRT: | |||
5479 | case AArch64::CPYFMRTWN: | |||
5480 | case AArch64::CPYFMRTRN: | |||
5481 | case AArch64::CPYFMRTN: | |||
5482 | case AArch64::CPYFMT: | |||
5483 | case AArch64::CPYFMTWN: | |||
5484 | case AArch64::CPYFMTRN: | |||
5485 | case AArch64::CPYFMTN: | |||
5486 | case AArch64::CPYFE: | |||
5487 | case AArch64::CPYFEWN: | |||
5488 | case AArch64::CPYFERN: | |||
5489 | case AArch64::CPYFEN: | |||
5490 | case AArch64::CPYFEWT: | |||
5491 | case AArch64::CPYFEWTWN: | |||
5492 | case AArch64::CPYFEWTRN: | |||
5493 | case AArch64::CPYFEWTN: | |||
5494 | case AArch64::CPYFERT: | |||
5495 | case AArch64::CPYFERTWN: | |||
5496 | case AArch64::CPYFERTRN: | |||
5497 | case AArch64::CPYFERTN: | |||
5498 | case AArch64::CPYFET: | |||
5499 | case AArch64::CPYFETWN: | |||
5500 | case AArch64::CPYFETRN: | |||
5501 | case AArch64::CPYFETN: | |||
5502 | case AArch64::CPYP: | |||
5503 | case AArch64::CPYPWN: | |||
5504 | case AArch64::CPYPRN: | |||
5505 | case AArch64::CPYPN: | |||
5506 | case AArch64::CPYPWT: | |||
5507 | case AArch64::CPYPWTWN: | |||
5508 | case AArch64::CPYPWTRN: | |||
5509 | case AArch64::CPYPWTN: | |||
5510 | case AArch64::CPYPRT: | |||
5511 | case AArch64::CPYPRTWN: | |||
5512 | case AArch64::CPYPRTRN: | |||
5513 | case AArch64::CPYPRTN: | |||
5514 | case AArch64::CPYPT: | |||
5515 | case AArch64::CPYPTWN: | |||
5516 | case AArch64::CPYPTRN: | |||
5517 | case AArch64::CPYPTN: | |||
5518 | case AArch64::CPYM: | |||
5519 | case AArch64::CPYMWN: | |||
5520 | case AArch64::CPYMRN: | |||
5521 | case AArch64::CPYMN: | |||
5522 | case AArch64::CPYMWT: | |||
5523 | case AArch64::CPYMWTWN: | |||
5524 | case AArch64::CPYMWTRN: | |||
5525 | case AArch64::CPYMWTN: | |||
5526 | case AArch64::CPYMRT: | |||
5527 | case AArch64::CPYMRTWN: | |||
5528 | case AArch64::CPYMRTRN: | |||
5529 | case AArch64::CPYMRTN: | |||
5530 | case AArch64::CPYMT: | |||
5531 | case AArch64::CPYMTWN: | |||
5532 | case AArch64::CPYMTRN: | |||
5533 | case AArch64::CPYMTN: | |||
5534 | case AArch64::CPYE: | |||
5535 | case AArch64::CPYEWN: | |||
5536 | case AArch64::CPYERN: | |||
5537 | case AArch64::CPYEN: | |||
5538 | case AArch64::CPYEWT: | |||
5539 | case AArch64::CPYEWTWN: | |||
5540 | case AArch64::CPYEWTRN: | |||
5541 | case AArch64::CPYEWTN: | |||
5542 | case AArch64::CPYERT: | |||
5543 | case AArch64::CPYERTWN: | |||
5544 | case AArch64::CPYERTRN: | |||
5545 | case AArch64::CPYERTN: | |||
5546 | case AArch64::CPYET: | |||
5547 | case AArch64::CPYETWN: | |||
5548 | case AArch64::CPYETRN: | |||
5549 | case AArch64::CPYETN: { | |||
5550 | unsigned Xd_wb = Inst.getOperand(0).getReg(); | |||
5551 | unsigned Xs_wb = Inst.getOperand(1).getReg(); | |||
5552 | unsigned Xn_wb = Inst.getOperand(2).getReg(); | |||
5553 | unsigned Xd = Inst.getOperand(3).getReg(); | |||
5554 | unsigned Xs = Inst.getOperand(4).getReg(); | |||
5555 | unsigned Xn = Inst.getOperand(5).getReg(); | |||
5556 | if (Xd_wb != Xd) | |||
5557 | return Error(Loc[0], | |||
5558 | "invalid CPY instruction, Xd_wb and Xd do not match"); | |||
5559 | if (Xs_wb != Xs) | |||
5560 | return Error(Loc[0], | |||
5561 | "invalid CPY instruction, Xs_wb and Xs do not match"); | |||
5562 | if (Xn_wb != Xn) | |||
5563 | return Error(Loc[0], | |||
5564 | "invalid CPY instruction, Xn_wb and Xn do not match"); | |||
5565 | if (Xd == Xs) | |||
5566 | return Error(Loc[0], "invalid CPY instruction, destination and source" | |||
5567 | " registers are the same"); | |||
5568 | if (Xd == Xn) | |||
5569 | return Error(Loc[0], "invalid CPY instruction, destination and size" | |||
5570 | " registers are the same"); | |||
5571 | if (Xs == Xn) | |||
5572 | return Error(Loc[0], "invalid CPY instruction, source and size" | |||
5573 | " registers are the same"); | |||
5574 | break; | |||
5575 | } | |||
5576 | case AArch64::SETP: | |||
5577 | case AArch64::SETPT: | |||
5578 | case AArch64::SETPN: | |||
5579 | case AArch64::SETPTN: | |||
5580 | case AArch64::SETM: | |||
5581 | case AArch64::SETMT: | |||
5582 | case AArch64::SETMN: | |||
5583 | case AArch64::SETMTN: | |||
5584 | case AArch64::SETE: | |||
5585 | case AArch64::SETET: | |||
5586 | case AArch64::SETEN: | |||
5587 | case AArch64::SETETN: | |||
5588 | case AArch64::SETGP: | |||
5589 | case AArch64::SETGPT: | |||
5590 | case AArch64::SETGPN: | |||
5591 | case AArch64::SETGPTN: | |||
5592 | case AArch64::SETGM: | |||
5593 | case AArch64::SETGMT: | |||
5594 | case AArch64::SETGMN: | |||
5595 | case AArch64::SETGMTN: | |||
5596 | case AArch64::MOPSSETGE: | |||
5597 | case AArch64::MOPSSETGET: | |||
5598 | case AArch64::MOPSSETGEN: | |||
5599 | case AArch64::MOPSSETGETN: { | |||
5600 | unsigned Xd_wb = Inst.getOperand(0).getReg(); | |||
5601 | unsigned Xn_wb = Inst.getOperand(1).getReg(); | |||
5602 | unsigned Xd = Inst.getOperand(2).getReg(); | |||
5603 | unsigned Xn = Inst.getOperand(3).getReg(); | |||
5604 | unsigned Xm = Inst.getOperand(4).getReg(); | |||
5605 | if (Xd_wb != Xd) | |||
5606 | return Error(Loc[0], | |||
5607 | "invalid SET instruction, Xd_wb and Xd do not match"); | |||
5608 | if (Xn_wb != Xn) | |||
5609 | return Error(Loc[0], | |||
5610 | "invalid SET instruction, Xn_wb and Xn do not match"); | |||
5611 | if (Xd == Xn) | |||
5612 | return Error(Loc[0], "invalid SET instruction, destination and size" | |||
5613 | " registers are the same"); | |||
5614 | if (Xd == Xm) | |||
5615 | return Error(Loc[0], "invalid SET instruction, destination and source" | |||
5616 | " registers are the same"); | |||
5617 | if (Xn == Xm) | |||
5618 | return Error(Loc[0], "invalid SET instruction, source and size" | |||
5619 | " registers are the same"); | |||
5620 | break; | |||
5621 | } | |||
5622 | } | |||
5623 | ||||
5624 | // Now check immediate ranges. Separate from the above as there is overlap | |||
5625 | // in the instructions being checked and this keeps the nested conditionals | |||
5626 | // to a minimum. | |||
5627 | switch (Inst.getOpcode()) { | |||
5628 | case AArch64::ADDSWri: | |||
5629 | case AArch64::ADDSXri: | |||
5630 | case AArch64::ADDWri: | |||
5631 | case AArch64::ADDXri: | |||
5632 | case AArch64::SUBSWri: | |||
5633 | case AArch64::SUBSXri: | |||
5634 | case AArch64::SUBWri: | |||
5635 | case AArch64::SUBXri: { | |||
5636 | // Annoyingly we can't do this in the isAddSubImm predicate, so there is | |||
5637 | // some slight duplication here. | |||
5638 | if (Inst.getOperand(2).isExpr()) { | |||
5639 | const MCExpr *Expr = Inst.getOperand(2).getExpr(); | |||
5640 | AArch64MCExpr::VariantKind ELFRefKind; | |||
5641 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
5642 | int64_t Addend; | |||
5643 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
5644 | ||||
5645 | // Only allow these with ADDXri. | |||
5646 | if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || | |||
5647 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) && | |||
5648 | Inst.getOpcode() == AArch64::ADDXri) | |||
5649 | return false; | |||
5650 | ||||
5651 | // Only allow these with ADDXri/ADDWri | |||
5652 | if ((ELFRefKind == AArch64MCExpr::VK_LO12 || | |||
5653 | ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 || | |||
5654 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || | |||
5655 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || | |||
5656 | ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 || | |||
5657 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || | |||
5658 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || | |||
5659 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || | |||
5660 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || | |||
5661 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) && | |||
5662 | (Inst.getOpcode() == AArch64::ADDXri || | |||
5663 | Inst.getOpcode() == AArch64::ADDWri)) | |||
5664 | return false; | |||
5665 | ||||
5666 | // Don't allow symbol refs in the immediate field otherwise | |||
5667 | // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of | |||
5668 | // operands of the original instruction (i.e. 'add w0, w1, borked' vs | |||
5669 | // 'cmp w0, 'borked') | |||
5670 | return Error(Loc.back(), "invalid immediate expression"); | |||
5671 | } | |||
5672 | // We don't validate more complex expressions here | |||
5673 | } | |||
5674 | return false; | |||
5675 | } | |||
5676 | default: | |||
5677 | return false; | |||
5678 | } | |||
5679 | } | |||
5680 | ||||
// Forward declaration of the mnemonic spell checker used to suggest a
// close valid mnemonic in diagnostics; presumably defined later in this
// file by the TableGen-generated matcher — not visible in this chunk.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
5684 | ||||
5685 | bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, | |||
5686 | uint64_t ErrorInfo, | |||
5687 | OperandVector &Operands) { | |||
5688 | switch (ErrCode) { | |||
5689 | case Match_InvalidTiedOperand: { | |||
5690 | auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]); | |||
5691 | if (Op.isVectorList()) | |||
5692 | return Error(Loc, "operand must match destination register list"); | |||
5693 | ||||
5694 | assert(Op.isReg() && "Unexpected operand type")(static_cast <bool> (Op.isReg() && "Unexpected operand type" ) ? void (0) : __assert_fail ("Op.isReg() && \"Unexpected operand type\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5694 , __extension__ __PRETTY_FUNCTION__)); | |||
5695 | switch (Op.getRegEqualityTy()) { | |||
5696 | case RegConstraintEqualityTy::EqualsSubReg: | |||
5697 | return Error(Loc, "operand must be 64-bit form of destination register"); | |||
5698 | case RegConstraintEqualityTy::EqualsSuperReg: | |||
5699 | return Error(Loc, "operand must be 32-bit form of destination register"); | |||
5700 | case RegConstraintEqualityTy::EqualsReg: | |||
5701 | return Error(Loc, "operand must match destination register"); | |||
5702 | } | |||
5703 | llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5703 ); | |||
5704 | } | |||
5705 | case Match_MissingFeature: | |||
5706 | return Error(Loc, | |||
5707 | "instruction requires a CPU feature not currently enabled"); | |||
5708 | case Match_InvalidOperand: | |||
5709 | return Error(Loc, "invalid operand for instruction"); | |||
5710 | case Match_InvalidSuffix: | |||
5711 | return Error(Loc, "invalid type suffix for instruction"); | |||
5712 | case Match_InvalidCondCode: | |||
5713 | return Error(Loc, "expected AArch64 condition code"); | |||
5714 | case Match_AddSubRegExtendSmall: | |||
5715 | return Error(Loc, | |||
5716 | "expected '[su]xt[bhw]' with optional integer in range [0, 4]"); | |||
5717 | case Match_AddSubRegExtendLarge: | |||
5718 | return Error(Loc, | |||
5719 | "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]"); | |||
5720 | case Match_AddSubSecondSource: | |||
5721 | return Error(Loc, | |||
5722 | "expected compatible register, symbol or integer in range [0, 4095]"); | |||
5723 | case Match_LogicalSecondSource: | |||
5724 | return Error(Loc, "expected compatible register or logical immediate"); | |||
5725 | case Match_InvalidMovImm32Shift: | |||
5726 | return Error(Loc, "expected 'lsl' with optional integer 0 or 16"); | |||
5727 | case Match_InvalidMovImm64Shift: | |||
5728 | return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48"); | |||
5729 | case Match_AddSubRegShift32: | |||
5730 | return Error(Loc, | |||
5731 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]"); | |||
5732 | case Match_AddSubRegShift64: | |||
5733 | return Error(Loc, | |||
5734 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]"); | |||
5735 | case Match_InvalidFPImm: | |||
5736 | return Error(Loc, | |||
5737 | "expected compatible register or floating-point constant"); | |||
5738 | case Match_InvalidMemoryIndexedSImm6: | |||
5739 | return Error(Loc, "index must be an integer in range [-32, 31]."); | |||
5740 | case Match_InvalidMemoryIndexedSImm5: | |||
5741 | return Error(Loc, "index must be an integer in range [-16, 15]."); | |||
5742 | case Match_InvalidMemoryIndexed1SImm4: | |||
5743 | return Error(Loc, "index must be an integer in range [-8, 7]."); | |||
5744 | case Match_InvalidMemoryIndexed2SImm4: | |||
5745 | return Error(Loc, "index must be a multiple of 2 in range [-16, 14]."); | |||
5746 | case Match_InvalidMemoryIndexed3SImm4: | |||
5747 | return Error(Loc, "index must be a multiple of 3 in range [-24, 21]."); | |||
5748 | case Match_InvalidMemoryIndexed4SImm4: | |||
5749 | return Error(Loc, "index must be a multiple of 4 in range [-32, 28]."); | |||
5750 | case Match_InvalidMemoryIndexed16SImm4: | |||
5751 | return Error(Loc, "index must be a multiple of 16 in range [-128, 112]."); | |||
5752 | case Match_InvalidMemoryIndexed32SImm4: | |||
5753 | return Error(Loc, "index must be a multiple of 32 in range [-256, 224]."); | |||
5754 | case Match_InvalidMemoryIndexed1SImm6: | |||
5755 | return Error(Loc, "index must be an integer in range [-32, 31]."); | |||
5756 | case Match_InvalidMemoryIndexedSImm8: | |||
5757 | return Error(Loc, "index must be an integer in range [-128, 127]."); | |||
5758 | case Match_InvalidMemoryIndexedSImm9: | |||
5759 | return Error(Loc, "index must be an integer in range [-256, 255]."); | |||
5760 | case Match_InvalidMemoryIndexed16SImm9: | |||
5761 | return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080]."); | |||
5762 | case Match_InvalidMemoryIndexed8SImm10: | |||
5763 | return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088]."); | |||
5764 | case Match_InvalidMemoryIndexed4SImm7: | |||
5765 | return Error(Loc, "index must be a multiple of 4 in range [-256, 252]."); | |||
5766 | case Match_InvalidMemoryIndexed8SImm7: | |||
5767 | return Error(Loc, "index must be a multiple of 8 in range [-512, 504]."); | |||
5768 | case Match_InvalidMemoryIndexed16SImm7: | |||
5769 | return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008]."); | |||
5770 | case Match_InvalidMemoryIndexed8UImm5: | |||
5771 | return Error(Loc, "index must be a multiple of 8 in range [0, 248]."); | |||
5772 | case Match_InvalidMemoryIndexed8UImm3: | |||
5773 | return Error(Loc, "index must be a multiple of 8 in range [0, 56]."); | |||
5774 | case Match_InvalidMemoryIndexed4UImm5: | |||
5775 | return Error(Loc, "index must be a multiple of 4 in range [0, 124]."); | |||
5776 | case Match_InvalidMemoryIndexed2UImm5: | |||
5777 | return Error(Loc, "index must be a multiple of 2 in range [0, 62]."); | |||
5778 | case Match_InvalidMemoryIndexed8UImm6: | |||
5779 | return Error(Loc, "index must be a multiple of 8 in range [0, 504]."); | |||
5780 | case Match_InvalidMemoryIndexed16UImm6: | |||
5781 | return Error(Loc, "index must be a multiple of 16 in range [0, 1008]."); | |||
5782 | case Match_InvalidMemoryIndexed4UImm6: | |||
5783 | return Error(Loc, "index must be a multiple of 4 in range [0, 252]."); | |||
5784 | case Match_InvalidMemoryIndexed2UImm6: | |||
5785 | return Error(Loc, "index must be a multiple of 2 in range [0, 126]."); | |||
5786 | case Match_InvalidMemoryIndexed1UImm6: | |||
5787 | return Error(Loc, "index must be in range [0, 63]."); | |||
5788 | case Match_InvalidMemoryWExtend8: | |||
5789 | return Error(Loc, | |||
5790 | "expected 'uxtw' or 'sxtw' with optional shift of #0"); | |||
5791 | case Match_InvalidMemoryWExtend16: | |||
5792 | return Error(Loc, | |||
5793 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1"); | |||
5794 | case Match_InvalidMemoryWExtend32: | |||
5795 | return Error(Loc, | |||
5796 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2"); | |||
5797 | case Match_InvalidMemoryWExtend64: | |||
5798 | return Error(Loc, | |||
5799 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3"); | |||
5800 | case Match_InvalidMemoryWExtend128: | |||
5801 | return Error(Loc, | |||
5802 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4"); | |||
5803 | case Match_InvalidMemoryXExtend8: | |||
5804 | return Error(Loc, | |||
5805 | "expected 'lsl' or 'sxtx' with optional shift of #0"); | |||
5806 | case Match_InvalidMemoryXExtend16: | |||
5807 | return Error(Loc, | |||
5808 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #1"); | |||
5809 | case Match_InvalidMemoryXExtend32: | |||
5810 | return Error(Loc, | |||
5811 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #2"); | |||
5812 | case Match_InvalidMemoryXExtend64: | |||
5813 | return Error(Loc, | |||
5814 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #3"); | |||
5815 | case Match_InvalidMemoryXExtend128: | |||
5816 | return Error(Loc, | |||
5817 |