File: | lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp |
Warning: | line 2488, column 7 1st function call argument is an uninitialized value |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | ||||
9 | #include "MCTargetDesc/AArch64AddressingModes.h" | |||
10 | #include "MCTargetDesc/AArch64MCExpr.h" | |||
11 | #include "MCTargetDesc/AArch64MCTargetDesc.h" | |||
12 | #include "MCTargetDesc/AArch64TargetStreamer.h" | |||
13 | #include "AArch64InstrInfo.h" | |||
14 | #include "Utils/AArch64BaseInfo.h" | |||
15 | #include "llvm/ADT/APFloat.h" | |||
16 | #include "llvm/ADT/APInt.h" | |||
17 | #include "llvm/ADT/ArrayRef.h" | |||
18 | #include "llvm/ADT/STLExtras.h" | |||
19 | #include "llvm/ADT/SmallVector.h" | |||
20 | #include "llvm/ADT/StringExtras.h" | |||
21 | #include "llvm/ADT/StringMap.h" | |||
22 | #include "llvm/ADT/StringRef.h" | |||
23 | #include "llvm/ADT/StringSwitch.h" | |||
24 | #include "llvm/ADT/Twine.h" | |||
25 | #include "llvm/MC/MCContext.h" | |||
26 | #include "llvm/MC/MCExpr.h" | |||
27 | #include "llvm/MC/MCInst.h" | |||
28 | #include "llvm/MC/MCLinkerOptimizationHint.h" | |||
29 | #include "llvm/MC/MCObjectFileInfo.h" | |||
30 | #include "llvm/MC/MCParser/MCAsmLexer.h" | |||
31 | #include "llvm/MC/MCParser/MCAsmParser.h" | |||
32 | #include "llvm/MC/MCParser/MCAsmParserExtension.h" | |||
33 | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" | |||
34 | #include "llvm/MC/MCParser/MCTargetAsmParser.h" | |||
35 | #include "llvm/MC/MCRegisterInfo.h" | |||
36 | #include "llvm/MC/MCStreamer.h" | |||
37 | #include "llvm/MC/MCSubtargetInfo.h" | |||
38 | #include "llvm/MC/MCSymbol.h" | |||
39 | #include "llvm/MC/MCTargetOptions.h" | |||
40 | #include "llvm/MC/SubtargetFeature.h" | |||
41 | #include "llvm/MC/MCValue.h" | |||
42 | #include "llvm/Support/Casting.h" | |||
43 | #include "llvm/Support/Compiler.h" | |||
44 | #include "llvm/Support/ErrorHandling.h" | |||
45 | #include "llvm/Support/MathExtras.h" | |||
46 | #include "llvm/Support/SMLoc.h" | |||
47 | #include "llvm/Support/TargetParser.h" | |||
48 | #include "llvm/Support/TargetRegistry.h" | |||
49 | #include "llvm/Support/raw_ostream.h" | |||
50 | #include <cassert> | |||
51 | #include <cctype> | |||
52 | #include <cstdint> | |||
53 | #include <cstdio> | |||
54 | #include <string> | |||
55 | #include <tuple> | |||
56 | #include <utility> | |||
57 | #include <vector> | |||
58 | ||||
59 | using namespace llvm; | |||
60 | ||||
61 | namespace { | |||
62 | ||||
// The kinds of register operand the parser distinguishes when matching
// register names (see matchRegisterNameAlias and the tryParse*Register
// helpers below).
enum class RegKind {
  Scalar,             // General-purpose or scalar FP/SIMD register.
  NeonVector,         // NEON vector register.
  SVEDataVector,      // SVE data vector register.
  SVEPredicateVector  // SVE predicate register.
};
69 | ||||
// How a parsed register is allowed to relate to the register the instruction
// actually encodes: the same register, its super-register, or its
// sub-register (used e.g. for GPR64as32 / GPR32as64 operands; see
// RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
75 | ||||
76 | class AArch64AsmParser : public MCTargetAsmParser { | |||
77 | private: | |||
78 | StringRef Mnemonic; ///< Instruction mnemonic. | |||
79 | ||||
80 | // Map of register aliases registers via the .req directive. | |||
81 | StringMap<std::pair<RegKind, unsigned>> RegisterReqs; | |||
82 | ||||
83 | class PrefixInfo { | |||
84 | public: | |||
85 | static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) { | |||
86 | PrefixInfo Prefix; | |||
87 | switch (Inst.getOpcode()) { | |||
88 | case AArch64::MOVPRFX_ZZ: | |||
89 | Prefix.Active = true; | |||
90 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
91 | break; | |||
92 | case AArch64::MOVPRFX_ZPmZ_B: | |||
93 | case AArch64::MOVPRFX_ZPmZ_H: | |||
94 | case AArch64::MOVPRFX_ZPmZ_S: | |||
95 | case AArch64::MOVPRFX_ZPmZ_D: | |||
96 | Prefix.Active = true; | |||
97 | Prefix.Predicated = true; | |||
98 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; | |||
99 | assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx" ) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 100, __PRETTY_FUNCTION__)) | |||
100 | "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx" ) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 100, __PRETTY_FUNCTION__)); | |||
101 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
102 | Prefix.Pg = Inst.getOperand(2).getReg(); | |||
103 | break; | |||
104 | case AArch64::MOVPRFX_ZPzZ_B: | |||
105 | case AArch64::MOVPRFX_ZPzZ_H: | |||
106 | case AArch64::MOVPRFX_ZPzZ_S: | |||
107 | case AArch64::MOVPRFX_ZPzZ_D: | |||
108 | Prefix.Active = true; | |||
109 | Prefix.Predicated = true; | |||
110 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; | |||
111 | assert(Prefix.ElementSize != AArch64::ElementSizeNone &&((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx" ) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 112, __PRETTY_FUNCTION__)) | |||
112 | "No destructive element size set for movprfx")((Prefix.ElementSize != AArch64::ElementSizeNone && "No destructive element size set for movprfx" ) ? static_cast<void> (0) : __assert_fail ("Prefix.ElementSize != AArch64::ElementSizeNone && \"No destructive element size set for movprfx\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 112, __PRETTY_FUNCTION__)); | |||
113 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
114 | Prefix.Pg = Inst.getOperand(1).getReg(); | |||
115 | break; | |||
116 | default: | |||
117 | break; | |||
118 | } | |||
119 | ||||
120 | return Prefix; | |||
121 | } | |||
122 | ||||
123 | PrefixInfo() : Active(false), Predicated(false) {} | |||
124 | bool isActive() const { return Active; } | |||
125 | bool isPredicated() const { return Predicated; } | |||
126 | unsigned getElementSize() const { | |||
127 | assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail ( "Predicated", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 127, __PRETTY_FUNCTION__)); | |||
128 | return ElementSize; | |||
129 | } | |||
130 | unsigned getDstReg() const { return Dst; } | |||
131 | unsigned getPgReg() const { | |||
132 | assert(Predicated)((Predicated) ? static_cast<void> (0) : __assert_fail ( "Predicated", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 132, __PRETTY_FUNCTION__)); | |||
133 | return Pg; | |||
134 | } | |||
135 | ||||
136 | private: | |||
137 | bool Active; | |||
138 | bool Predicated; | |||
139 | unsigned ElementSize; | |||
140 | unsigned Dst; | |||
141 | unsigned Pg; | |||
142 | } NextPrefix; | |||
143 | ||||
144 | AArch64TargetStreamer &getTargetStreamer() { | |||
145 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); | |||
146 | return static_cast<AArch64TargetStreamer &>(TS); | |||
147 | } | |||
148 | ||||
149 | SMLoc getLoc() const { return getParser().getTok().getLoc(); } | |||
150 | ||||
151 | bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); | |||
152 | void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S); | |||
153 | AArch64CC::CondCode parseCondCodeString(StringRef Cond); | |||
154 | bool parseCondCode(OperandVector &Operands, bool invertCondCode); | |||
155 | unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind); | |||
156 | bool parseRegister(OperandVector &Operands); | |||
157 | bool parseSymbolicImmVal(const MCExpr *&ImmVal); | |||
158 | bool parseNeonVectorList(OperandVector &Operands); | |||
159 | bool parseOptionalMulOperand(OperandVector &Operands); | |||
160 | bool parseOperand(OperandVector &Operands, bool isCondCode, | |||
161 | bool invertCondCode); | |||
162 | ||||
163 | bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, | |||
164 | OperandVector &Operands); | |||
165 | ||||
166 | bool parseDirectiveArch(SMLoc L); | |||
167 | bool parseDirectiveArchExtension(SMLoc L); | |||
168 | bool parseDirectiveCPU(SMLoc L); | |||
169 | bool parseDirectiveInst(SMLoc L); | |||
170 | ||||
171 | bool parseDirectiveTLSDescCall(SMLoc L); | |||
172 | ||||
173 | bool parseDirectiveLOH(StringRef LOH, SMLoc L); | |||
174 | bool parseDirectiveLtorg(SMLoc L); | |||
175 | ||||
176 | bool parseDirectiveReq(StringRef Name, SMLoc L); | |||
177 | bool parseDirectiveUnreq(SMLoc L); | |||
178 | bool parseDirectiveCFINegateRAState(); | |||
179 | bool parseDirectiveCFIBKeyFrame(); | |||
180 | ||||
181 | bool validateInstruction(MCInst &Inst, SMLoc &IDLoc, | |||
182 | SmallVectorImpl<SMLoc> &Loc); | |||
183 | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, | |||
184 | OperandVector &Operands, MCStreamer &Out, | |||
185 | uint64_t &ErrorInfo, | |||
186 | bool MatchingInlineAsm) override; | |||
187 | /// @name Auto-generated Match Functions | |||
188 | /// { | |||
189 | ||||
190 | #define GET_ASSEMBLER_HEADER | |||
191 | #include "AArch64GenAsmMatcher.inc" | |||
192 | ||||
193 | /// } | |||
194 | ||||
195 | OperandMatchResultTy tryParseScalarRegister(unsigned &Reg); | |||
196 | OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind, | |||
197 | RegKind MatchKind); | |||
198 | OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands); | |||
199 | OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands); | |||
200 | OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands); | |||
201 | OperandMatchResultTy tryParseSysReg(OperandVector &Operands); | |||
202 | OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands); | |||
203 | template <bool IsSVEPrefetch = false> | |||
204 | OperandMatchResultTy tryParsePrefetch(OperandVector &Operands); | |||
205 | OperandMatchResultTy tryParsePSBHint(OperandVector &Operands); | |||
206 | OperandMatchResultTy tryParseBTIHint(OperandVector &Operands); | |||
207 | OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands); | |||
208 | OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands); | |||
209 | template<bool AddFPZeroAsLiteral> | |||
210 | OperandMatchResultTy tryParseFPImm(OperandVector &Operands); | |||
211 | OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands); | |||
212 | OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands); | |||
213 | bool tryParseNeonVectorRegister(OperandVector &Operands); | |||
214 | OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands); | |||
215 | OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands); | |||
216 | template <bool ParseShiftExtend, | |||
217 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg> | |||
218 | OperandMatchResultTy tryParseGPROperand(OperandVector &Operands); | |||
219 | template <bool ParseShiftExtend, bool ParseSuffix> | |||
220 | OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands); | |||
221 | OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands); | |||
222 | template <RegKind VectorKind> | |||
223 | OperandMatchResultTy tryParseVectorList(OperandVector &Operands, | |||
224 | bool ExpectMatch = false); | |||
225 | OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands); | |||
226 | ||||
227 | public: | |||
228 | enum AArch64MatchResultTy { | |||
229 | Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, | |||
230 | #define GET_OPERAND_DIAGNOSTIC_TYPES | |||
231 | #include "AArch64GenAsmMatcher.inc" | |||
232 | }; | |||
233 | bool IsILP32; | |||
234 | ||||
235 | AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, | |||
236 | const MCInstrInfo &MII, const MCTargetOptions &Options) | |||
237 | : MCTargetAsmParser(Options, STI, MII) { | |||
238 | IsILP32 = Options.getABIName() == "ilp32"; | |||
239 | MCAsmParserExtension::Initialize(Parser); | |||
240 | MCStreamer &S = getParser().getStreamer(); | |||
241 | if (S.getTargetStreamer() == nullptr) | |||
242 | new AArch64TargetStreamer(S); | |||
243 | ||||
244 | // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte | |||
245 | // directives as they have the same form and semantics: | |||
246 | /// ::= (.hword | .word | .xword ) [ expression (, expression)* ] | |||
247 | Parser.addAliasForDirective(".hword", ".2byte"); | |||
248 | Parser.addAliasForDirective(".word", ".4byte"); | |||
249 | Parser.addAliasForDirective(".xword", ".8byte"); | |||
250 | ||||
251 | // Initialize the set of available features. | |||
252 | setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits())); | |||
253 | } | |||
254 | ||||
255 | bool regsEqual(const MCParsedAsmOperand &Op1, | |||
256 | const MCParsedAsmOperand &Op2) const override; | |||
257 | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, | |||
258 | SMLoc NameLoc, OperandVector &Operands) override; | |||
259 | bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; | |||
260 | bool ParseDirective(AsmToken DirectiveID) override; | |||
261 | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, | |||
262 | unsigned Kind) override; | |||
263 | ||||
264 | static bool classifySymbolRef(const MCExpr *Expr, | |||
265 | AArch64MCExpr::VariantKind &ELFRefKind, | |||
266 | MCSymbolRefExpr::VariantKind &DarwinRefKind, | |||
267 | int64_t &Addend); | |||
268 | }; | |||
269 | ||||
270 | /// AArch64Operand - Instances of this class represent a parsed AArch64 machine | |||
271 | /// instruction. | |||
272 | class AArch64Operand : public MCParsedAsmOperand { | |||
273 | private: | |||
  // Discriminates which member of the anonymous union below is active.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range spanned by this operand's tokens.
  SMLoc StartLoc, EndLoc;

  // A bare token (pointer/length into the source buffer; not owned).
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  // A list of consecutive vector registers, e.g. "{ v0.4s, v1.4s }".
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // NOTE(review): ExtendOp is not referenced by the union below — it appears
  // vestigial; confirm before removing.
  struct ExtendOp {
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
424 | ||||
public:
  // Construct an empty operand of kind K; the caller fills in the matching
  // union member afterwards.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor. Only the union member selected by o.Kind is copied;
  // reading any other member of the union would be undefined behavior.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
480 | ||||
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Each accessor below asserts that this operand currently holds the
  // matching union member (per Kind) before reading it.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstructs the APFloat from the raw 64-bit pattern stored in FPImm.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors accept either a standalone shift/extend
  // operand or a register operand carrying an embedded shift/extend.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
624 | ||||
625 | bool isImm() const override { return Kind == k_Immediate; } | |||
626 | bool isMem() const override { return false; } | |||
627 | ||||
628 | bool isUImm6() const { | |||
629 | if (!isImm()) | |||
630 | return false; | |||
631 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
632 | if (!MCE) | |||
633 | return false; | |||
634 | int64_t Val = MCE->getValue(); | |||
635 | return (Val >= 0 && Val < 64); | |||
636 | } | |||
637 | ||||
638 | template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); } | |||
639 | ||||
640 | template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const { | |||
641 | return isImmScaled<Bits, Scale>(true); | |||
642 | } | |||
643 | ||||
644 | template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const { | |||
645 | return isImmScaled<Bits, Scale>(false); | |||
646 | } | |||
647 | ||||
648 | template <int Bits, int Scale> | |||
649 | DiagnosticPredicate isImmScaled(bool Signed) const { | |||
650 | if (!isImm()) | |||
651 | return DiagnosticPredicateTy::NoMatch; | |||
652 | ||||
653 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
654 | if (!MCE) | |||
655 | return DiagnosticPredicateTy::NoMatch; | |||
656 | ||||
657 | int64_t MinVal, MaxVal; | |||
658 | if (Signed) { | |||
659 | int64_t Shift = Bits - 1; | |||
660 | MinVal = (int64_t(1) << Shift) * -Scale; | |||
661 | MaxVal = ((int64_t(1) << Shift) - 1) * Scale; | |||
662 | } else { | |||
663 | MinVal = 0; | |||
664 | MaxVal = ((int64_t(1) << Bits) - 1) * Scale; | |||
665 | } | |||
666 | ||||
667 | int64_t Val = MCE->getValue(); | |||
668 | if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0) | |||
669 | return DiagnosticPredicateTy::Match; | |||
670 | ||||
671 | return DiagnosticPredicateTy::NearMatch; | |||
672 | } | |||
673 | ||||
674 | DiagnosticPredicate isSVEPattern() const { | |||
675 | if (!isImm()) | |||
676 | return DiagnosticPredicateTy::NoMatch; | |||
677 | auto *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
678 | if (!MCE) | |||
679 | return DiagnosticPredicateTy::NoMatch; | |||
680 | int64_t Val = MCE->getValue(); | |||
681 | if (Val >= 0 && Val < 32) | |||
682 | return DiagnosticPredicateTy::Match; | |||
683 | return DiagnosticPredicateTy::NearMatch; | |||
684 | } | |||
685 | ||||
  /// Check whether \p Expr is a symbolic expression usable as a scaled,
  /// unsigned 12-bit load/store offset (a :lo12:/@pageoff-style operand).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    // The three out-parameters are only meaningful when classifySymbolRef()
    // returns true.
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
720 | ||||
721 | template <int Scale> bool isUImm12Offset() const { | |||
722 | if (!isImm()) | |||
723 | return false; | |||
724 | ||||
725 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
726 | if (!MCE) | |||
727 | return isSymbolicUImm12Offset(getImm()); | |||
728 | ||||
729 | int64_t Val = MCE->getValue(); | |||
730 | return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000; | |||
731 | } | |||
732 | ||||
733 | template <int N, int M> | |||
734 | bool isImmInRange() const { | |||
735 | if (!isImm()) | |||
736 | return false; | |||
737 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
738 | if (!MCE) | |||
739 | return false; | |||
740 | int64_t Val = MCE->getValue(); | |||
741 | return (Val >= N && Val <= M); | |||
742 | } | |||
743 | ||||
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    // Reject values that don't fit in T under either a signed or an
    // unsigned reading; both interpretations are acceptable spellings.
    int64_t Val = MCE->getValue();
    int64_t SVal = typename std::make_signed<T>::type(Val);
    int64_t UVal = typename std::make_unsigned<T>::type(Val);
    if (Val != SVal && Val != UVal)
      return false;

    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }
762 | ||||
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already carries the expected shift: hand it back as-is.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain constant: report it pre-shifted by Width when its low Width bits
    // are clear (round-trip check), otherwise as an unshifted value.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Neither representation applies (e.g. symbolic expression).
    return {};
  }
785 | ||||
  /// True if this operand can be encoded as the immediate of an ADD/SUB
  /// (immediate) instruction: a 12-bit value optionally shifted left by 12,
  /// or a symbolic expression with an ADD/SUB-capable relocation modifier.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic expressions are accepted only with the modifiers below.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
830 | ||||
831 | bool isAddSubImmNeg() const { | |||
832 | if (!isShiftedImm() && !isImm()) | |||
833 | return false; | |||
834 | ||||
835 | // Otherwise it should be a real negative immediate in range. | |||
836 | if (auto ShiftedVal = getShiftedVal<12>()) | |||
837 | return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff; | |||
838 | ||||
839 | return false; | |||
840 | } | |||
841 | ||||
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the shifted (lsl #8) form.
    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      // Shift in the unsigned domain so a negative immediate is well-defined.
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
862 | ||||
863 | // Unsigned value in the range 0 to 255. For element widths of | |||
864 | // 16 bits or higher it may also be a signed multiple of 256 in the | |||
865 | // range 0 to 65280. | |||
866 | template <typename T> DiagnosticPredicate isSVEAddSubImm() const { | |||
867 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) | |||
868 | return DiagnosticPredicateTy::NoMatch; | |||
869 | ||||
870 | bool IsByte = | |||
871 | std::is_same<int8_t, typename std::make_signed<T>::type>::value; | |||
872 | if (auto ShiftedImm = getShiftedVal<8>()) | |||
873 | if (!(IsByte && ShiftedImm->second) && | |||
874 | AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first | |||
875 | << ShiftedImm->second)) | |||
876 | return DiagnosticPredicateTy::Match; | |||
877 | ||||
878 | return DiagnosticPredicateTy::NearMatch; | |||
879 | } | |||
880 | ||||
881 | template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const { | |||
882 | if (isLogicalImm<T>() && !isSVECpyImm<T>()) | |||
883 | return DiagnosticPredicateTy::Match; | |||
884 | return DiagnosticPredicateTy::NoMatch; | |||
885 | } | |||
886 | ||||
887 | bool isCondCode() const { return Kind == k_CondCode; } | |||
888 | ||||
889 | bool isSIMDImmType10() const { | |||
890 | if (!isImm()) | |||
891 | return false; | |||
892 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
893 | if (!MCE) | |||
894 | return false; | |||
895 | return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue()); | |||
896 | } | |||
897 | ||||
898 | template<int N> | |||
899 | bool isBranchTarget() const { | |||
900 | if (!isImm()) | |||
901 | return false; | |||
902 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
903 | if (!MCE) | |||
904 | return true; | |||
905 | int64_t Val = MCE->getValue(); | |||
906 | if (Val & 0x3) | |||
907 | return false; | |||
908 | assert(N > 0 && "Branch target immediate cannot be 0 bits!")((N > 0 && "Branch target immediate cannot be 0 bits!" ) ? static_cast<void> (0) : __assert_fail ("N > 0 && \"Branch target immediate cannot be 0 bits!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 908, __PRETTY_FUNCTION__)); | |||
909 | return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2)); | |||
910 | } | |||
911 | ||||
912 | bool | |||
913 | isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const { | |||
914 | if (!isImm()) | |||
915 | return false; | |||
916 | ||||
917 | AArch64MCExpr::VariantKind ELFRefKind; | |||
918 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
919 | int64_t Addend; | |||
920 | if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind, | |||
921 | DarwinRefKind, Addend)) { | |||
922 | return false; | |||
923 | } | |||
924 | if (DarwinRefKind != MCSymbolRefExpr::VK_None) | |||
925 | return false; | |||
926 | ||||
927 | for (unsigned i = 0; i != AllowedModifiers.size(); ++i) { | |||
928 | if (ELFRefKind == AllowedModifiers[i]) | |||
929 | return true; | |||
930 | } | |||
931 | ||||
932 | return false; | |||
933 | } | |||
934 | ||||
  // The isMov[ZK]SymbolG<n> predicates accept symbolic operands whose
  // modifier selects 16-bit chunk <n> for the corresponding MOVZ/MOVK.
  bool isMovZSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovZSymbolG2() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                         AArch64MCExpr::VK_TPREL_G2,
                         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovZSymbolG1() const {
    return isMovWSymbol({
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    });
  }

  bool isMovZSymbolG0() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                         AArch64MCExpr::VK_TPREL_G0,
                         AArch64MCExpr::VK_DTPREL_G0});
  }

  // MOVK variants accept the _NC modifier forms (presumably "no check";
  // see the AArch64 ELF relocation spec).
  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }

  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
978 | ||||
979 | template<int RegWidth, int Shift> | |||
980 | bool isMOVZMovAlias() const { | |||
981 | if (!isImm()) return false; | |||
982 | ||||
983 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
984 | if (!CE) return false; | |||
985 | uint64_t Value = CE->getValue(); | |||
986 | ||||
987 | return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth); | |||
988 | } | |||
989 | ||||
990 | template<int RegWidth, int Shift> | |||
991 | bool isMOVNMovAlias() const { | |||
992 | if (!isImm()) return false; | |||
993 | ||||
994 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
995 | if (!CE) return false; | |||
996 | uint64_t Value = CE->getValue(); | |||
997 | ||||
998 | return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth); | |||
999 | } | |||
1000 | ||||
1001 | bool isFPImm() const { | |||
1002 | return Kind == k_FPImm && | |||
1003 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1; | |||
1004 | } | |||
1005 | ||||
1006 | bool isBarrier() const { return Kind == k_Barrier; } | |||
1007 | bool isSysReg() const { return Kind == k_SysReg; } | |||
1008 | ||||
1009 | bool isMRSSystemRegister() const { | |||
1010 | if (!isSysReg()) return false; | |||
1011 | ||||
1012 | return SysReg.MRSReg != -1U; | |||
1013 | } | |||
1014 | ||||
1015 | bool isMSRSystemRegister() const { | |||
1016 | if (!isSysReg()) return false; | |||
1017 | return SysReg.MSRReg != -1U; | |||
1018 | } | |||
1019 | ||||
1020 | bool isSystemPStateFieldWithImm0_1() const { | |||
1021 | if (!isSysReg()) return false; | |||
1022 | return (SysReg.PStateField == AArch64PState::PAN || | |||
1023 | SysReg.PStateField == AArch64PState::DIT || | |||
1024 | SysReg.PStateField == AArch64PState::UAO || | |||
1025 | SysReg.PStateField == AArch64PState::SSBS); | |||
1026 | } | |||
1027 | ||||
1028 | bool isSystemPStateFieldWithImm0_15() const { | |||
1029 | if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false; | |||
1030 | return SysReg.PStateField != -1U; | |||
1031 | } | |||
1032 | ||||
1033 | bool isReg() const override { | |||
1034 | return Kind == k_Register; | |||
1035 | } | |||
1036 | ||||
1037 | bool isScalarReg() const { | |||
1038 | return Kind == k_Register && Reg.Kind == RegKind::Scalar; | |||
1039 | } | |||
1040 | ||||
1041 | bool isNeonVectorReg() const { | |||
1042 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector; | |||
1043 | } | |||
1044 | ||||
1045 | bool isNeonVectorRegLo() const { | |||
1046 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector && | |||
1047 | AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains( | |||
1048 | Reg.RegNum); | |||
1049 | } | |||
1050 | ||||
1051 | template <unsigned Class> bool isSVEVectorReg() const { | |||
1052 | RegKind RK; | |||
1053 | switch (Class) { | |||
1054 | case AArch64::ZPRRegClassID: | |||
1055 | case AArch64::ZPR_3bRegClassID: | |||
1056 | case AArch64::ZPR_4bRegClassID: | |||
1057 | RK = RegKind::SVEDataVector; | |||
1058 | break; | |||
1059 | case AArch64::PPRRegClassID: | |||
1060 | case AArch64::PPR_3bRegClassID: | |||
1061 | RK = RegKind::SVEPredicateVector; | |||
1062 | break; | |||
1063 | default: | |||
1064 | llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1064); | |||
1065 | } | |||
1066 | ||||
1067 | return (Kind == k_Register && Reg.Kind == RK) && | |||
1068 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
1069 | } | |||
1070 | ||||
1071 | template <unsigned Class> bool isFPRasZPR() const { | |||
1072 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1073 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
1074 | } | |||
1075 | ||||
1076 | template <int ElementWidth, unsigned Class> | |||
1077 | DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const { | |||
1078 | if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector) | |||
1079 | return DiagnosticPredicateTy::NoMatch; | |||
1080 | ||||
1081 | if (isSVEVectorReg<Class>() && | |||
1082 | (ElementWidth == 0 || Reg.ElementWidth == ElementWidth)) | |||
1083 | return DiagnosticPredicateTy::Match; | |||
1084 | ||||
1085 | return DiagnosticPredicateTy::NearMatch; | |||
1086 | } | |||
1087 | ||||
1088 | template <int ElementWidth, unsigned Class> | |||
1089 | DiagnosticPredicate isSVEDataVectorRegOfWidth() const { | |||
1090 | if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector) | |||
1091 | return DiagnosticPredicateTy::NoMatch; | |||
1092 | ||||
1093 | if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth) | |||
1094 | return DiagnosticPredicateTy::Match; | |||
1095 | ||||
1096 | return DiagnosticPredicateTy::NearMatch; | |||
1097 | } | |||
1098 | ||||
  /// Match an SVE data vector register of the given class/element width that
  /// also carries a shift/extend of kind \p ShiftExtendTy scaled for a
  /// \p ShiftWidth-bit access.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    // Both the amount and the shift/extend kind must agree for a full match.
    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1121 | ||||
1122 | bool isGPR32as64() const { | |||
1123 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1124 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum); | |||
1125 | } | |||
1126 | ||||
1127 | bool isGPR64as32() const { | |||
1128 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1129 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum); | |||
1130 | } | |||
1131 | ||||
1132 | bool isWSeqPair() const { | |||
1133 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1134 | AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains( | |||
1135 | Reg.RegNum); | |||
1136 | } | |||
1137 | ||||
1138 | bool isXSeqPair() const { | |||
1139 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1140 | AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains( | |||
1141 | Reg.RegNum); | |||
1142 | } | |||
1143 | ||||
1144 | template<int64_t Angle, int64_t Remainder> | |||
1145 | DiagnosticPredicate isComplexRotation() const { | |||
1146 | if (!isImm()) return DiagnosticPredicateTy::NoMatch; | |||
1147 | ||||
1148 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
1149 | if (!CE) return DiagnosticPredicateTy::NoMatch; | |||
1150 | uint64_t Value = CE->getValue(); | |||
1151 | ||||
1152 | if (Value % Angle == Remainder && Value <= 270) | |||
1153 | return DiagnosticPredicateTy::Match; | |||
1154 | return DiagnosticPredicateTy::NearMatch; | |||
1155 | } | |||
1156 | ||||
1157 | template <unsigned RegClassID> bool isGPR64() const { | |||
1158 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
1159 | AArch64MCRegisterClasses[RegClassID].contains(getReg()); | |||
1160 | } | |||
1161 | ||||
1162 | template <unsigned RegClassID, int ExtWidth> | |||
1163 | DiagnosticPredicate isGPR64WithShiftExtend() const { | |||
1164 | if (Kind != k_Register || Reg.Kind != RegKind::Scalar) | |||
1165 | return DiagnosticPredicateTy::NoMatch; | |||
1166 | ||||
1167 | if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL && | |||
1168 | getShiftExtendAmount() == Log2_32(ExtWidth / 8)) | |||
1169 | return DiagnosticPredicateTy::Match; | |||
1170 | return DiagnosticPredicateTy::NearMatch; | |||
1171 | } | |||
1172 | ||||
1173 | /// Is this a vector list with the type implicit (presumably attached to the | |||
1174 | /// instruction itself)? | |||
1175 | template <RegKind VectorKind, unsigned NumRegs> | |||
1176 | bool isImplicitlyTypedVectorList() const { | |||
1177 | return Kind == k_VectorList && VectorList.Count == NumRegs && | |||
1178 | VectorList.NumElements == 0 && | |||
1179 | VectorList.RegisterKind == VectorKind; | |||
1180 | } | |||
1181 | ||||
1182 | template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements, | |||
1183 | unsigned ElementWidth> | |||
1184 | bool isTypedVectorList() const { | |||
1185 | if (Kind != k_VectorList) | |||
1186 | return false; | |||
1187 | if (VectorList.Count != NumRegs) | |||
1188 | return false; | |||
1189 | if (VectorList.RegisterKind != VectorKind) | |||
1190 | return false; | |||
1191 | if (VectorList.ElementWidth != ElementWidth) | |||
1192 | return false; | |||
1193 | return VectorList.NumElements == NumElements; | |||
1194 | } | |||
1195 | ||||
1196 | template <int Min, int Max> | |||
1197 | DiagnosticPredicate isVectorIndex() const { | |||
1198 | if (Kind != k_VectorIndex) | |||
1199 | return DiagnosticPredicateTy::NoMatch; | |||
1200 | if (VectorIndex.Val >= Min && VectorIndex.Val <= Max) | |||
1201 | return DiagnosticPredicateTy::Match; | |||
1202 | return DiagnosticPredicateTy::NearMatch; | |||
1203 | } | |||
1204 | ||||
1205 | bool isToken() const override { return Kind == k_Token; } | |||
1206 | ||||
1207 | bool isTokenEqual(StringRef Str) const { | |||
1208 | return Kind == k_Token && getToken() == Str; | |||
1209 | } | |||
1210 | bool isSysCR() const { return Kind == k_SysCR; } | |||
1211 | bool isPrefetch() const { return Kind == k_Prefetch; } | |||
1212 | bool isPSBHint() const { return Kind == k_PSBHint; } | |||
1213 | bool isBTIHint() const { return Kind == k_BTIHint; } | |||
1214 | bool isShiftExtend() const { return Kind == k_ShiftExtend; } | |||
1215 | bool isShifter() const { | |||
1216 | if (!isShiftExtend()) | |||
1217 | return false; | |||
1218 | ||||
1219 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
1220 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
1221 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR || | |||
1222 | ST == AArch64_AM::MSL); | |||
1223 | } | |||
1224 | ||||
  /// Match this FP immediate exactly (bitwise) against the table entry
  /// selected by \p ImmEnum.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value")((Desc && "Unknown enum value") ? static_cast<void > (0) : __assert_fail ("Desc && \"Unknown enum value\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1232, __PRETTY_FUNCTION__));

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
          APFloat::opOK)
        llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1238);

      // Require an exact bitwise match, not merely numeric equality.
      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Match either of two exact FP immediates.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1256 | ||||
1257 | bool isExtend() const { | |||
1258 | if (!isShiftExtend()) | |||
1259 | return false; | |||
1260 | ||||
1261 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1262 | return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || | |||
1263 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || | |||
1264 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW || | |||
1265 | ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || | |||
1266 | ET == AArch64_AM::LSL) && | |||
1267 | getShiftExtendAmount() <= 4; | |||
1268 | } | |||
1269 | ||||
1270 | bool isExtend64() const { | |||
1271 | if (!isExtend()) | |||
1272 | return false; | |||
1273 | // Make sure the extend expects a 32-bit source register. | |||
1274 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1275 | return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || | |||
1276 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || | |||
1277 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW; | |||
1278 | } | |||
1279 | ||||
1280 | bool isExtendLSL64() const { | |||
1281 | if (!isExtend()) | |||
1282 | return false; | |||
1283 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1284 | return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || | |||
1285 | ET == AArch64_AM::LSL) && | |||
1286 | getShiftExtendAmount() <= 4; | |||
1287 | } | |||
1288 | ||||
1289 | template<int Width> bool isMemXExtend() const { | |||
1290 | if (!isExtend()) | |||
1291 | return false; | |||
1292 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1293 | return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) && | |||
1294 | (getShiftExtendAmount() == Log2_32(Width / 8) || | |||
1295 | getShiftExtendAmount() == 0); | |||
1296 | } | |||
1297 | ||||
1298 | template<int Width> bool isMemWExtend() const { | |||
1299 | if (!isExtend()) | |||
1300 | return false; | |||
1301 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1302 | return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) && | |||
1303 | (getShiftExtendAmount() == Log2_32(Width / 8) || | |||
1304 | getShiftExtendAmount() == 0); | |||
1305 | } | |||
1306 | ||||
1307 | template <unsigned width> | |||
1308 | bool isArithmeticShifter() const { | |||
1309 | if (!isShifter()) | |||
1310 | return false; | |||
1311 | ||||
1312 | // An arithmetic shifter is LSL, LSR, or ASR. | |||
1313 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
1314 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
1315 | ST == AArch64_AM::ASR) && getShiftExtendAmount() < width; | |||
1316 | } | |||
1317 | ||||
1318 | template <unsigned width> | |||
1319 | bool isLogicalShifter() const { | |||
1320 | if (!isShifter()) | |||
1321 | return false; | |||
1322 | ||||
1323 | // A logical shifter is LSL, LSR, ASR or ROR. | |||
1324 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
1325 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
1326 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) && | |||
1327 | getShiftExtendAmount() < width; | |||
1328 | } | |||
1329 | ||||
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1353 | ||||
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1383 | ||||
1384 | // Fallback unscaled operands are for aliases of LDR/STR that fall back | |||
1385 | // to LDUR/STUR when the offset is not legal for the former but is for | |||
1386 | // the latter. As such, in addition to checking for being a legal unscaled | |||
1387 | // address, also check that it is not a legal scaled address. This avoids | |||
1388 | // ambiguity in the matcher. | |||
1389 | template<int Width> | |||
1390 | bool isSImm9OffsetFB() const { | |||
1391 | return isSImm<9>() && !isUImm12Offset<Width / 8>(); | |||
1392 | } | |||
1393 | ||||
1394 | bool isAdrpLabel() const { | |||
1395 | // Validation was handled during parsing, so we just sanity check that | |||
1396 | // something didn't go haywire. | |||
1397 | if (!isImm()) | |||
1398 | return false; | |||
1399 | ||||
1400 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1401 | int64_t Val = CE->getValue(); | |||
1402 | int64_t Min = - (4096 * (1LL << (21 - 1))); | |||
1403 | int64_t Max = 4096 * ((1LL << (21 - 1)) - 1); | |||
1404 | return (Val % 4096) == 0 && Val >= Min && Val <= Max; | |||
1405 | } | |||
1406 | ||||
1407 | return true; | |||
1408 | } | |||
1409 | ||||
1410 | bool isAdrLabel() const { | |||
1411 | // Validation was handled during parsing, so we just sanity check that | |||
1412 | // something didn't go haywire. | |||
1413 | if (!isImm()) | |||
1414 | return false; | |||
1415 | ||||
1416 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
1417 | int64_t Val = CE->getValue(); | |||
1418 | int64_t Min = - (1LL << (21 - 1)); | |||
1419 | int64_t Max = ((1LL << (21 - 1)) - 1); | |||
1420 | return Val >= Min && Val <= Max; | |||
1421 | } | |||
1422 | ||||
1423 | return true; | |||
1424 | } | |||
1425 | ||||
1426 | void addExpr(MCInst &Inst, const MCExpr *Expr) const { | |||
1427 | // Add as immediates when possible. Null MCExpr = 0. | |||
1428 | if (!Expr) | |||
1429 | Inst.addOperand(MCOperand::createImm(0)); | |||
1430 | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) | |||
1431 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
1432 | else | |||
1433 | Inst.addOperand(MCOperand::createExpr(Expr)); | |||
1434 | } | |||
1435 | ||||
1436 | void addRegOperands(MCInst &Inst, unsigned N) const { | |||
1437 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1437, __PRETTY_FUNCTION__)); | |||
1438 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
1439 | } | |||
1440 | ||||
1441 | void addGPR32as64Operands(MCInst &Inst, unsigned N) const { | |||
1442 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1442, __PRETTY_FUNCTION__)); | |||
1443 | assert(((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1444, __PRETTY_FUNCTION__)) | |||
1444 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1444, __PRETTY_FUNCTION__)); | |||
1445 | ||||
1446 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); | |||
1447 | uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister( | |||
1448 | RI->getEncodingValue(getReg())); | |||
1449 | ||||
1450 | Inst.addOperand(MCOperand::createReg(Reg)); | |||
1451 | } | |||
1452 | ||||
1453 | void addGPR64as32Operands(MCInst &Inst, unsigned N) const { | |||
1454 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1454, __PRETTY_FUNCTION__)); | |||
1455 | assert(((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1456, __PRETTY_FUNCTION__)) | |||
1456 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1456, __PRETTY_FUNCTION__)); | |||
1457 | ||||
1458 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); | |||
1459 | uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister( | |||
1460 | RI->getEncodingValue(getReg())); | |||
1461 | ||||
1462 | Inst.addOperand(MCOperand::createReg(Reg)); | |||
1463 | } | |||
1464 | ||||
1465 | template <int Width> | |||
1466 | void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const { | |||
1467 | unsigned Base; | |||
1468 | switch (Width) { | |||
1469 | case 8: Base = AArch64::B0; break; | |||
1470 | case 16: Base = AArch64::H0; break; | |||
1471 | case 32: Base = AArch64::S0; break; | |||
1472 | case 64: Base = AArch64::D0; break; | |||
1473 | case 128: Base = AArch64::Q0; break; | |||
1474 | default: | |||
1475 | llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1475); | |||
1476 | } | |||
1477 | Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base)); | |||
1478 | } | |||
1479 | ||||
1480 | void addVectorReg64Operands(MCInst &Inst, unsigned N) const { | |||
1481 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1481, __PRETTY_FUNCTION__)); | |||
1482 | assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1483, __PRETTY_FUNCTION__)) | |||
1483 | AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1483, __PRETTY_FUNCTION__)); | |||
1484 | Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0)); | |||
1485 | } | |||
1486 | ||||
1487 | void addVectorReg128Operands(MCInst &Inst, unsigned N) const { | |||
1488 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1488, __PRETTY_FUNCTION__)); | |||
1489 | assert(((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1490, __PRETTY_FUNCTION__)) | |||
1490 | AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()))((AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains (getReg())) ? static_cast<void> (0) : __assert_fail ("AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1490, __PRETTY_FUNCTION__)); | |||
1491 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
1492 | } | |||
1493 | ||||
1494 | void addVectorRegLoOperands(MCInst &Inst, unsigned N) const { | |||
1495 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1495, __PRETTY_FUNCTION__)); | |||
1496 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
1497 | } | |||
1498 | ||||
// Register kind of a parsed vector list, used to index the first-register
// tables in addVectorListOperands.
enum VecListIndexType {
  VecListIdx_DReg = 0, // 64-bit NEON D registers
  VecListIdx_QReg = 1, // 128-bit NEON Q registers
  VecListIdx_ZReg = 2, // SVE Z registers
};
1504 | ||||
1505 | template <VecListIndexType RegTy, unsigned NumRegs> | |||
1506 | void addVectorListOperands(MCInst &Inst, unsigned N) const { | |||
1507 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1507, __PRETTY_FUNCTION__)); | |||
1508 | static const unsigned FirstRegs[][5] = { | |||
1509 | /* DReg */ { AArch64::Q0, | |||
1510 | AArch64::D0, AArch64::D0_D1, | |||
1511 | AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 }, | |||
1512 | /* QReg */ { AArch64::Q0, | |||
1513 | AArch64::Q0, AArch64::Q0_Q1, | |||
1514 | AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 }, | |||
1515 | /* ZReg */ { AArch64::Z0, | |||
1516 | AArch64::Z0, AArch64::Z0_Z1, | |||
1517 | AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 } | |||
1518 | }; | |||
1519 | ||||
1520 | assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs" ) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1521, __PRETTY_FUNCTION__)) | |||
1521 | " NumRegs must be <= 4 for ZRegs")(((RegTy != VecListIdx_ZReg || NumRegs <= 4) && " NumRegs must be <= 4 for ZRegs" ) ? static_cast<void> (0) : __assert_fail ("(RegTy != VecListIdx_ZReg || NumRegs <= 4) && \" NumRegs must be <= 4 for ZRegs\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1521, __PRETTY_FUNCTION__)); | |||
1522 | ||||
1523 | unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs]; | |||
1524 | Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() - | |||
1525 | FirstRegs[(unsigned)RegTy][0])); | |||
1526 | } | |||
1527 | ||||
1528 | void addVectorIndexOperands(MCInst &Inst, unsigned N) const { | |||
1529 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1529, __PRETTY_FUNCTION__)); | |||
1530 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
1531 | } | |||
1532 | ||||
1533 | template <unsigned ImmIs0, unsigned ImmIs1> | |||
1534 | void addExactFPImmOperands(MCInst &Inst, unsigned N) const { | |||
1535 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1535, __PRETTY_FUNCTION__)); | |||
1536 | assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand")((bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand" ) ? static_cast<void> (0) : __assert_fail ("bool(isExactFPImm<ImmIs0, ImmIs1>()) && \"Invalid operand\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1536, __PRETTY_FUNCTION__)); | |||
1537 | Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>()))); | |||
1538 | } | |||
1539 | ||||
1540 | void addImmOperands(MCInst &Inst, unsigned N) const { | |||
1541 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1541, __PRETTY_FUNCTION__)); | |||
1542 | // If this is a pageoff symrefexpr with an addend, adjust the addend | |||
1543 | // to be only the page-offset portion. Otherwise, just add the expr | |||
1544 | // as-is. | |||
1545 | addExpr(Inst, getImm()); | |||
1546 | } | |||
1547 | ||||
1548 | template <int Shift> | |||
1549 | void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const { | |||
1550 | assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1550, __PRETTY_FUNCTION__)); | |||
1551 | if (auto ShiftedVal = getShiftedVal<Shift>()) { | |||
1552 | Inst.addOperand(MCOperand::createImm(ShiftedVal->first)); | |||
1553 | Inst.addOperand(MCOperand::createImm(ShiftedVal->second)); | |||
1554 | } else if (isShiftedImm()) { | |||
1555 | addExpr(Inst, getShiftedImmVal()); | |||
1556 | Inst.addOperand(MCOperand::createImm(getShiftedImmShift())); | |||
1557 | } else { | |||
1558 | addExpr(Inst, getImm()); | |||
1559 | Inst.addOperand(MCOperand::createImm(0)); | |||
1560 | } | |||
1561 | } | |||
1562 | ||||
1563 | template <int Shift> | |||
1564 | void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const { | |||
1565 | assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1565, __PRETTY_FUNCTION__)); | |||
1566 | if (auto ShiftedVal = getShiftedVal<Shift>()) { | |||
1567 | Inst.addOperand(MCOperand::createImm(-ShiftedVal->first)); | |||
1568 | Inst.addOperand(MCOperand::createImm(ShiftedVal->second)); | |||
1569 | } else | |||
1570 | llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1570); | |||
1571 | } | |||
1572 | ||||
1573 | void addCondCodeOperands(MCInst &Inst, unsigned N) const { | |||
1574 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1574, __PRETTY_FUNCTION__)); | |||
1575 | Inst.addOperand(MCOperand::createImm(getCondCode())); | |||
1576 | } | |||
1577 | ||||
1578 | void addAdrpLabelOperands(MCInst &Inst, unsigned N) const { | |||
1579 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1579, __PRETTY_FUNCTION__)); | |||
1580 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1581 | if (!MCE) | |||
1582 | addExpr(Inst, getImm()); | |||
1583 | else | |||
1584 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12)); | |||
1585 | } | |||
1586 | ||||
1587 | void addAdrLabelOperands(MCInst &Inst, unsigned N) const { | |||
1588 | addImmOperands(Inst, N); | |||
1589 | } | |||
1590 | ||||
1591 | template<int Scale> | |||
1592 | void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const { | |||
1593 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1593, __PRETTY_FUNCTION__)); | |||
1594 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1595 | ||||
1596 | if (!MCE) { | |||
1597 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
1598 | return; | |||
1599 | } | |||
1600 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); | |||
1601 | } | |||
1602 | ||||
1603 | void addUImm6Operands(MCInst &Inst, unsigned N) const { | |||
1604 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1604, __PRETTY_FUNCTION__)); | |||
1605 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1606 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); | |||
1607 | } | |||
1608 | ||||
1609 | template <int Scale> | |||
1610 | void addImmScaledOperands(MCInst &Inst, unsigned N) const { | |||
1611 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1611, __PRETTY_FUNCTION__)); | |||
1612 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1613 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); | |||
1614 | } | |||
1615 | ||||
1616 | template <typename T> | |||
1617 | void addLogicalImmOperands(MCInst &Inst, unsigned N) const { | |||
1618 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1618, __PRETTY_FUNCTION__)); | |||
1619 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1620 | typename std::make_unsigned<T>::type Val = MCE->getValue(); | |||
1621 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); | |||
1622 | Inst.addOperand(MCOperand::createImm(encoding)); | |||
1623 | } | |||
1624 | ||||
1625 | template <typename T> | |||
1626 | void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const { | |||
1627 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1627, __PRETTY_FUNCTION__)); | |||
1628 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1629 | typename std::make_unsigned<T>::type Val = ~MCE->getValue(); | |||
1630 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); | |||
1631 | Inst.addOperand(MCOperand::createImm(encoding)); | |||
1632 | } | |||
1633 | ||||
1634 | void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const { | |||
1635 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1635, __PRETTY_FUNCTION__)); | |||
1636 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1637 | uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue()); | |||
1638 | Inst.addOperand(MCOperand::createImm(encoding)); | |||
1639 | } | |||
1640 | ||||
1641 | void addBranchTarget26Operands(MCInst &Inst, unsigned N) const { | |||
1642 | // Branch operands don't encode the low bits, so shift them off | |||
1643 | // here. If it's a label, however, just put it on directly as there's | |||
1644 | // not enough information now to do anything. | |||
1645 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1645, __PRETTY_FUNCTION__)); | |||
1646 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1647 | if (!MCE) { | |||
1648 | addExpr(Inst, getImm()); | |||
1649 | return; | |||
1650 | } | |||
1651 | assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast <void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1651, __PRETTY_FUNCTION__)); | |||
1652 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); | |||
1653 | } | |||
1654 | ||||
1655 | void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const { | |||
1656 | // Branch operands don't encode the low bits, so shift them off | |||
1657 | // here. If it's a label, however, just put it on directly as there's | |||
1658 | // not enough information now to do anything. | |||
1659 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1659, __PRETTY_FUNCTION__)); | |||
1660 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1661 | if (!MCE) { | |||
1662 | addExpr(Inst, getImm()); | |||
1663 | return; | |||
1664 | } | |||
1665 | assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast <void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1665, __PRETTY_FUNCTION__)); | |||
1666 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); | |||
1667 | } | |||
1668 | ||||
1669 | void addBranchTarget14Operands(MCInst &Inst, unsigned N) const { | |||
1670 | // Branch operands don't encode the low bits, so shift them off | |||
1671 | // here. If it's a label, however, just put it on directly as there's | |||
1672 | // not enough information now to do anything. | |||
1673 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1673, __PRETTY_FUNCTION__)); | |||
1674 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
1675 | if (!MCE) { | |||
1676 | addExpr(Inst, getImm()); | |||
1677 | return; | |||
1678 | } | |||
1679 | assert(MCE && "Invalid constant immediate operand!")((MCE && "Invalid constant immediate operand!") ? static_cast <void> (0) : __assert_fail ("MCE && \"Invalid constant immediate operand!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1679, __PRETTY_FUNCTION__)); | |||
1680 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); | |||
1681 | } | |||
1682 | ||||
1683 | void addFPImmOperands(MCInst &Inst, unsigned N) const { | |||
1684 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1684, __PRETTY_FUNCTION__)); | |||
1685 | Inst.addOperand(MCOperand::createImm( | |||
1686 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()))); | |||
1687 | } | |||
1688 | ||||
1689 | void addBarrierOperands(MCInst &Inst, unsigned N) const { | |||
1690 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1690, __PRETTY_FUNCTION__)); | |||
1691 | Inst.addOperand(MCOperand::createImm(getBarrier())); | |||
1692 | } | |||
1693 | ||||
1694 | void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const { | |||
1695 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1695, __PRETTY_FUNCTION__)); | |||
1696 | ||||
1697 | Inst.addOperand(MCOperand::createImm(SysReg.MRSReg)); | |||
1698 | } | |||
1699 | ||||
1700 | void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const { | |||
1701 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1701, __PRETTY_FUNCTION__)); | |||
1702 | ||||
1703 | Inst.addOperand(MCOperand::createImm(SysReg.MSRReg)); | |||
1704 | } | |||
1705 | ||||
1706 | void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const { | |||
1707 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1707, __PRETTY_FUNCTION__)); | |||
1708 | ||||
1709 | Inst.addOperand(MCOperand::createImm(SysReg.PStateField)); | |||
1710 | } | |||
1711 | ||||
1712 | void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const { | |||
1713 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1713, __PRETTY_FUNCTION__)); | |||
1714 | ||||
1715 | Inst.addOperand(MCOperand::createImm(SysReg.PStateField)); | |||
1716 | } | |||
1717 | ||||
1718 | void addSysCROperands(MCInst &Inst, unsigned N) const { | |||
1719 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1719, __PRETTY_FUNCTION__)); | |||
1720 | Inst.addOperand(MCOperand::createImm(getSysCR())); | |||
1721 | } | |||
1722 | ||||
1723 | void addPrefetchOperands(MCInst &Inst, unsigned N) const { | |||
1724 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1724, __PRETTY_FUNCTION__)); | |||
1725 | Inst.addOperand(MCOperand::createImm(getPrefetch())); | |||
1726 | } | |||
1727 | ||||
1728 | void addPSBHintOperands(MCInst &Inst, unsigned N) const { | |||
1729 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1729, __PRETTY_FUNCTION__)); | |||
1730 | Inst.addOperand(MCOperand::createImm(getPSBHint())); | |||
1731 | } | |||
1732 | ||||
1733 | void addBTIHintOperands(MCInst &Inst, unsigned N) const { | |||
1734 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1734, __PRETTY_FUNCTION__)); | |||
1735 | Inst.addOperand(MCOperand::createImm(getBTIHint())); | |||
1736 | } | |||
1737 | ||||
1738 | void addShifterOperands(MCInst &Inst, unsigned N) const { | |||
1739 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1739, __PRETTY_FUNCTION__)); | |||
1740 | unsigned Imm = | |||
1741 | AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount()); | |||
1742 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
1743 | } | |||
1744 | ||||
1745 | void addExtendOperands(MCInst &Inst, unsigned N) const { | |||
1746 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1746, __PRETTY_FUNCTION__)); | |||
1747 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1748 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW; | |||
1749 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); | |||
1750 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
1751 | } | |||
1752 | ||||
1753 | void addExtend64Operands(MCInst &Inst, unsigned N) const { | |||
1754 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1754, __PRETTY_FUNCTION__)); | |||
1755 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1756 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX; | |||
1757 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); | |||
1758 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
1759 | } | |||
1760 | ||||
1761 | void addMemExtendOperands(MCInst &Inst, unsigned N) const { | |||
1762 | assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1762, __PRETTY_FUNCTION__)); | |||
1763 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1764 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; | |||
1765 | Inst.addOperand(MCOperand::createImm(IsSigned)); | |||
1766 | Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0)); | |||
1767 | } | |||
1768 | ||||
1769 | // For 8-bit load/store instructions with a register offset, both the | |||
1770 | // "DoShift" and "NoShift" variants have a shift of 0. Because of this, | |||
1771 | // they're disambiguated by whether the shift was explicit or implicit rather | |||
1772 | // than its size. | |||
1773 | void addMemExtend8Operands(MCInst &Inst, unsigned N) const { | |||
1774 | assert(N == 2 && "Invalid number of operands!")((N == 2 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1774, __PRETTY_FUNCTION__)); | |||
1775 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
1776 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; | |||
1777 | Inst.addOperand(MCOperand::createImm(IsSigned)); | |||
1778 | Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount())); | |||
1779 | } | |||
1780 | ||||
1781 | template<int Shift> | |||
1782 | void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const { | |||
1783 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1783, __PRETTY_FUNCTION__)); | |||
1784 | ||||
1785 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
1786 | uint64_t Value = CE->getValue(); | |||
1787 | Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff)); | |||
1788 | } | |||
1789 | ||||
1790 | template<int Shift> | |||
1791 | void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const { | |||
1792 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1792, __PRETTY_FUNCTION__)); | |||
1793 | ||||
1794 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
1795 | uint64_t Value = CE->getValue(); | |||
1796 | Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff)); | |||
1797 | } | |||
1798 | ||||
1799 | void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const { | |||
1800 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1800, __PRETTY_FUNCTION__)); | |||
1801 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1802 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90)); | |||
1803 | } | |||
1804 | ||||
1805 | void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const { | |||
1806 | assert(N == 1 && "Invalid number of operands!")((N == 1 && "Invalid number of operands!") ? static_cast <void> (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1806, __PRETTY_FUNCTION__)); | |||
1807 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
1808 | Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180)); | |||
1809 | } | |||
1810 | ||||
1811 | void print(raw_ostream &OS) const override; | |||
1812 | ||||
1813 | static std::unique_ptr<AArch64Operand> | |||
1814 | CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) { | |||
1815 | auto Op = make_unique<AArch64Operand>(k_Token, Ctx); | |||
1816 | Op->Tok.Data = Str.data(); | |||
1817 | Op->Tok.Length = Str.size(); | |||
1818 | Op->Tok.IsSuffix = IsSuffix; | |||
1819 | Op->StartLoc = S; | |||
1820 | Op->EndLoc = S; | |||
1821 | return Op; | |||
1822 | } | |||
1823 | ||||
1824 | static std::unique_ptr<AArch64Operand> | |||
1825 | CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx, | |||
1826 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg, | |||
1827 | AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, | |||
1828 | unsigned ShiftAmount = 0, | |||
1829 | unsigned HasExplicitAmount = false) { | |||
1830 | auto Op = make_unique<AArch64Operand>(k_Register, Ctx); | |||
1831 | Op->Reg.RegNum = RegNum; | |||
1832 | Op->Reg.Kind = Kind; | |||
1833 | Op->Reg.ElementWidth = 0; | |||
1834 | Op->Reg.EqualityTy = EqTy; | |||
1835 | Op->Reg.ShiftExtend.Type = ExtTy; | |||
1836 | Op->Reg.ShiftExtend.Amount = ShiftAmount; | |||
1837 | Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount; | |||
1838 | Op->StartLoc = S; | |||
1839 | Op->EndLoc = E; | |||
1840 | return Op; | |||
1841 | } | |||
1842 | ||||
1843 | static std::unique_ptr<AArch64Operand> | |||
1844 | CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth, | |||
1845 | SMLoc S, SMLoc E, MCContext &Ctx, | |||
1846 | AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, | |||
1847 | unsigned ShiftAmount = 0, | |||
1848 | unsigned HasExplicitAmount = false) { | |||
1849 | assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && "Invalid vector kind" ) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1851, __PRETTY_FUNCTION__)) | |||
1850 | Kind == RegKind::SVEPredicateVector) &&(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && "Invalid vector kind" ) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1851, __PRETTY_FUNCTION__)) | |||
1851 | "Invalid vector kind")(((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && "Invalid vector kind" ) ? static_cast<void> (0) : __assert_fail ("(Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || Kind == RegKind::SVEPredicateVector) && \"Invalid vector kind\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1851, __PRETTY_FUNCTION__)); | |||
1852 | auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount, | |||
1853 | HasExplicitAmount); | |||
1854 | Op->Reg.ElementWidth = ElementWidth; | |||
1855 | return Op; | |||
1856 | } | |||
1857 | ||||
1858 | static std::unique_ptr<AArch64Operand> | |||
1859 | CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements, | |||
1860 | unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E, | |||
1861 | MCContext &Ctx) { | |||
1862 | auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx); | |||
1863 | Op->VectorList.RegNum = RegNum; | |||
1864 | Op->VectorList.Count = Count; | |||
1865 | Op->VectorList.NumElements = NumElements; | |||
1866 | Op->VectorList.ElementWidth = ElementWidth; | |||
1867 | Op->VectorList.RegisterKind = RegisterKind; | |||
1868 | Op->StartLoc = S; | |||
1869 | Op->EndLoc = E; | |||
1870 | return Op; | |||
1871 | } | |||
1872 | ||||
1873 | static std::unique_ptr<AArch64Operand> | |||
1874 | CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
1875 | auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx); | |||
1876 | Op->VectorIndex.Val = Idx; | |||
1877 | Op->StartLoc = S; | |||
1878 | Op->EndLoc = E; | |||
1879 | return Op; | |||
1880 | } | |||
1881 | ||||
1882 | static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S, | |||
1883 | SMLoc E, MCContext &Ctx) { | |||
1884 | auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx); | |||
1885 | Op->Imm.Val = Val; | |||
1886 | Op->StartLoc = S; | |||
1887 | Op->EndLoc = E; | |||
1888 | return Op; | |||
1889 | } | |||
1890 | ||||
1891 | static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, | |||
1892 | unsigned ShiftAmount, | |||
1893 | SMLoc S, SMLoc E, | |||
1894 | MCContext &Ctx) { | |||
1895 | auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx); | |||
1896 | Op->ShiftedImm .Val = Val; | |||
1897 | Op->ShiftedImm.ShiftAmount = ShiftAmount; | |||
1898 | Op->StartLoc = S; | |||
1899 | Op->EndLoc = E; | |||
1900 | return Op; | |||
1901 | } | |||
1902 | ||||
1903 | static std::unique_ptr<AArch64Operand> | |||
1904 | CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
1905 | auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx); | |||
1906 | Op->CondCode.Code = Code; | |||
1907 | Op->StartLoc = S; | |||
1908 | Op->EndLoc = E; | |||
1909 | return Op; | |||
1910 | } | |||
1911 | ||||
1912 | static std::unique_ptr<AArch64Operand> | |||
1913 | CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) { | |||
1914 | auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx); | |||
1915 | Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue(); | |||
1916 | Op->FPImm.IsExact = IsExact; | |||
1917 | Op->StartLoc = S; | |||
1918 | Op->EndLoc = S; | |||
1919 | return Op; | |||
1920 | } | |||
1921 | ||||
1922 | static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, | |||
1923 | StringRef Str, | |||
1924 | SMLoc S, | |||
1925 | MCContext &Ctx) { | |||
1926 | auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx); | |||
1927 | Op->Barrier.Val = Val; | |||
1928 | Op->Barrier.Data = Str.data(); | |||
1929 | Op->Barrier.Length = Str.size(); | |||
1930 | Op->StartLoc = S; | |||
1931 | Op->EndLoc = S; | |||
1932 | return Op; | |||
1933 | } | |||
1934 | ||||
1935 | static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S, | |||
1936 | uint32_t MRSReg, | |||
1937 | uint32_t MSRReg, | |||
1938 | uint32_t PStateField, | |||
1939 | MCContext &Ctx) { | |||
1940 | auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx); | |||
1941 | Op->SysReg.Data = Str.data(); | |||
1942 | Op->SysReg.Length = Str.size(); | |||
1943 | Op->SysReg.MRSReg = MRSReg; | |||
1944 | Op->SysReg.MSRReg = MSRReg; | |||
1945 | Op->SysReg.PStateField = PStateField; | |||
1946 | Op->StartLoc = S; | |||
1947 | Op->EndLoc = S; | |||
1948 | return Op; | |||
1949 | } | |||
1950 | ||||
1951 | static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S, | |||
1952 | SMLoc E, MCContext &Ctx) { | |||
1953 | auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx); | |||
1954 | Op->SysCRImm.Val = Val; | |||
1955 | Op->StartLoc = S; | |||
1956 | Op->EndLoc = E; | |||
1957 | return Op; | |||
1958 | } | |||
1959 | ||||
1960 | static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, | |||
1961 | StringRef Str, | |||
1962 | SMLoc S, | |||
1963 | MCContext &Ctx) { | |||
1964 | auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx); | |||
1965 | Op->Prefetch.Val = Val; | |||
1966 | Op->Barrier.Data = Str.data(); | |||
1967 | Op->Barrier.Length = Str.size(); | |||
1968 | Op->StartLoc = S; | |||
1969 | Op->EndLoc = S; | |||
1970 | return Op; | |||
1971 | } | |||
1972 | ||||
1973 | static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, | |||
1974 | StringRef Str, | |||
1975 | SMLoc S, | |||
1976 | MCContext &Ctx) { | |||
1977 | auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx); | |||
1978 | Op->PSBHint.Val = Val; | |||
1979 | Op->PSBHint.Data = Str.data(); | |||
1980 | Op->PSBHint.Length = Str.size(); | |||
1981 | Op->StartLoc = S; | |||
1982 | Op->EndLoc = S; | |||
1983 | return Op; | |||
1984 | } | |||
1985 | ||||
1986 | static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val, | |||
1987 | StringRef Str, | |||
1988 | SMLoc S, | |||
1989 | MCContext &Ctx) { | |||
1990 | auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx); | |||
1991 | Op->BTIHint.Val = Val << 1 | 32; | |||
1992 | Op->BTIHint.Data = Str.data(); | |||
1993 | Op->BTIHint.Length = Str.size(); | |||
1994 | Op->StartLoc = S; | |||
1995 | Op->EndLoc = S; | |||
1996 | return Op; | |||
1997 | } | |||
1998 | ||||
1999 | static std::unique_ptr<AArch64Operand> | |||
2000 | CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, | |||
2001 | bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
2002 | auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx); | |||
2003 | Op->ShiftExtend.Type = ShOp; | |||
2004 | Op->ShiftExtend.Amount = Val; | |||
2005 | Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; | |||
2006 | Op->StartLoc = S; | |||
2007 | Op->EndLoc = E; | |||
2008 | return Op; | |||
2009 | } | |||
2010 | }; | |||
2011 | ||||
2012 | } // end anonymous namespace. | |||
2013 | ||||
2014 | void AArch64Operand::print(raw_ostream &OS) const { | |||
2015 | switch (Kind) { | |||
2016 | case k_FPImm: | |||
2017 | OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue(); | |||
2018 | if (!getFPImmIsExact()) | |||
2019 | OS << " (inexact)"; | |||
2020 | OS << ">"; | |||
2021 | break; | |||
2022 | case k_Barrier: { | |||
2023 | StringRef Name = getBarrierName(); | |||
2024 | if (!Name.empty()) | |||
2025 | OS << "<barrier " << Name << ">"; | |||
2026 | else | |||
2027 | OS << "<barrier invalid #" << getBarrier() << ">"; | |||
2028 | break; | |||
2029 | } | |||
2030 | case k_Immediate: | |||
2031 | OS << *getImm(); | |||
2032 | break; | |||
2033 | case k_ShiftedImm: { | |||
2034 | unsigned Shift = getShiftedImmShift(); | |||
2035 | OS << "<shiftedimm "; | |||
2036 | OS << *getShiftedImmVal(); | |||
2037 | OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">"; | |||
2038 | break; | |||
2039 | } | |||
2040 | case k_CondCode: | |||
2041 | OS << "<condcode " << getCondCode() << ">"; | |||
2042 | break; | |||
2043 | case k_VectorList: { | |||
2044 | OS << "<vectorlist "; | |||
2045 | unsigned Reg = getVectorListStart(); | |||
2046 | for (unsigned i = 0, e = getVectorListCount(); i != e; ++i) | |||
2047 | OS << Reg + i << " "; | |||
2048 | OS << ">"; | |||
2049 | break; | |||
2050 | } | |||
2051 | case k_VectorIndex: | |||
2052 | OS << "<vectorindex " << getVectorIndex() << ">"; | |||
2053 | break; | |||
2054 | case k_SysReg: | |||
2055 | OS << "<sysreg: " << getSysReg() << '>'; | |||
2056 | break; | |||
2057 | case k_Token: | |||
2058 | OS << "'" << getToken() << "'"; | |||
2059 | break; | |||
2060 | case k_SysCR: | |||
2061 | OS << "c" << getSysCR(); | |||
2062 | break; | |||
2063 | case k_Prefetch: { | |||
2064 | StringRef Name = getPrefetchName(); | |||
2065 | if (!Name.empty()) | |||
2066 | OS << "<prfop " << Name << ">"; | |||
2067 | else | |||
2068 | OS << "<prfop invalid #" << getPrefetch() << ">"; | |||
2069 | break; | |||
2070 | } | |||
2071 | case k_PSBHint: | |||
2072 | OS << getPSBHintName(); | |||
2073 | break; | |||
2074 | case k_Register: | |||
2075 | OS << "<register " << getReg() << ">"; | |||
2076 | if (!getShiftExtendAmount() && !hasShiftExtendAmount()) | |||
2077 | break; | |||
2078 | LLVM_FALLTHROUGH[[clang::fallthrough]]; | |||
2079 | case k_BTIHint: | |||
2080 | OS << getBTIHintName(); | |||
2081 | break; | |||
2082 | case k_ShiftExtend: | |||
2083 | OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #" | |||
2084 | << getShiftExtendAmount(); | |||
2085 | if (!hasShiftExtendAmount()) | |||
2086 | OS << "<imp>"; | |||
2087 | OS << '>'; | |||
2088 | break; | |||
2089 | } | |||
2090 | } | |||
2091 | ||||
2092 | /// @name Auto-generated Match Functions | |||
2093 | /// { | |||
2094 | ||||
2095 | static unsigned MatchRegisterName(StringRef Name); | |||
2096 | ||||
2097 | /// } | |||
2098 | ||||
// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding AArch64::Qn register number, or return 0 if the name is not
// a NEON vector register.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2135 | ||||
2136 | /// Returns an optional pair of (#elements, element-width) if Suffix | |||
2137 | /// is a valid vector kind. Where the number of elements in a vector | |||
2138 | /// or the vector width is implicit or explicitly unknown (but still a | |||
2139 | /// valid suffix kind), 0 is used. | |||
2140 | static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix, | |||
2141 | RegKind VectorKind) { | |||
2142 | std::pair<int, int> Res = {-1, -1}; | |||
2143 | ||||
2144 | switch (VectorKind) { | |||
2145 | case RegKind::NeonVector: | |||
2146 | Res = | |||
2147 | StringSwitch<std::pair<int, int>>(Suffix.lower()) | |||
2148 | .Case("", {0, 0}) | |||
2149 | .Case(".1d", {1, 64}) | |||
2150 | .Case(".1q", {1, 128}) | |||
2151 | // '.2h' needed for fp16 scalar pairwise reductions | |||
2152 | .Case(".2h", {2, 16}) | |||
2153 | .Case(".2s", {2, 32}) | |||
2154 | .Case(".2d", {2, 64}) | |||
2155 | // '.4b' is another special case for the ARMv8.2a dot product | |||
2156 | // operand | |||
2157 | .Case(".4b", {4, 8}) | |||
2158 | .Case(".4h", {4, 16}) | |||
2159 | .Case(".4s", {4, 32}) | |||
2160 | .Case(".8b", {8, 8}) | |||
2161 | .Case(".8h", {8, 16}) | |||
2162 | .Case(".16b", {16, 8}) | |||
2163 | // Accept the width neutral ones, too, for verbose syntax. If those | |||
2164 | // aren't used in the right places, the token operand won't match so | |||
2165 | // all will work out. | |||
2166 | .Case(".b", {0, 8}) | |||
2167 | .Case(".h", {0, 16}) | |||
2168 | .Case(".s", {0, 32}) | |||
2169 | .Case(".d", {0, 64}) | |||
2170 | .Default({-1, -1}); | |||
2171 | break; | |||
2172 | case RegKind::SVEPredicateVector: | |||
2173 | case RegKind::SVEDataVector: | |||
2174 | Res = StringSwitch<std::pair<int, int>>(Suffix.lower()) | |||
2175 | .Case("", {0, 0}) | |||
2176 | .Case(".b", {0, 8}) | |||
2177 | .Case(".h", {0, 16}) | |||
2178 | .Case(".s", {0, 32}) | |||
2179 | .Case(".d", {0, 64}) | |||
2180 | .Case(".q", {0, 128}) | |||
2181 | .Default({-1, -1}); | |||
2182 | break; | |||
2183 | default: | |||
2184 | llvm_unreachable("Unsupported RegKind")::llvm::llvm_unreachable_internal("Unsupported RegKind", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 2184); | |||
2185 | } | |||
2186 | ||||
2187 | if (Res == std::make_pair(-1, -1)) | |||
2188 | return Optional<std::pair<int, int>>(); | |||
2189 | ||||
2190 | return Optional<std::pair<int, int>>(Res); | |||
2191 | } | |||
2192 | ||||
2193 | static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) { | |||
2194 | return parseVectorKind(Suffix, VectorKind).hasValue(); | |||
2195 | } | |||
2196 | ||||
// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
// the corresponding AArch64::Zn register number, or return 0 if the name is
// not an SVE data vector register.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2233 | ||||
// Map an SVE predicate register name ("p0".."p15", case-insensitive) to the
// corresponding AArch64::Pn register number, or return 0 if the name is not
// an SVE predicate register.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2254 | ||||
2255 | bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, | |||
2256 | SMLoc &EndLoc) { | |||
2257 | StartLoc = getLoc(); | |||
2258 | auto Res = tryParseScalarRegister(RegNo); | |||
2259 | EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
2260 | return Res != MatchOperand_Success; | |||
2261 | } | |||
2262 | ||||
2263 | // Matches a register name or register alias previously defined by '.req' | |||
2264 | unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, | |||
2265 | RegKind Kind) { | |||
2266 | unsigned RegNum = 0; | |||
2267 | if ((RegNum = matchSVEDataVectorRegName(Name))) | |||
2268 | return Kind == RegKind::SVEDataVector ? RegNum : 0; | |||
2269 | ||||
2270 | if ((RegNum = matchSVEPredicateVectorRegName(Name))) | |||
2271 | return Kind == RegKind::SVEPredicateVector ? RegNum : 0; | |||
2272 | ||||
2273 | if ((RegNum = MatchNeonVectorRegName(Name))) | |||
2274 | return Kind == RegKind::NeonVector ? RegNum : 0; | |||
2275 | ||||
2276 | // The parsed register must be of RegKind Scalar | |||
2277 | if ((RegNum = MatchRegisterName(Name))) | |||
2278 | return Kind == RegKind::Scalar ? RegNum : 0; | |||
2279 | ||||
2280 | if (!RegNum) { | |||
2281 | // Handle a few common aliases of registers. | |||
2282 | if (auto RegNum = StringSwitch<unsigned>(Name.lower()) | |||
2283 | .Case("fp", AArch64::FP) | |||
2284 | .Case("lr", AArch64::LR) | |||
2285 | .Case("x31", AArch64::XZR) | |||
2286 | .Case("w31", AArch64::WZR) | |||
2287 | .Default(0)) | |||
2288 | return Kind == RegKind::Scalar ? RegNum : 0; | |||
2289 | ||||
2290 | // Check for aliases registered via .req. Canonicalize to lower case. | |||
2291 | // That's more consistent since register names are case insensitive, and | |||
2292 | // it's how the original entry was passed in from MC/MCParser/AsmParser. | |||
2293 | auto Entry = RegisterReqs.find(Name.lower()); | |||
2294 | if (Entry == RegisterReqs.end()) | |||
2295 | return 0; | |||
2296 | ||||
2297 | // set RegNum if the match is the right kind of register | |||
2298 | if (Kind == Entry->getValue().first) | |||
2299 | RegNum = Entry->getValue().second; | |||
2300 | } | |||
2301 | return RegNum; | |||
2302 | } | |||
2303 | ||||
2304 | /// tryParseScalarRegister - Try to parse a register name. The token must be an | |||
2305 | /// Identifier when called, and if it is a register name the token is eaten and | |||
2306 | /// the register is added to the operand list. | |||
2307 | OperandMatchResultTy | |||
2308 | AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) { | |||
2309 | MCAsmParser &Parser = getParser(); | |||
2310 | const AsmToken &Tok = Parser.getTok(); | |||
2311 | if (Tok.isNot(AsmToken::Identifier)) | |||
2312 | return MatchOperand_NoMatch; | |||
2313 | ||||
2314 | std::string lowerCase = Tok.getString().lower(); | |||
2315 | unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar); | |||
2316 | if (Reg == 0) | |||
2317 | return MatchOperand_NoMatch; | |||
2318 | ||||
2319 | RegNum = Reg; | |||
2320 | Parser.Lex(); // Eat identifier token. | |||
2321 | return MatchOperand_Success; | |||
2322 | } | |||
2323 | ||||
2324 | /// tryParseSysCROperand - Try to parse a system instruction CR operand name. | |||
2325 | OperandMatchResultTy | |||
2326 | AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) { | |||
2327 | MCAsmParser &Parser = getParser(); | |||
2328 | SMLoc S = getLoc(); | |||
2329 | ||||
2330 | if (Parser.getTok().isNot(AsmToken::Identifier)) { | |||
2331 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
2332 | return MatchOperand_ParseFail; | |||
2333 | } | |||
2334 | ||||
2335 | StringRef Tok = Parser.getTok().getIdentifier(); | |||
2336 | if (Tok[0] != 'c' && Tok[0] != 'C') { | |||
2337 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
2338 | return MatchOperand_ParseFail; | |||
2339 | } | |||
2340 | ||||
2341 | uint32_t CRNum; | |||
2342 | bool BadNum = Tok.drop_front().getAsInteger(10, CRNum); | |||
2343 | if (BadNum || CRNum > 15) { | |||
2344 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
2345 | return MatchOperand_ParseFail; | |||
2346 | } | |||
2347 | ||||
2348 | Parser.Lex(); // Eat identifier token. | |||
2349 | Operands.push_back( | |||
2350 | AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext())); | |||
2351 | return MatchOperand_Success; | |||
2352 | } | |||
2353 | ||||
2354 | /// tryParsePrefetch - Try to parse a prefetch operand. | |||
2355 | template <bool IsSVEPrefetch> | |||
2356 | OperandMatchResultTy | |||
2357 | AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) { | |||
2358 | MCAsmParser &Parser = getParser(); | |||
2359 | SMLoc S = getLoc(); | |||
2360 | const AsmToken &Tok = Parser.getTok(); | |||
2361 | ||||
2362 | auto LookupByName = [](StringRef N) { | |||
2363 | if (IsSVEPrefetch) { | |||
2364 | if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N)) | |||
2365 | return Optional<unsigned>(Res->Encoding); | |||
2366 | } else if (auto Res = AArch64PRFM::lookupPRFMByName(N)) | |||
2367 | return Optional<unsigned>(Res->Encoding); | |||
2368 | return Optional<unsigned>(); | |||
2369 | }; | |||
2370 | ||||
2371 | auto LookupByEncoding = [](unsigned E) { | |||
2372 | if (IsSVEPrefetch) { | |||
2373 | if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E)) | |||
2374 | return Optional<StringRef>(Res->Name); | |||
2375 | } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E)) | |||
2376 | return Optional<StringRef>(Res->Name); | |||
2377 | return Optional<StringRef>(); | |||
2378 | }; | |||
2379 | unsigned MaxVal = IsSVEPrefetch ? 15 : 31; | |||
2380 | ||||
2381 | // Either an identifier for named values or a 5-bit immediate. | |||
2382 | // Eat optional hash. | |||
2383 | if (parseOptionalToken(AsmToken::Hash) || | |||
2384 | Tok.is(AsmToken::Integer)) { | |||
2385 | const MCExpr *ImmVal; | |||
2386 | if (getParser().parseExpression(ImmVal)) | |||
2387 | return MatchOperand_ParseFail; | |||
2388 | ||||
2389 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
2390 | if (!MCE) { | |||
2391 | TokError("immediate value expected for prefetch operand"); | |||
2392 | return MatchOperand_ParseFail; | |||
2393 | } | |||
2394 | unsigned prfop = MCE->getValue(); | |||
2395 | if (prfop > MaxVal) { | |||
2396 | TokError("prefetch operand out of range, [0," + utostr(MaxVal) + | |||
2397 | "] expected"); | |||
2398 | return MatchOperand_ParseFail; | |||
2399 | } | |||
2400 | ||||
2401 | auto PRFM = LookupByEncoding(MCE->getValue()); | |||
2402 | Operands.push_back(AArch64Operand::CreatePrefetch( | |||
2403 | prfop, PRFM.getValueOr(""), S, getContext())); | |||
2404 | return MatchOperand_Success; | |||
2405 | } | |||
2406 | ||||
2407 | if (Tok.isNot(AsmToken::Identifier)) { | |||
2408 | TokError("prefetch hint expected"); | |||
2409 | return MatchOperand_ParseFail; | |||
2410 | } | |||
2411 | ||||
2412 | auto PRFM = LookupByName(Tok.getString()); | |||
2413 | if (!PRFM) { | |||
2414 | TokError("prefetch hint expected"); | |||
2415 | return MatchOperand_ParseFail; | |||
2416 | } | |||
2417 | ||||
2418 | Parser.Lex(); // Eat identifier token. | |||
2419 | Operands.push_back(AArch64Operand::CreatePrefetch( | |||
2420 | *PRFM, Tok.getString(), S, getContext())); | |||
2421 | return MatchOperand_Success; | |||
2422 | } | |||
2423 | ||||
2424 | /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command | |||
2425 | OperandMatchResultTy | |||
2426 | AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { | |||
2427 | MCAsmParser &Parser = getParser(); | |||
2428 | SMLoc S = getLoc(); | |||
2429 | const AsmToken &Tok = Parser.getTok(); | |||
2430 | if (Tok.isNot(AsmToken::Identifier)) { | |||
2431 | TokError("invalid operand for instruction"); | |||
2432 | return MatchOperand_ParseFail; | |||
2433 | } | |||
2434 | ||||
2435 | auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString()); | |||
2436 | if (!PSB) { | |||
2437 | TokError("invalid operand for instruction"); | |||
2438 | return MatchOperand_ParseFail; | |||
2439 | } | |||
2440 | ||||
2441 | Parser.Lex(); // Eat identifier token. | |||
2442 | Operands.push_back(AArch64Operand::CreatePSBHint( | |||
2443 | PSB->Encoding, Tok.getString(), S, getContext())); | |||
2444 | return MatchOperand_Success; | |||
2445 | } | |||
2446 | ||||
2447 | /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command | |||
2448 | OperandMatchResultTy | |||
2449 | AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) { | |||
2450 | MCAsmParser &Parser = getParser(); | |||
2451 | SMLoc S = getLoc(); | |||
2452 | const AsmToken &Tok = Parser.getTok(); | |||
2453 | if (Tok.isNot(AsmToken::Identifier)) { | |||
2454 | TokError("invalid operand for instruction"); | |||
2455 | return MatchOperand_ParseFail; | |||
2456 | } | |||
2457 | ||||
2458 | auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString()); | |||
2459 | if (!BTI) { | |||
2460 | TokError("invalid operand for instruction"); | |||
2461 | return MatchOperand_ParseFail; | |||
2462 | } | |||
2463 | ||||
2464 | Parser.Lex(); // Eat identifier token. | |||
2465 | Operands.push_back(AArch64Operand::CreateBTIHint( | |||
2466 | BTI->Encoding, Tok.getString(), S, getContext())); | |||
2467 | return MatchOperand_Success; | |||
2468 | } | |||
2469 | ||||
2470 | /// tryParseAdrpLabel - Parse and validate a source label for the ADRP | |||
2471 | /// instruction. | |||
2472 | OperandMatchResultTy | |||
2473 | AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) { | |||
2474 | MCAsmParser &Parser = getParser(); | |||
2475 | SMLoc S = getLoc(); | |||
2476 | const MCExpr *Expr; | |||
| ||||
2477 | ||||
2478 | if (Parser.getTok().is(AsmToken::Hash)) { | |||
2479 | Parser.Lex(); // Eat hash token. | |||
2480 | } | |||
2481 | ||||
2482 | if (parseSymbolicImmVal(Expr)) | |||
2483 | return MatchOperand_ParseFail; | |||
2484 | ||||
2485 | AArch64MCExpr::VariantKind ELFRefKind; | |||
2486 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
2487 | int64_t Addend; | |||
2488 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
| ||||
2489 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && | |||
2490 | ELFRefKind == AArch64MCExpr::VK_INVALID) { | |||
2491 | // No modifier was specified at all; this is the syntax for an ELF basic | |||
2492 | // ADRP relocation (unfortunately). | |||
2493 | Expr = | |||
2494 | AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext()); | |||
2495 | } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE || | |||
2496 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) && | |||
2497 | Addend != 0) { | |||
2498 | Error(S, "gotpage label reference not allowed an addend"); | |||
2499 | return MatchOperand_ParseFail; | |||
2500 | } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE && | |||
2501 | DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE && | |||
2502 | DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE && | |||
2503 | ELFRefKind != AArch64MCExpr::VK_GOT_PAGE && | |||
2504 | ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE && | |||
2505 | ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) { | |||
2506 | // The operand must be an @page or @gotpage qualified symbolref. | |||
2507 | Error(S, "page or gotpage label reference expected"); | |||
2508 | return MatchOperand_ParseFail; | |||
2509 | } | |||
2510 | } | |||
2511 | ||||
2512 | // We have either a label reference possibly with addend or an immediate. The | |||
2513 | // addend is a raw value here. The linker will adjust it to only reference the | |||
2514 | // page. | |||
2515 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
2516 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); | |||
2517 | ||||
2518 | return MatchOperand_Success; | |||
2519 | } | |||
2520 | ||||
2521 | /// tryParseAdrLabel - Parse and validate a source label for the ADR | |||
2522 | /// instruction. | |||
2523 | OperandMatchResultTy | |||
2524 | AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { | |||
2525 | SMLoc S = getLoc(); | |||
2526 | const MCExpr *Expr; | |||
2527 | ||||
2528 | // Leave anything with a bracket to the default for SVE | |||
2529 | if (getParser().getTok().is(AsmToken::LBrac)) | |||
2530 | return MatchOperand_NoMatch; | |||
2531 | ||||
2532 | if (getParser().getTok().is(AsmToken::Hash)) | |||
2533 | getParser().Lex(); // Eat hash token. | |||
2534 | ||||
2535 | if (parseSymbolicImmVal(Expr)) | |||
2536 | return MatchOperand_ParseFail; | |||
2537 | ||||
2538 | AArch64MCExpr::VariantKind ELFRefKind; | |||
2539 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
2540 | int64_t Addend; | |||
2541 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
2542 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && | |||
2543 | ELFRefKind == AArch64MCExpr::VK_INVALID) { | |||
2544 | // No modifier was specified at all; this is the syntax for an ELF basic | |||
2545 | // ADR relocation (unfortunately). | |||
2546 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext()); | |||
2547 | } else { | |||
2548 | Error(S, "unexpected adr label"); | |||
2549 | return MatchOperand_ParseFail; | |||
2550 | } | |||
2551 | } | |||
2552 | ||||
2553 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
2554 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); | |||
2555 | return MatchOperand_Success; | |||
2556 | } | |||
2557 | ||||
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Two spellings are accepted:
///   * "0x" integer literal: the raw 8-bit AArch64 FP-immediate encoding
///     (0..255), decoded via getFPImmFloat;
///   * a decimal real (or integer) literal, optionally '#'-prefixed and
///     optionally negated.
/// When AddFPZeroAsLiteral is set, +0.0 is pushed as the two literal tokens
/// "#0" and ".0" instead of an FPImm operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this might be some other operand kind entirely,
    // so report NoMatch rather than a hard error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // A hex literal is the pre-encoded 8-bit immediate; negation makes no
    // sense here since the sign is part of the encoding.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto Status =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      // The second argument records whether the value was represented
      // exactly (no rounding during conversion).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, Status == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2610 | ||||
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
///
/// The immediate may be a symbolic expression. The only shift accepted is
/// 'lsl' with a non-negative amount; 'lsl #0' degrades to a plain immediate.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No shift suffix follows; emit the bare immediate.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  // NOTE(review): 'Imm != 0' compares the MCExpr pointer against null, which
  // is always true after a successful parse; presumably the intent was to
  // test the immediate's *value* — confirm before changing, as callers may
  // rely on 'lsl #0' always producing a plain Imm operand.
  if (ShiftAmount == 0 && Imm != 0) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2674 | ||||
2675 | /// parseCondCodeString - Parse a Condition Code string. | |||
2676 | AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) { | |||
2677 | AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) | |||
2678 | .Case("eq", AArch64CC::EQ) | |||
2679 | .Case("ne", AArch64CC::NE) | |||
2680 | .Case("cs", AArch64CC::HS) | |||
2681 | .Case("hs", AArch64CC::HS) | |||
2682 | .Case("cc", AArch64CC::LO) | |||
2683 | .Case("lo", AArch64CC::LO) | |||
2684 | .Case("mi", AArch64CC::MI) | |||
2685 | .Case("pl", AArch64CC::PL) | |||
2686 | .Case("vs", AArch64CC::VS) | |||
2687 | .Case("vc", AArch64CC::VC) | |||
2688 | .Case("hi", AArch64CC::HI) | |||
2689 | .Case("ls", AArch64CC::LS) | |||
2690 | .Case("ge", AArch64CC::GE) | |||
2691 | .Case("lt", AArch64CC::LT) | |||
2692 | .Case("gt", AArch64CC::GT) | |||
2693 | .Case("le", AArch64CC::LE) | |||
2694 | .Case("al", AArch64CC::AL) | |||
2695 | .Case("nv", AArch64CC::NV) | |||
2696 | .Default(AArch64CC::Invalid); | |||
2697 | ||||
2698 | if (CC == AArch64CC::Invalid && | |||
2699 | getSTI().getFeatureBits()[AArch64::FeatureSVE]) | |||
2700 | CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) | |||
2701 | .Case("none", AArch64CC::EQ) | |||
2702 | .Case("any", AArch64CC::NE) | |||
2703 | .Case("nlast", AArch64CC::HS) | |||
2704 | .Case("last", AArch64CC::LO) | |||
2705 | .Case("first", AArch64CC::MI) | |||
2706 | .Case("nfrst", AArch64CC::PL) | |||
2707 | .Case("pmore", AArch64CC::HI) | |||
2708 | .Case("plast", AArch64CC::LS) | |||
2709 | .Case("tcont", AArch64CC::GE) | |||
2710 | .Case("tstop", AArch64CC::LT) | |||
2711 | .Default(AArch64CC::Invalid); | |||
2712 | ||||
2713 | return CC; | |||
2714 | } | |||
2715 | ||||
2716 | /// parseCondCode - Parse a Condition Code operand. | |||
2717 | bool AArch64AsmParser::parseCondCode(OperandVector &Operands, | |||
2718 | bool invertCondCode) { | |||
2719 | MCAsmParser &Parser = getParser(); | |||
2720 | SMLoc S = getLoc(); | |||
2721 | const AsmToken &Tok = Parser.getTok(); | |||
2722 | assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier")((Tok.is(AsmToken::Identifier) && "Token is not an Identifier" ) ? static_cast<void> (0) : __assert_fail ("Tok.is(AsmToken::Identifier) && \"Token is not an Identifier\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 2722, __PRETTY_FUNCTION__)); | |||
2723 | ||||
2724 | StringRef Cond = Tok.getString(); | |||
2725 | AArch64CC::CondCode CC = parseCondCodeString(Cond); | |||
2726 | if (CC == AArch64CC::Invalid) | |||
2727 | return TokError("invalid condition code"); | |||
2728 | Parser.Lex(); // Eat identifier token. | |||
2729 | ||||
2730 | if (invertCondCode) { | |||
2731 | if (CC == AArch64CC::AL || CC == AArch64CC::NV) | |||
2732 | return TokError("condition codes AL and NV are invalid for this instruction"); | |||
2733 | CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC)); | |||
2734 | } | |||
2735 | ||||
2736 | Operands.push_back( | |||
2737 | AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext())); | |||
2738 | return false; | |||
2739 | } | |||
2740 | ||||
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Recognizes both shift (lsl/lsr/asr/ror/msl) and extend
/// (uxtb/.../sxtx) specifiers. Shifts require an explicit immediate;
/// extends default to an implicit #0 when no amount is given.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex(); // Eat the shift/extend specifier.

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  // The amount may be an arbitrary expression, but it must fold to a
  // compile-time constant.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2814 | ||||
// Maps ".arch_extension"-style extension names to the subtarget features they
// enable. Entries with an empty feature set are recognized but not yet
// supported (see FIXME below).
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"sve", {AArch64::FeatureSVE}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2842 | ||||
2843 | static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) { | |||
2844 | if (FBS[AArch64::HasV8_1aOps]) | |||
2845 | Str += "ARMv8.1a"; | |||
2846 | else if (FBS[AArch64::HasV8_2aOps]) | |||
2847 | Str += "ARMv8.2a"; | |||
2848 | else if (FBS[AArch64::HasV8_3aOps]) | |||
2849 | Str += "ARMv8.3a"; | |||
2850 | else if (FBS[AArch64::HasV8_4aOps]) | |||
2851 | Str += "ARMv8.4a"; | |||
2852 | else if (FBS[AArch64::HasV8_5aOps]) | |||
2853 | Str += "ARMv8.5a"; | |||
2854 | else { | |||
2855 | auto ext = std::find_if(std::begin(ExtensionMap), | |||
2856 | std::end(ExtensionMap), | |||
2857 | [&](const Extension& e) | |||
2858 | // Use & in case multiple features are enabled | |||
2859 | { return (FBS & e.Features) != FeatureBitset(); } | |||
2860 | ); | |||
2861 | ||||
2862 | Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)"; | |||
2863 | } | |||
2864 | } | |||
2865 | ||||
2866 | void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands, | |||
2867 | SMLoc S) { | |||
2868 | const uint16_t Op2 = Encoding & 7; | |||
2869 | const uint16_t Cm = (Encoding & 0x78) >> 3; | |||
2870 | const uint16_t Cn = (Encoding & 0x780) >> 7; | |||
2871 | const uint16_t Op1 = (Encoding & 0x3800) >> 11; | |||
2872 | ||||
2873 | const MCExpr *Expr = MCConstantExpr::create(Op1, getContext()); | |||
2874 | ||||
2875 | Operands.push_back( | |||
2876 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); | |||
2877 | Operands.push_back( | |||
2878 | AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); | |||
2879 | Operands.push_back( | |||
2880 | AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); | |||
2881 | Expr = MCConstantExpr::create(Op2, getContext()); | |||
2882 | Operands.push_back( | |||
2883 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); | |||
2884 | } | |||
2885 | ||||
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Also handles the v8.5 prediction-restriction aliases (CFP/DVP/CPP).
/// Emits the "sys" token plus the decoded op1/Cn/Cm/op2 operands, then an
/// optional register operand whose presence must match the named op.
/// Returns true on error.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  // Tok is a live reference to the parser's current token; it tracks the
  // cursor across the Lex() calls below.
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Named op exists but needs a feature the subtarget lacks; report which.
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The PRCTX table stores only the upper encoding bits; op2 is implied by
    // the mnemonic (CFP=4, DVP=5, CPP=7).
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" (e.g. TLBI VMALLE1) take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
2983 | ||||
/// tryParseBarrierOperand - Parse the operand of a DSB/DMB/ISB/TSB barrier.
///
/// Accepts either a #imm in the range [0, 15] or a named barrier option.
/// ISB only allows the name 'sy'; TSB only allows 'csync'.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
    // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // If the encoding has a canonical name, record it for printing.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
    // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option
  return MatchOperand_Success;
}
3040 | ||||
/// tryParseSysReg - Parse a system register operand for MRS/MSR.
///
/// A known register name yields its encoding for whichever directions
/// (readable/writeable) it supports; otherwise the generic
/// S<op0>_<op1>_<Cn>_<Cm>_<op2> spelling is tried. PState names are resolved
/// independently since MSR-immediate shares this operand position.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // -1 marks "not usable in this direction"; parseGenericRegister also
  // returns -1 on failure.
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3069 | ||||
/// tryParseNeonVectorRegister - Parse a vector register operand.
///
/// Parses "vN" optionally followed by a ".kind" qualifier (e.g. v0.8b) and an
/// optional "[idx]" lane index. The qualifier, when present, is additionally
/// pushed as a literal token operand. Returns true if no vector register
/// operand could be parsed (so the caller can try other operand forms).
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // An empty Kind is valid here (bare "vN"); parseVectorKind resolves it to
  // a default element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3102 | ||||
3103 | OperandMatchResultTy | |||
3104 | AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) { | |||
3105 | SMLoc SIdx = getLoc(); | |||
3106 | if (parseOptionalToken(AsmToken::LBrac)) { | |||
3107 | const MCExpr *ImmVal; | |||
3108 | if (getParser().parseExpression(ImmVal)) | |||
3109 | return MatchOperand_NoMatch; | |||
3110 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
3111 | if (!MCE) { | |||
3112 | TokError("immediate value expected for vector index"); | |||
3113 | return MatchOperand_ParseFail;; | |||
3114 | } | |||
3115 | ||||
3116 | SMLoc E = getLoc(); | |||
3117 | ||||
3118 | if (parseToken(AsmToken::RBrac, "']' expected")) | |||
3119 | return MatchOperand_ParseFail;; | |||
3120 | ||||
3121 | Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx, | |||
3122 | E, getContext())); | |||
3123 | return MatchOperand_Success; | |||
3124 | } | |||
3125 | ||||
3126 | return MatchOperand_NoMatch; | |||
3127 | } | |||
3128 | ||||
// tryParseVectorRegister - Try to parse a vector register name with
// optional kind specifier. If it is a register specifier, eat the token
// and return it.
//
// On success, Reg receives the matched register number and Kind the ".kind"
// suffix (including the dot) if one was present, empty otherwise. The token
// is only consumed on success.
OperandMatchResultTy
AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                         RegKind MatchKind) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Name = Tok.getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);

  if (RegNum) {
    if (Next != StringRef::npos) {
      // Keep the '.' in Kind; downstream parseVectorKind expects it.
      Kind = Name.slice(Next, StringRef::npos);
      if (!isValidVectorKind(Kind, MatchKind)) {
        TokError("invalid vector kind qualifier");
        return MatchOperand_ParseFail;
      }
    }
    Parser.Lex(); // Eat the register token.

    Reg = RegNum;
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
3164 | ||||
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Parses "pN[.kind]" and, when followed by a predication qualifier, the
/// literal "/z" (zeroing) or "/m" (merging) suffix as two extra token
/// operands. A size suffix and a predication qualifier are mutually
/// exclusive.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
      AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3217 | ||||
3218 | /// parseRegister - Parse a register operand. | |||
3219 | bool AArch64AsmParser::parseRegister(OperandVector &Operands) { | |||
3220 | // Try for a Neon vector register. | |||
3221 | if (!tryParseNeonVectorRegister(Operands)) | |||
3222 | return false; | |||
3223 | ||||
3224 | // Otherwise try for a scalar register. | |||
3225 | if (tryParseGPROperand<false>(Operands) == MatchOperand_Success) | |||
3226 | return false; | |||
3227 | ||||
3228 | return true; | |||
3229 | } | |||
3230 | ||||
3231 | bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) { | |||
3232 | MCAsmParser &Parser = getParser(); | |||
3233 | bool HasELFModifier = false; | |||
3234 | AArch64MCExpr::VariantKind RefKind; | |||
3235 | ||||
3236 | if (parseOptionalToken(AsmToken::Colon)) { | |||
3237 | HasELFModifier = true; | |||
3238 | ||||
3239 | if (Parser.getTok().isNot(AsmToken::Identifier)) | |||
3240 | return TokError("expect relocation specifier in operand after ':'"); | |||
3241 | ||||
3242 | std::string LowerCase = Parser.getTok().getIdentifier().lower(); | |||
3243 | RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase) | |||
3244 | .Case("lo12", AArch64MCExpr::VK_LO12) | |||
3245 | .Case("abs_g3", AArch64MCExpr::VK_ABS_G3) | |||
3246 | .Case("abs_g2", AArch64MCExpr::VK_ABS_G2) | |||
3247 | .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S) | |||
3248 | .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC) | |||
3249 | .Case("abs_g1", AArch64MCExpr::VK_ABS_G1) | |||
3250 | .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S) | |||
3251 | .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC) | |||
3252 | .Case("abs_g0", AArch64MCExpr::VK_ABS_G0) | |||
3253 | .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S) | |||
3254 | .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC) | |||
3255 | .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2) | |||
3256 | .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1) | |||
3257 | .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC) | |||
3258 | .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0) | |||
3259 | .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC) | |||
3260 | .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12) | |||
3261 | .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12) | |||
3262 | .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC) | |||
3263 | .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2) | |||
3264 | .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1) | |||
3265 | .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC) | |||
3266 | .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0) | |||
3267 | .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC) | |||
3268 | .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12) | |||
3269 | .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12) | |||
3270 | .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC) | |||
3271 | .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12) | |||
3272 | .Case("got", AArch64MCExpr::VK_GOT_PAGE) | |||
3273 | .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12) | |||
3274 | .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE) | |||
3275 | .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC) | |||
3276 | .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1) | |||
3277 | .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC) | |||
3278 | .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE) | |||
3279 | .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12) | |||
3280 | .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12) | |||
3281 | .Default(AArch64MCExpr::VK_INVALID); | |||
3282 | ||||
3283 | if (RefKind == AArch64MCExpr::VK_INVALID) | |||
3284 | return TokError("expect relocation specifier in operand after ':'"); | |||
3285 | ||||
3286 | Parser.Lex(); // Eat identifier | |||
3287 | ||||
3288 | if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier")) | |||
3289 | return true; | |||
3290 | } | |||
3291 | ||||
3292 | if (getParser().parseExpression(ImmVal)) | |||
3293 | return true; | |||
3294 | ||||
3295 | if (HasELFModifier) | |||
3296 | ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext()); | |||
3297 | ||||
3298 | return false; | |||
3299 | } | |||
3300 | ||||
3301 | template <RegKind VectorKind> | |||
3302 | OperandMatchResultTy | |||
3303 | AArch64AsmParser::tryParseVectorList(OperandVector &Operands, | |||
3304 | bool ExpectMatch) { | |||
3305 | MCAsmParser &Parser = getParser(); | |||
3306 | if (!Parser.getTok().is(AsmToken::LCurly)) | |||
3307 | return MatchOperand_NoMatch; | |||
3308 | ||||
3309 | // Wrapper around parse function | |||
3310 | auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc, | |||
3311 | bool NoMatchIsError) { | |||
3312 | auto RegTok = Parser.getTok(); | |||
3313 | auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind); | |||
3314 | if (ParseRes == MatchOperand_Success) { | |||
3315 | if (parseVectorKind(Kind, VectorKind)) | |||
3316 | return ParseRes; | |||
3317 | llvm_unreachable("Expected a valid vector kind")::llvm::llvm_unreachable_internal("Expected a valid vector kind" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 3317); | |||
3318 | } | |||
3319 | ||||
3320 | if (RegTok.isNot(AsmToken::Identifier) || | |||
3321 | ParseRes == MatchOperand_ParseFail || | |||
3322 | (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) { | |||
3323 | Error(Loc, "vector register expected"); | |||
3324 | return MatchOperand_ParseFail; | |||
3325 | } | |||
3326 | ||||
3327 | return MatchOperand_NoMatch; | |||
3328 | }; | |||
3329 | ||||
3330 | SMLoc S = getLoc(); | |||
3331 | auto LCurly = Parser.getTok(); | |||
3332 | Parser.Lex(); // Eat left bracket token. | |||
3333 | ||||
3334 | StringRef Kind; | |||
3335 | unsigned FirstReg; | |||
3336 | auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch); | |||
3337 | ||||
3338 | // Put back the original left bracket if there was no match, so that | |||
3339 | // different types of list-operands can be matched (e.g. SVE, Neon). | |||
3340 | if (ParseRes == MatchOperand_NoMatch) | |||
3341 | Parser.getLexer().UnLex(LCurly); | |||
3342 | ||||
3343 | if (ParseRes != MatchOperand_Success) | |||
3344 | return ParseRes; | |||
3345 | ||||
3346 | int64_t PrevReg = FirstReg; | |||
3347 | unsigned Count = 1; | |||
3348 | ||||
3349 | if (parseOptionalToken(AsmToken::Minus)) { | |||
3350 | SMLoc Loc = getLoc(); | |||
3351 | StringRef NextKind; | |||
3352 | ||||
3353 | unsigned Reg; | |||
3354 | ParseRes = ParseVector(Reg, NextKind, getLoc(), true); | |||
3355 | if (ParseRes != MatchOperand_Success) | |||
3356 | return ParseRes; | |||
3357 | ||||
3358 | // Any Kind suffices must match on all regs in the list. | |||
3359 | if (Kind != NextKind) { | |||
3360 | Error(Loc, "mismatched register size suffix"); | |||
3361 | return MatchOperand_ParseFail; | |||
3362 | } | |||
3363 | ||||
3364 | unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg); | |||
3365 | ||||
3366 | if (Space == 0 || Space > 3) { | |||
3367 | Error(Loc, "invalid number of vectors"); | |||
3368 | return MatchOperand_ParseFail; | |||
3369 | } | |||
3370 | ||||
3371 | Count += Space; | |||
3372 | } | |||
3373 | else { | |||
3374 | while (parseOptionalToken(AsmToken::Comma)) { | |||
3375 | SMLoc Loc = getLoc(); | |||
3376 | StringRef NextKind; | |||
3377 | unsigned Reg; | |||
3378 | ParseRes = ParseVector(Reg, NextKind, getLoc(), true); | |||
3379 | if (ParseRes != MatchOperand_Success) | |||
3380 | return ParseRes; | |||
3381 | ||||
3382 | // Any Kind suffices must match on all regs in the list. | |||
3383 | if (Kind != NextKind) { | |||
3384 | Error(Loc, "mismatched register size suffix"); | |||
3385 | return MatchOperand_ParseFail; | |||
3386 | } | |||
3387 | ||||
3388 | // Registers must be incremental (with wraparound at 31) | |||
3389 | if (getContext().getRegisterInfo()->getEncodingValue(Reg) != | |||
3390 | (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) { | |||
3391 | Error(Loc, "registers must be sequential"); | |||
3392 | return MatchOperand_ParseFail; | |||
3393 | } | |||
3394 | ||||
3395 | PrevReg = Reg; | |||
3396 | ++Count; | |||
3397 | } | |||
3398 | } | |||
3399 | ||||
3400 | if (parseToken(AsmToken::RCurly, "'}' expected")) | |||
3401 | return MatchOperand_ParseFail; | |||
3402 | ||||
3403 | if (Count > 4) { | |||
3404 | Error(S, "invalid number of vectors"); | |||
3405 | return MatchOperand_ParseFail; | |||
3406 | } | |||
3407 | ||||
3408 | unsigned NumElements = 0; | |||
3409 | unsigned ElementWidth = 0; | |||
3410 | if (!Kind.empty()) { | |||
3411 | if (const auto &VK = parseVectorKind(Kind, VectorKind)) | |||
3412 | std::tie(NumElements, ElementWidth) = *VK; | |||
3413 | } | |||
3414 | ||||
3415 | Operands.push_back(AArch64Operand::CreateVectorList( | |||
3416 | FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(), | |||
3417 | getContext())); | |||
3418 | ||||
3419 | return MatchOperand_Success; | |||
3420 | } | |||
3421 | ||||
3422 | /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions. | |||
3423 | bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) { | |||
3424 | auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true); | |||
3425 | if (ParseRes != MatchOperand_Success) | |||
3426 | return true; | |||
3427 | ||||
3428 | return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; | |||
3429 | } | |||
3430 | ||||
3431 | OperandMatchResultTy | |||
3432 | AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) { | |||
3433 | SMLoc StartLoc = getLoc(); | |||
3434 | ||||
3435 | unsigned RegNum; | |||
3436 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); | |||
3437 | if (Res != MatchOperand_Success) | |||
3438 | return Res; | |||
3439 | ||||
3440 | if (!parseOptionalToken(AsmToken::Comma)) { | |||
3441 | Operands.push_back(AArch64Operand::CreateReg( | |||
3442 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
3443 | return MatchOperand_Success; | |||
3444 | } | |||
3445 | ||||
3446 | parseOptionalToken(AsmToken::Hash); | |||
3447 | ||||
3448 | if (getParser().getTok().isNot(AsmToken::Integer)) { | |||
3449 | Error(getLoc(), "index must be absent or #0"); | |||
3450 | return MatchOperand_ParseFail; | |||
3451 | } | |||
3452 | ||||
3453 | const MCExpr *ImmVal; | |||
3454 | if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) || | |||
3455 | cast<MCConstantExpr>(ImmVal)->getValue() != 0) { | |||
3456 | Error(getLoc(), "index must be absent or #0"); | |||
3457 | return MatchOperand_ParseFail; | |||
3458 | } | |||
3459 | ||||
3460 | Operands.push_back(AArch64Operand::CreateReg( | |||
3461 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
3462 | return MatchOperand_Success; | |||
3463 | } | |||
3464 | ||||
3465 | template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy> | |||
3466 | OperandMatchResultTy | |||
3467 | AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) { | |||
3468 | SMLoc StartLoc = getLoc(); | |||
3469 | ||||
3470 | unsigned RegNum; | |||
3471 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); | |||
3472 | if (Res != MatchOperand_Success) | |||
3473 | return Res; | |||
3474 | ||||
3475 | // No shift/extend is the default. | |||
3476 | if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) { | |||
3477 | Operands.push_back(AArch64Operand::CreateReg( | |||
3478 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy)); | |||
3479 | return MatchOperand_Success; | |||
3480 | } | |||
3481 | ||||
3482 | // Eat the comma | |||
3483 | getParser().Lex(); | |||
3484 | ||||
3485 | // Match the shift | |||
3486 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; | |||
3487 | Res = tryParseOptionalShiftExtend(ExtOpnd); | |||
3488 | if (Res != MatchOperand_Success) | |||
3489 | return Res; | |||
3490 | ||||
3491 | auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get()); | |||
3492 | Operands.push_back(AArch64Operand::CreateReg( | |||
3493 | RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy, | |||
3494 | Ext->getShiftExtendType(), Ext->getShiftExtendAmount(), | |||
3495 | Ext->hasShiftExtendAmount())); | |||
3496 | ||||
3497 | return MatchOperand_Success; | |||
3498 | } | |||
3499 | ||||
3500 | bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) { | |||
3501 | MCAsmParser &Parser = getParser(); | |||
3502 | ||||
3503 | // Some SVE instructions have a decoration after the immediate, i.e. | |||
3504 | // "mul vl". We parse them here and add tokens, which must be present in the | |||
3505 | // asm string in the tablegen instruction. | |||
3506 | bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl"); | |||
3507 | bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash); | |||
3508 | if (!Parser.getTok().getString().equals_lower("mul") || | |||
3509 | !(NextIsVL || NextIsHash)) | |||
3510 | return true; | |||
3511 | ||||
3512 | Operands.push_back( | |||
3513 | AArch64Operand::CreateToken("mul", false, getLoc(), getContext())); | |||
3514 | Parser.Lex(); // Eat the "mul" | |||
3515 | ||||
3516 | if (NextIsVL) { | |||
3517 | Operands.push_back( | |||
3518 | AArch64Operand::CreateToken("vl", false, getLoc(), getContext())); | |||
3519 | Parser.Lex(); // Eat the "vl" | |||
3520 | return false; | |||
3521 | } | |||
3522 | ||||
3523 | if (NextIsHash) { | |||
3524 | Parser.Lex(); // Eat the # | |||
3525 | SMLoc S = getLoc(); | |||
3526 | ||||
3527 | // Parse immediate operand. | |||
3528 | const MCExpr *ImmVal; | |||
3529 | if (!Parser.parseExpression(ImmVal)) | |||
3530 | if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) { | |||
3531 | Operands.push_back(AArch64Operand::CreateImm( | |||
3532 | MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(), | |||
3533 | getContext())); | |||
3534 | return MatchOperand_Success; | |||
3535 | } | |||
3536 | } | |||
3537 | ||||
3538 | return Error(getLoc(), "expected 'vl' or '#<imm>'"); | |||
3539 | } | |||
3540 | ||||
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic. Returns true on failure.
/// \p isCondCode      - the operand at this position is a condition code.
/// \p invertCondCode  - the parsed condition code must be inverted (used for
///                      aliases like cset/cinc whose underlying instruction
///                      takes the opposite condition).
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly modifier-prefixed) immediate
    // expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // The matcher expects the literal "#0" and ".0" as two raw tokens.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // NOTE(review): S is never assigned on this path, so the immediates
    // below carry a default-constructed start location — confirm intended.
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3712 | ||||
3713 | bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1, | |||
3714 | const MCParsedAsmOperand &Op2) const { | |||
3715 | auto &AOp1 = static_cast<const AArch64Operand&>(Op1); | |||
3716 | auto &AOp2 = static_cast<const AArch64Operand&>(Op2); | |||
3717 | if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg && | |||
3718 | AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg) | |||
3719 | return MCTargetAsmParser::regsEqual(Op1, Op2); | |||
3720 | ||||
3721 | assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&((AOp1.isScalarReg() && AOp2.isScalarReg() && "Testing equality of non-scalar registers not supported") ? static_cast <void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 3722, __PRETTY_FUNCTION__)) | |||
3722 | "Testing equality of non-scalar registers not supported")((AOp1.isScalarReg() && AOp2.isScalarReg() && "Testing equality of non-scalar registers not supported") ? static_cast <void> (0) : __assert_fail ("AOp1.isScalarReg() && AOp2.isScalarReg() && \"Testing equality of non-scalar registers not supported\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 3722, __PRETTY_FUNCTION__)); | |||
3723 | ||||
3724 | // Check if a registers match their sub/super register classes. | |||
3725 | if (AOp1.getRegEqualityTy() == EqualsSuperReg) | |||
3726 | return getXRegFromWReg(Op1.getReg()) == Op2.getReg(); | |||
3727 | if (AOp1.getRegEqualityTy() == EqualsSubReg) | |||
3728 | return getWRegFromXReg(Op1.getReg()) == Op2.getReg(); | |||
3729 | if (AOp2.getRegEqualityTy() == EqualsSuperReg) | |||
3730 | return getXRegFromWReg(Op2.getReg()) == Op1.getReg(); | |||
3731 | if (AOp2.getRegEqualityTy() == EqualsSubReg) | |||
3732 | return getWRegFromXReg(Op2.getReg()) == Op1.getReg(); | |||
3733 | ||||
3734 | return false; | |||
3735 | } | |||
3736 | ||||
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Returns true on failure.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize shorthand conditional branches (e.g. "beq") to the dotted
  // "b.<cc>" form so the condition-code splitting below handles them.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The operand position N determines
      // whether it must be treated as a condition code (see flags above).
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3872 | ||||
3873 | static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) { | |||
3874 | assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31))(((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31 )) ? static_cast<void> (0) : __assert_fail ("(ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 3874, __PRETTY_FUNCTION__)); | |||
3875 | return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) || | |||
3876 | (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) || | |||
3877 | (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) || | |||
3878 | (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) || | |||
3879 | (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) || | |||
3880 | (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0)); | |||
3881 | } | |||
3882 | ||||
3883 | // FIXME: This entire function is a giant hack to provide us with decent | |||
3884 | // operand range validation/diagnostics until TableGen/MC can be extended | |||
3885 | // to support autogeneration of this kind of validation. | |||
3886 | bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc, | |||
3887 | SmallVectorImpl<SMLoc> &Loc) { | |||
3888 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); | |||
3889 | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); | |||
3890 | ||||
3891 | // A prefix only applies to the instruction following it. Here we extract | |||
3892 | // prefix information for the next instruction before validating the current | |||
3893 | // one so that in the case of failure we don't erronously continue using the | |||
3894 | // current prefix. | |||
3895 | PrefixInfo Prefix = NextPrefix; | |||
3896 | NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags); | |||
3897 | ||||
3898 | // Before validating the instruction in isolation we run through the rules | |||
3899 | // applicable when it follows a prefix instruction. | |||
3900 | // NOTE: brk & hlt can be prefixed but require no additional validation. | |||
3901 | if (Prefix.isActive() && | |||
3902 | (Inst.getOpcode() != AArch64::BRK) && | |||
3903 | (Inst.getOpcode() != AArch64::HLT)) { | |||
3904 | ||||
3905 | // Prefixed intructions must have a destructive operand. | |||
3906 | if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) == | |||
3907 | AArch64::NotDestructive) | |||
3908 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
3909 | " movprfx, suggest replacing movprfx with mov"); | |||
3910 | ||||
3911 | // Destination operands must match. | |||
3912 | if (Inst.getOperand(0).getReg() != Prefix.getDstReg()) | |||
3913 | return Error(Loc[0], "instruction is unpredictable when following a" | |||
3914 | " movprfx writing to a different destination"); | |||
3915 | ||||
3916 | // Destination operand must not be used in any other location. | |||
3917 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) { | |||
3918 | if (Inst.getOperand(i).isReg() && | |||
3919 | (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) && | |||
3920 | isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg())) | |||
3921 | return Error(Loc[0], "instruction is unpredictable when following a" | |||
3922 | " movprfx and destination also used as non-destructive" | |||
3923 | " source"); | |||
3924 | } | |||
3925 | ||||
3926 | auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID]; | |||
3927 | if (Prefix.isPredicated()) { | |||
3928 | int PgIdx = -1; | |||
3929 | ||||
3930 | // Find the instructions general predicate. | |||
3931 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) | |||
3932 | if (Inst.getOperand(i).isReg() && | |||
3933 | PPRRegClass.contains(Inst.getOperand(i).getReg())) { | |||
3934 | PgIdx = i; | |||
3935 | break; | |||
3936 | } | |||
3937 | ||||
3938 | // Instruction must be predicated if the movprfx is predicated. | |||
3939 | if (PgIdx == -1 || | |||
3940 | (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone) | |||
3941 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
3942 | " predicated movprfx, suggest using unpredicated movprfx"); | |||
3943 | ||||
3944 | // Instruction must use same general predicate as the movprfx. | |||
3945 | if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg()) | |||
3946 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
3947 | " predicated movprfx using a different general predicate"); | |||
3948 | ||||
3949 | // Instruction element type must match the movprfx. | |||
3950 | if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize()) | |||
3951 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
3952 | " predicated movprfx with a different element size"); | |||
3953 | } | |||
3954 | } | |||
3955 | ||||
3956 | // Check for indexed addressing modes w/ the base register being the | |||
3957 | // same as a destination/source register or pair load where | |||
3958 | // the Rt == Rt2. All of those are undefined behaviour. | |||
3959 | switch (Inst.getOpcode()) { | |||
3960 | case AArch64::LDPSWpre: | |||
3961 | case AArch64::LDPWpost: | |||
3962 | case AArch64::LDPWpre: | |||
3963 | case AArch64::LDPXpost: | |||
3964 | case AArch64::LDPXpre: { | |||
3965 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
3966 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
3967 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
3968 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
3969 | return Error(Loc[0], "unpredictable LDP instruction, writeback base " | |||
3970 | "is also a destination"); | |||
3971 | if (RI->isSubRegisterEq(Rn, Rt2)) | |||
3972 | return Error(Loc[1], "unpredictable LDP instruction, writeback base " | |||
3973 | "is also a destination"); | |||
3974 | LLVM_FALLTHROUGH[[clang::fallthrough]]; | |||
3975 | } | |||
3976 | case AArch64::LDPDi: | |||
3977 | case AArch64::LDPQi: | |||
3978 | case AArch64::LDPSi: | |||
3979 | case AArch64::LDPSWi: | |||
3980 | case AArch64::LDPWi: | |||
3981 | case AArch64::LDPXi: { | |||
3982 | unsigned Rt = Inst.getOperand(0).getReg(); | |||
3983 | unsigned Rt2 = Inst.getOperand(1).getReg(); | |||
3984 | if (Rt == Rt2) | |||
3985 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); | |||
3986 | break; | |||
3987 | } | |||
3988 | case AArch64::LDPDpost: | |||
3989 | case AArch64::LDPDpre: | |||
3990 | case AArch64::LDPQpost: | |||
3991 | case AArch64::LDPQpre: | |||
3992 | case AArch64::LDPSpost: | |||
3993 | case AArch64::LDPSpre: | |||
3994 | case AArch64::LDPSWpost: { | |||
3995 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
3996 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
3997 | if (Rt == Rt2) | |||
3998 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); | |||
3999 | break; | |||
4000 | } | |||
4001 | case AArch64::STPDpost: | |||
4002 | case AArch64::STPDpre: | |||
4003 | case AArch64::STPQpost: | |||
4004 | case AArch64::STPQpre: | |||
4005 | case AArch64::STPSpost: | |||
4006 | case AArch64::STPSpre: | |||
4007 | case AArch64::STPWpost: | |||
4008 | case AArch64::STPWpre: | |||
4009 | case AArch64::STPXpost: | |||
4010 | case AArch64::STPXpre: { | |||
4011 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
4012 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
4013 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
4014 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
4015 | return Error(Loc[0], "unpredictable STP instruction, writeback base " | |||
4016 | "is also a source"); | |||
4017 | if (RI->isSubRegisterEq(Rn, Rt2)) | |||
4018 | return Error(Loc[1], "unpredictable STP instruction, writeback base " | |||
4019 | "is also a source"); | |||
4020 | break; | |||
4021 | } | |||
4022 | case AArch64::LDRBBpre: | |||
4023 | case AArch64::LDRBpre: | |||
4024 | case AArch64::LDRHHpre: | |||
4025 | case AArch64::LDRHpre: | |||
4026 | case AArch64::LDRSBWpre: | |||
4027 | case AArch64::LDRSBXpre: | |||
4028 | case AArch64::LDRSHWpre: | |||
4029 | case AArch64::LDRSHXpre: | |||
4030 | case AArch64::LDRSWpre: | |||
4031 | case AArch64::LDRWpre: | |||
4032 | case AArch64::LDRXpre: | |||
4033 | case AArch64::LDRBBpost: | |||
4034 | case AArch64::LDRBpost: | |||
4035 | case AArch64::LDRHHpost: | |||
4036 | case AArch64::LDRHpost: | |||
4037 | case AArch64::LDRSBWpost: | |||
4038 | case AArch64::LDRSBXpost: | |||
4039 | case AArch64::LDRSHWpost: | |||
4040 | case AArch64::LDRSHXpost: | |||
4041 | case AArch64::LDRSWpost: | |||
4042 | case AArch64::LDRWpost: | |||
4043 | case AArch64::LDRXpost: { | |||
4044 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
4045 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
4046 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
4047 | return Error(Loc[0], "unpredictable LDR instruction, writeback base " | |||
4048 | "is also a source"); | |||
4049 | break; | |||
4050 | } | |||
4051 | case AArch64::STRBBpost: | |||
4052 | case AArch64::STRBpost: | |||
4053 | case AArch64::STRHHpost: | |||
4054 | case AArch64::STRHpost: | |||
4055 | case AArch64::STRWpost: | |||
4056 | case AArch64::STRXpost: | |||
4057 | case AArch64::STRBBpre: | |||
4058 | case AArch64::STRBpre: | |||
4059 | case AArch64::STRHHpre: | |||
4060 | case AArch64::STRHpre: | |||
4061 | case AArch64::STRWpre: | |||
4062 | case AArch64::STRXpre: { | |||
4063 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
4064 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
4065 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
4066 | return Error(Loc[0], "unpredictable STR instruction, writeback base " | |||
4067 | "is also a source"); | |||
4068 | break; | |||
4069 | } | |||
4070 | case AArch64::STXRB: | |||
4071 | case AArch64::STXRH: | |||
4072 | case AArch64::STXRW: | |||
4073 | case AArch64::STXRX: | |||
4074 | case AArch64::STLXRB: | |||
4075 | case AArch64::STLXRH: | |||
4076 | case AArch64::STLXRW: | |||
4077 | case AArch64::STLXRX: { | |||
4078 | unsigned Rs = Inst.getOperand(0).getReg(); | |||
4079 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
4080 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
4081 | if (RI->isSubRegisterEq(Rt, Rs) || | |||
4082 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) | |||
4083 | return Error(Loc[0], | |||
4084 | "unpredictable STXR instruction, status is also a source"); | |||
4085 | break; | |||
4086 | } | |||
4087 | case AArch64::STXPW: | |||
4088 | case AArch64::STXPX: | |||
4089 | case AArch64::STLXPW: | |||
4090 | case AArch64::STLXPX: { | |||
4091 | unsigned Rs = Inst.getOperand(0).getReg(); | |||
4092 | unsigned Rt1 = Inst.getOperand(1).getReg(); | |||
4093 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
4094 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
4095 | if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) || | |||
4096 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) | |||
4097 | return Error(Loc[0], | |||
4098 | "unpredictable STXP instruction, status is also a source"); | |||
4099 | break; | |||
4100 | } | |||
4101 | } | |||
4102 | ||||
4103 | ||||
4104 | // Now check immediate ranges. Separate from the above as there is overlap | |||
4105 | // in the instructions being checked and this keeps the nested conditionals | |||
4106 | // to a minimum. | |||
4107 | switch (Inst.getOpcode()) { | |||
4108 | case AArch64::ADDSWri: | |||
4109 | case AArch64::ADDSXri: | |||
4110 | case AArch64::ADDWri: | |||
4111 | case AArch64::ADDXri: | |||
4112 | case AArch64::SUBSWri: | |||
4113 | case AArch64::SUBSXri: | |||
4114 | case AArch64::SUBWri: | |||
4115 | case AArch64::SUBXri: { | |||
4116 | // Annoyingly we can't do this in the isAddSubImm predicate, so there is | |||
4117 | // some slight duplication here. | |||
4118 | if (Inst.getOperand(2).isExpr()) { | |||
4119 | const MCExpr *Expr = Inst.getOperand(2).getExpr(); | |||
4120 | AArch64MCExpr::VariantKind ELFRefKind; | |||
4121 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
4122 | int64_t Addend; | |||
4123 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
4124 | ||||
4125 | // Only allow these with ADDXri. | |||
4126 | if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || | |||
4127 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) && | |||
4128 | Inst.getOpcode() == AArch64::ADDXri) | |||
4129 | return false; | |||
4130 | ||||
4131 | // Only allow these with ADDXri/ADDWri | |||
4132 | if ((ELFRefKind == AArch64MCExpr::VK_LO12 || | |||
4133 | ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 || | |||
4134 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || | |||
4135 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || | |||
4136 | ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 || | |||
4137 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || | |||
4138 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || | |||
4139 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || | |||
4140 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || | |||
4141 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) && | |||
4142 | (Inst.getOpcode() == AArch64::ADDXri || | |||
4143 | Inst.getOpcode() == AArch64::ADDWri)) | |||
4144 | return false; | |||
4145 | ||||
4146 | // Don't allow symbol refs in the immediate field otherwise | |||
4147 | // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of | |||
4148 | // operands of the original instruction (i.e. 'add w0, w1, borked' vs | |||
4149 | // 'cmp w0, 'borked') | |||
4150 | return Error(Loc.back(), "invalid immediate expression"); | |||
4151 | } | |||
4152 | // We don't validate more complex expressions here | |||
4153 | } | |||
4154 | return false; | |||
4155 | } | |||
4156 | default: | |||
4157 | return false; | |||
4158 | } | |||
4159 | } | |||
4160 | ||||
4161 | static std::string AArch64MnemonicSpellCheck(StringRef S, | |||
4162 | const FeatureBitset &FBS, | |||
4163 | unsigned VariantID = 0); | |||
4164 | ||||
4165 | bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, | |||
4166 | uint64_t ErrorInfo, | |||
4167 | OperandVector &Operands) { | |||
4168 | switch (ErrCode) { | |||
4169 | case Match_InvalidTiedOperand: { | |||
4170 | RegConstraintEqualityTy EqTy = | |||
4171 | static_cast<const AArch64Operand &>(*Operands[ErrorInfo]) | |||
4172 | .getRegEqualityTy(); | |||
4173 | switch (EqTy) { | |||
4174 | case RegConstraintEqualityTy::EqualsSubReg: | |||
4175 | return Error(Loc, "operand must be 64-bit form of destination register"); | |||
4176 | case RegConstraintEqualityTy::EqualsSuperReg: | |||
4177 | return Error(Loc, "operand must be 32-bit form of destination register"); | |||
4178 | case RegConstraintEqualityTy::EqualsReg: | |||
4179 | return Error(Loc, "operand must match destination register"); | |||
4180 | } | |||
4181 | llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4181); | |||
4182 | } | |||
4183 | case Match_MissingFeature: | |||
4184 | return Error(Loc, | |||
4185 | "instruction requires a CPU feature not currently enabled"); | |||
4186 | case Match_InvalidOperand: | |||
4187 | return Error(Loc, "invalid operand for instruction"); | |||
4188 | case Match_InvalidSuffix: | |||
4189 | return Error(Loc, "invalid type suffix for instruction"); | |||
4190 | case Match_InvalidCondCode: | |||
4191 | return Error(Loc, "expected AArch64 condition code"); | |||
4192 | case Match_AddSubRegExtendSmall: | |||
4193 | return Error(Loc, | |||
4194 | "expected '[su]xt[bhw]' with optional integer in range [0, 4]"); | |||
4195 | case Match_AddSubRegExtendLarge: | |||
4196 | return Error(Loc, | |||
4197 | "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]"); | |||
4198 | case Match_AddSubSecondSource: | |||
4199 | return Error(Loc, | |||
4200 | "expected compatible register, symbol or integer in range [0, 4095]"); | |||
4201 | case Match_LogicalSecondSource: | |||
4202 | return Error(Loc, "expected compatible register or logical immediate"); | |||
4203 | case Match_InvalidMovImm32Shift: | |||
4204 | return Error(Loc, "expected 'lsl' with optional integer 0 or 16"); | |||
4205 | case Match_InvalidMovImm64Shift: | |||
4206 | return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48"); | |||
4207 | case Match_AddSubRegShift32: | |||
4208 | return Error(Loc, | |||
4209 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]"); | |||
4210 | case Match_AddSubRegShift64: | |||
4211 | return Error(Loc, | |||
4212 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]"); | |||
4213 | case Match_InvalidFPImm: | |||
4214 | return Error(Loc, | |||
4215 | "expected compatible register or floating-point constant"); | |||
4216 | case Match_InvalidMemoryIndexedSImm6: | |||
4217 | return Error(Loc, "index must be an integer in range [-32, 31]."); | |||
4218 | case Match_InvalidMemoryIndexedSImm5: | |||
4219 | return Error(Loc, "index must be an integer in range [-16, 15]."); | |||
4220 | case Match_InvalidMemoryIndexed1SImm4: | |||
4221 | return Error(Loc, "index must be an integer in range [-8, 7]."); | |||
4222 | case Match_InvalidMemoryIndexed2SImm4: | |||
4223 | return Error(Loc, "index must be a multiple of 2 in range [-16, 14]."); | |||
4224 | case Match_InvalidMemoryIndexed3SImm4: | |||
4225 | return Error(Loc, "index must be a multiple of 3 in range [-24, 21]."); | |||
4226 | case Match_InvalidMemoryIndexed4SImm4: | |||
4227 | return Error(Loc, "index must be a multiple of 4 in range [-32, 28]."); | |||
4228 | case Match_InvalidMemoryIndexed16SImm4: | |||
4229 | return Error(Loc, "index must be a multiple of 16 in range [-128, 112]."); | |||
4230 | case Match_InvalidMemoryIndexed1SImm6: | |||
4231 | return Error(Loc, "index must be an integer in range [-32, 31]."); | |||
4232 | case Match_InvalidMemoryIndexedSImm8: | |||
4233 | return Error(Loc, "index must be an integer in range [-128, 127]."); | |||
4234 | case Match_InvalidMemoryIndexedSImm9: | |||
4235 | return Error(Loc, "index must be an integer in range [-256, 255]."); | |||
4236 | case Match_InvalidMemoryIndexed16SImm9: | |||
4237 | return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080]."); | |||
4238 | case Match_InvalidMemoryIndexed8SImm10: | |||
4239 | return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088]."); | |||
4240 | case Match_InvalidMemoryIndexed4SImm7: | |||
4241 | return Error(Loc, "index must be a multiple of 4 in range [-256, 252]."); | |||
4242 | case Match_InvalidMemoryIndexed8SImm7: | |||
4243 | return Error(Loc, "index must be a multiple of 8 in range [-512, 504]."); | |||
4244 | case Match_InvalidMemoryIndexed16SImm7: | |||
4245 | return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008]."); | |||
4246 | case Match_InvalidMemoryIndexed8UImm5: | |||
4247 | return Error(Loc, "index must be a multiple of 8 in range [0, 248]."); | |||
4248 | case Match_InvalidMemoryIndexed4UImm5: | |||
4249 | return Error(Loc, "index must be a multiple of 4 in range [0, 124]."); | |||
4250 | case Match_InvalidMemoryIndexed2UImm5: | |||
4251 | return Error(Loc, "index must be a multiple of 2 in range [0, 62]."); | |||
4252 | case Match_InvalidMemoryIndexed8UImm6: | |||
4253 | return Error(Loc, "index must be a multiple of 8 in range [0, 504]."); | |||
4254 | case Match_InvalidMemoryIndexed16UImm6: | |||
4255 | return Error(Loc, "index must be a multiple of 16 in range [0, 1008]."); | |||
4256 | case Match_InvalidMemoryIndexed4UImm6: | |||
4257 | return Error(Loc, "index must be a multiple of 4 in range [0, 252]."); | |||
4258 | case Match_InvalidMemoryIndexed2UImm6: | |||
4259 | return Error(Loc, "index must be a multiple of 2 in range [0, 126]."); | |||
4260 | case Match_InvalidMemoryIndexed1UImm6: | |||
4261 | return Error(Loc, "index must be in range [0, 63]."); | |||
4262 | case Match_InvalidMemoryWExtend8: | |||
4263 | return Error(Loc, | |||
4264 | "expected 'uxtw' or 'sxtw' with optional shift of #0"); | |||
4265 | case Match_InvalidMemoryWExtend16: | |||
4266 | return Error(Loc, | |||
4267 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1"); | |||
4268 | case Match_InvalidMemoryWExtend32: | |||
4269 | return Error(Loc, | |||
4270 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2"); | |||
4271 | case Match_InvalidMemoryWExtend64: | |||
4272 | return Error(Loc, | |||
4273 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3"); | |||
4274 | case Match_InvalidMemoryWExtend128: | |||
4275 | return Error(Loc, | |||
4276 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4"); | |||
4277 | case Match_InvalidMemoryXExtend8: | |||
4278 | return Error(Loc, | |||
4279 | "expected 'lsl' or 'sxtx' with optional shift of #0"); | |||
4280 | case Match_InvalidMemoryXExtend16: | |||
4281 | return Error(Loc, | |||
4282 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #1"); | |||
4283 | case Match_InvalidMemoryXExtend32: | |||
4284 | return Error(Loc, | |||
4285 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #2"); | |||
4286 | case Match_InvalidMemoryXExtend64: | |||
4287 | return Error(Loc, | |||
4288 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #3"); | |||
4289 | case Match_InvalidMemoryXExtend128: | |||
4290 | return Error(Loc, | |||
4291 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #4"); | |||
4292 | case Match_InvalidMemoryIndexed1: | |||
4293 | return Error(Loc, "index must be an integer in range [0, 4095]."); | |||
4294 | case Match_InvalidMemoryIndexed2: | |||
4295 | return Error(Loc, "index must be a multiple of 2 in range [0, 8190]."); | |||
4296 | case Match_InvalidMemoryIndexed4: | |||
4297 | return Error(Loc, "index must be a multiple of 4 in range [0, 16380]."); | |||
4298 | case Match_InvalidMemoryIndexed8: | |||
4299 | return Error(Loc, "index must be a multiple of 8 in range [0, 32760]."); | |||
4300 | case Match_InvalidMemoryIndexed16: | |||
4301 | return Error(Loc, "index must be a multiple of 16 in range [0, 65520]."); | |||
4302 | case Match_InvalidImm0_1: | |||
4303 | return Error(Loc, "immediate must be an integer in range [0, 1]."); | |||
4304 | case Match_InvalidImm0_7: | |||
4305 | return Error(Loc, "immediate must be an integer in range [0, 7]."); | |||
4306 | case Match_InvalidImm0_15: | |||
4307 | return Error(Loc, "immediate must be an integer in range [0, 15]."); | |||
4308 | case Match_InvalidImm0_31: | |||
4309 | return Error(Loc, "immediate must be an integer in range [0, 31]."); | |||
4310 | case Match_InvalidImm0_63: | |||
4311 | return Error(Loc, "immediate must be an integer in range [0, 63]."); | |||
4312 | case Match_InvalidImm0_127: | |||
4313 | return Error(Loc, "immediate must be an integer in range [0, 127]."); | |||
4314 | case Match_InvalidImm0_255: | |||
4315 | return Error(Loc, "immediate must be an integer in range [0, 255]."); | |||
4316 | case Match_InvalidImm0_65535: | |||
4317 | return Error(Loc, "immediate must be an integer in range [0, 65535]."); | |||
4318 | case Match_InvalidImm1_8: | |||
4319 | return Error(Loc, "immediate must be an integer in range [1, 8]."); | |||
4320 | case Match_InvalidImm1_16: | |||
4321 | return Error(Loc, "immediate must be an integer in range [1, 16]."); | |||
4322 | case Match_InvalidImm1_32: | |||
4323 | return Error(Loc, "immediate must be an integer in range [1, 32]."); | |||
4324 | case Match_InvalidImm1_64: | |||
4325 | return Error(Loc, "immediate must be an integer in range [1, 64]."); | |||
4326 | case Match_InvalidSVEAddSubImm8: | |||
4327 | return Error(Loc, "immediate must be an integer in range [0, 255]" | |||
4328 | " with a shift amount of 0"); | |||
4329 | case Match_InvalidSVEAddSubImm16: | |||
4330 | case Match_InvalidSVEAddSubImm32: | |||
4331 | case Match_InvalidSVEAddSubImm64: | |||
4332 | return Error(Loc, "immediate must be an integer in range [0, 255] or a " | |||
4333 | "multiple of 256 in range [256, 65280]"); | |||
4334 | case Match_InvalidSVECpyImm8: | |||
4335 | return Error(Loc, "immediate must be an integer in range [-128, 255]" | |||
4336 | " with a shift amount of 0"); | |||
4337 | case Match_InvalidSVECpyImm16: | |||
4338 | return Error(Loc, "immediate must be an integer in range [-128, 127] or a " | |||
4339 | "multiple of 256 in range [-32768, 65280]"); | |||
4340 | case Match_InvalidSVECpyImm32: | |||
4341 | case Match_InvalidSVECpyImm64: | |||
4342 | return Error(Loc, "immediate must be an integer in range [-128, 127] or a " | |||
4343 | "multiple of 256 in range [-32768, 32512]"); | |||
4344 | case Match_InvalidIndexRange1_1: | |||
4345 | return Error(Loc, "expected lane specifier '[1]'"); | |||
4346 | case Match_InvalidIndexRange0_15: | |||
4347 | return Error(Loc, "vector lane must be an integer in range [0, 15]."); | |||
4348 | case Match_InvalidIndexRange0_7: | |||
4349 | return Error(Loc, "vector lane must be an integer in range [0, 7]."); | |||
4350 | case Match_InvalidIndexRange0_3: | |||
4351 | return Error(Loc, "vector lane must be an integer in range [0, 3]."); | |||
4352 | case Match_InvalidIndexRange0_1: | |||
4353 | return Error(Loc, "vector lane must be an integer in range [0, 1]."); | |||
4354 | case Match_InvalidSVEIndexRange0_63: | |||
4355 | return Error(Loc, "vector lane must be an integer in range [0, 63]."); | |||
4356 | case Match_InvalidSVEIndexRange0_31: | |||
4357 | return Error(Loc, "vector lane must be an integer in range [0, 31]."); | |||
4358 | case Match_InvalidSVEIndexRange0_15: | |||
4359 | return Error(Loc, "vector lane must be an integer in range [0, 15]."); | |||
4360 | case Match_InvalidSVEIndexRange0_7: | |||
4361 | return Error(Loc, "vector lane must be an integer in range [0, 7]."); | |||
4362 | case Match_InvalidSVEIndexRange0_3: | |||
4363 | return Error(Loc, "vector lane must be an integer in range [0, 3]."); | |||
4364 | case Match_InvalidLabel: | |||
4365 | return Error(Loc, "expected label or encodable integer pc offset"); | |||
4366 | case Match_MRS: | |||
4367 | return Error(Loc, "expected readable system register"); | |||
4368 | case Match_MSR: | |||
4369 | return Error(Loc, "expected writable system register or pstate"); | |||
4370 | case Match_InvalidComplexRotationEven: | |||
4371 | return Error(Loc, "complex rotation must be 0, 90, 180 or 270."); | |||
4372 | case Match_InvalidComplexRotationOdd: | |||
4373 | return Error(Loc, "complex rotation must be 90 or 270."); | |||
4374 | case Match_MnemonicFail: { | |||
4375 | std::string Suggestion = AArch64MnemonicSpellCheck( | |||
4376 | ((AArch64Operand &)*Operands[0]).getToken(), | |||
4377 | ComputeAvailableFeatures(STI->getFeatureBits())); | |||
4378 | return Error(Loc, "unrecognized instruction mnemonic" + Suggestion); | |||
4379 | } | |||
4380 | case Match_InvalidGPR64shifted8: | |||
4381 | return Error(Loc, "register must be x0..x30 or xzr, without shift"); | |||
4382 | case Match_InvalidGPR64shifted16: | |||
4383 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'"); | |||
4384 | case Match_InvalidGPR64shifted32: | |||
4385 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'"); | |||
4386 | case Match_InvalidGPR64shifted64: | |||
4387 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'"); | |||
4388 | case Match_InvalidGPR64NoXZRshifted8: | |||
4389 | return Error(Loc, "register must be x0..x30 without shift"); | |||
4390 | case Match_InvalidGPR64NoXZRshifted16: | |||
4391 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'"); | |||
4392 | case Match_InvalidGPR64NoXZRshifted32: | |||
4393 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'"); | |||
4394 | case Match_InvalidGPR64NoXZRshifted64: | |||
4395 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'"); | |||
4396 | case Match_InvalidZPR32UXTW8: | |||
4397 | case Match_InvalidZPR32SXTW8: | |||
4398 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'"); | |||
4399 | case Match_InvalidZPR32UXTW16: | |||
4400 | case Match_InvalidZPR32SXTW16: | |||
4401 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'"); | |||
4402 | case Match_InvalidZPR32UXTW32: | |||
4403 | case Match_InvalidZPR32SXTW32: | |||
4404 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'"); | |||
4405 | case Match_InvalidZPR32UXTW64: | |||
4406 | case Match_InvalidZPR32SXTW64: | |||
4407 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'"); | |||
4408 | case Match_InvalidZPR64UXTW8: | |||
4409 | case Match_InvalidZPR64SXTW8: | |||
4410 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'"); | |||
4411 | case Match_InvalidZPR64UXTW16: | |||
4412 | case Match_InvalidZPR64SXTW16: | |||
4413 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'"); | |||
4414 | case Match_InvalidZPR64UXTW32: | |||
4415 | case Match_InvalidZPR64SXTW32: | |||
4416 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'"); | |||
4417 | case Match_InvalidZPR64UXTW64: | |||
4418 | case Match_InvalidZPR64SXTW64: | |||
4419 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'"); | |||
4420 | case Match_InvalidZPR32LSL8: | |||
4421 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'"); | |||
4422 | case Match_InvalidZPR32LSL16: | |||
4423 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'"); | |||
4424 | case Match_InvalidZPR32LSL32: | |||
4425 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'"); | |||
4426 | case Match_InvalidZPR32LSL64: | |||
4427 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'"); | |||
4428 | case Match_InvalidZPR64LSL8: | |||
4429 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'"); | |||
4430 | case Match_InvalidZPR64LSL16: | |||
4431 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'"); | |||
4432 | case Match_InvalidZPR64LSL32: | |||
4433 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'"); | |||
4434 | case Match_InvalidZPR64LSL64: | |||
4435 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'"); | |||
4436 | case Match_InvalidZPR0: | |||
4437 | return Error(Loc, "expected register without element width suffix"); | |||
4438 | case Match_InvalidZPR8: | |||
4439 | case Match_InvalidZPR16: | |||
4440 | case Match_InvalidZPR32: | |||
4441 | case Match_InvalidZPR64: | |||
4442 | case Match_InvalidZPR128: | |||
4443 | return Error(Loc, "invalid element width"); | |||
4444 | case Match_InvalidZPR_3b8: | |||
4445 | return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b"); | |||
4446 | case Match_InvalidZPR_3b16: | |||
4447 | return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h"); | |||
4448 | case Match_InvalidZPR_3b32: | |||
4449 | return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s"); | |||
4450 | case Match_InvalidZPR_4b16: | |||
4451 | return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h"); | |||
4452 | case Match_InvalidZPR_4b32: | |||
4453 | return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s"); | |||
4454 | case Match_InvalidZPR_4b64: | |||
4455 | return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d"); | |||
4456 | case Match_InvalidSVEPattern: | |||
4457 | return Error(Loc, "invalid predicate pattern"); | |||
4458 | case Match_InvalidSVEPredicateAnyReg: | |||
4459 | case Match_InvalidSVEPredicateBReg: | |||
4460 | case Match_InvalidSVEPredicateHReg: | |||
4461 | case Match_InvalidSVEPredicateSReg: | |||
4462 | case Match_InvalidSVEPredicateDReg: | |||
4463 | return Error(Loc, "invalid predicate register."); | |||
4464 | case Match_InvalidSVEPredicate3bAnyReg: | |||
4465 | case Match_InvalidSVEPredicate3bBReg: | |||
4466 | case Match_InvalidSVEPredicate3bHReg: | |||
4467 | case Match_InvalidSVEPredicate3bSReg: | |||
4468 | case Match_InvalidSVEPredicate3bDReg: | |||
4469 | return Error(Loc, "restricted predicate has range [0, 7]."); | |||
4470 | case Match_InvalidSVEExactFPImmOperandHalfOne: | |||
4471 | return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0."); | |||
4472 | case Match_InvalidSVEExactFPImmOperandHalfTwo: | |||
4473 | return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0."); | |||
4474 | case Match_InvalidSVEExactFPImmOperandZeroOne: | |||
4475 | return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0."); | |||
4476 | default: | |||
4477 | llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4477); | |||
4478 | } | |||
4479 | } | |||
4480 | ||||
// Defined in the TableGen-generated matcher (AArch64GenAsmMatcher.inc):
// maps a subtarget feature bit index to its user-facing name, used below to
// build "instruction requires: <feature>" diagnostics.
static const char *getSubtargetFeatureName(uint64_t Val);
4482 | ||||
4483 | bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, | |||
4484 | OperandVector &Operands, | |||
4485 | MCStreamer &Out, | |||
4486 | uint64_t &ErrorInfo, | |||
4487 | bool MatchingInlineAsm) { | |||
4488 | assert(!Operands.empty() && "Unexpect empty operand list!")((!Operands.empty() && "Unexpect empty operand list!" ) ? static_cast<void> (0) : __assert_fail ("!Operands.empty() && \"Unexpect empty operand list!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4488, __PRETTY_FUNCTION__)); | |||
4489 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]); | |||
4490 | assert(Op.isToken() && "Leading operand should always be a mnemonic!")((Op.isToken() && "Leading operand should always be a mnemonic!" ) ? static_cast<void> (0) : __assert_fail ("Op.isToken() && \"Leading operand should always be a mnemonic!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4490, __PRETTY_FUNCTION__)); | |||
4491 | ||||
4492 | StringRef Tok = Op.getToken(); | |||
4493 | unsigned NumOperands = Operands.size(); | |||
4494 | ||||
4495 | if (NumOperands == 4 && Tok == "lsl") { | |||
4496 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); | |||
4497 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
4498 | if (Op2.isScalarReg() && Op3.isImm()) { | |||
4499 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); | |||
4500 | if (Op3CE) { | |||
4501 | uint64_t Op3Val = Op3CE->getValue(); | |||
4502 | uint64_t NewOp3Val = 0; | |||
4503 | uint64_t NewOp4Val = 0; | |||
4504 | if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains( | |||
4505 | Op2.getReg())) { | |||
4506 | NewOp3Val = (32 - Op3Val) & 0x1f; | |||
4507 | NewOp4Val = 31 - Op3Val; | |||
4508 | } else { | |||
4509 | NewOp3Val = (64 - Op3Val) & 0x3f; | |||
4510 | NewOp4Val = 63 - Op3Val; | |||
4511 | } | |||
4512 | ||||
4513 | const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext()); | |||
4514 | const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext()); | |||
4515 | ||||
4516 | Operands[0] = AArch64Operand::CreateToken( | |||
4517 | "ubfm", false, Op.getStartLoc(), getContext()); | |||
4518 | Operands.push_back(AArch64Operand::CreateImm( | |||
4519 | NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext())); | |||
4520 | Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(), | |||
4521 | Op3.getEndLoc(), getContext()); | |||
4522 | } | |||
4523 | } | |||
4524 | } else if (NumOperands == 4 && Tok == "bfc") { | |||
4525 | // FIXME: Horrible hack to handle BFC->BFM alias. | |||
4526 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
4527 | AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]); | |||
4528 | AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]); | |||
4529 | ||||
4530 | if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) { | |||
4531 | const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm()); | |||
4532 | const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm()); | |||
4533 | ||||
4534 | if (LSBCE && WidthCE) { | |||
4535 | uint64_t LSB = LSBCE->getValue(); | |||
4536 | uint64_t Width = WidthCE->getValue(); | |||
4537 | ||||
4538 | uint64_t RegWidth = 0; | |||
4539 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
4540 | Op1.getReg())) | |||
4541 | RegWidth = 64; | |||
4542 | else | |||
4543 | RegWidth = 32; | |||
4544 | ||||
4545 | if (LSB >= RegWidth) | |||
4546 | return Error(LSBOp.getStartLoc(), | |||
4547 | "expected integer in range [0, 31]"); | |||
4548 | if (Width < 1 || Width > RegWidth) | |||
4549 | return Error(WidthOp.getStartLoc(), | |||
4550 | "expected integer in range [1, 32]"); | |||
4551 | ||||
4552 | uint64_t ImmR = 0; | |||
4553 | if (RegWidth == 32) | |||
4554 | ImmR = (32 - LSB) & 0x1f; | |||
4555 | else | |||
4556 | ImmR = (64 - LSB) & 0x3f; | |||
4557 | ||||
4558 | uint64_t ImmS = Width - 1; | |||
4559 | ||||
4560 | if (ImmR != 0 && ImmS >= ImmR) | |||
4561 | return Error(WidthOp.getStartLoc(), | |||
4562 | "requested insert overflows register"); | |||
4563 | ||||
4564 | const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext()); | |||
4565 | const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext()); | |||
4566 | Operands[0] = AArch64Operand::CreateToken( | |||
4567 | "bfm", false, Op.getStartLoc(), getContext()); | |||
4568 | Operands[2] = AArch64Operand::CreateReg( | |||
4569 | RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar, | |||
4570 | SMLoc(), SMLoc(), getContext()); | |||
4571 | Operands[3] = AArch64Operand::CreateImm( | |||
4572 | ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext()); | |||
4573 | Operands.emplace_back( | |||
4574 | AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(), | |||
4575 | WidthOp.getEndLoc(), getContext())); | |||
4576 | } | |||
4577 | } | |||
4578 | } else if (NumOperands == 5) { | |||
4579 | // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and | |||
4580 | // UBFIZ -> UBFM aliases. | |||
4581 | if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") { | |||
4582 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
4583 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
4584 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); | |||
4585 | ||||
4586 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { | |||
4587 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); | |||
4588 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); | |||
4589 | ||||
4590 | if (Op3CE && Op4CE) { | |||
4591 | uint64_t Op3Val = Op3CE->getValue(); | |||
4592 | uint64_t Op4Val = Op4CE->getValue(); | |||
4593 | ||||
4594 | uint64_t RegWidth = 0; | |||
4595 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
4596 | Op1.getReg())) | |||
4597 | RegWidth = 64; | |||
4598 | else | |||
4599 | RegWidth = 32; | |||
4600 | ||||
4601 | if (Op3Val >= RegWidth) | |||
4602 | return Error(Op3.getStartLoc(), | |||
4603 | "expected integer in range [0, 31]"); | |||
4604 | if (Op4Val < 1 || Op4Val > RegWidth) | |||
4605 | return Error(Op4.getStartLoc(), | |||
4606 | "expected integer in range [1, 32]"); | |||
4607 | ||||
4608 | uint64_t NewOp3Val = 0; | |||
4609 | if (RegWidth == 32) | |||
4610 | NewOp3Val = (32 - Op3Val) & 0x1f; | |||
4611 | else | |||
4612 | NewOp3Val = (64 - Op3Val) & 0x3f; | |||
4613 | ||||
4614 | uint64_t NewOp4Val = Op4Val - 1; | |||
4615 | ||||
4616 | if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) | |||
4617 | return Error(Op4.getStartLoc(), | |||
4618 | "requested insert overflows register"); | |||
4619 | ||||
4620 | const MCExpr *NewOp3 = | |||
4621 | MCConstantExpr::create(NewOp3Val, getContext()); | |||
4622 | const MCExpr *NewOp4 = | |||
4623 | MCConstantExpr::create(NewOp4Val, getContext()); | |||
4624 | Operands[3] = AArch64Operand::CreateImm( | |||
4625 | NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext()); | |||
4626 | Operands[4] = AArch64Operand::CreateImm( | |||
4627 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); | |||
4628 | if (Tok == "bfi") | |||
4629 | Operands[0] = AArch64Operand::CreateToken( | |||
4630 | "bfm", false, Op.getStartLoc(), getContext()); | |||
4631 | else if (Tok == "sbfiz") | |||
4632 | Operands[0] = AArch64Operand::CreateToken( | |||
4633 | "sbfm", false, Op.getStartLoc(), getContext()); | |||
4634 | else if (Tok == "ubfiz") | |||
4635 | Operands[0] = AArch64Operand::CreateToken( | |||
4636 | "ubfm", false, Op.getStartLoc(), getContext()); | |||
4637 | else | |||
4638 | llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4638); | |||
4639 | } | |||
4640 | } | |||
4641 | ||||
4642 | // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and | |||
4643 | // UBFX -> UBFM aliases. | |||
4644 | } else if (NumOperands == 5 && | |||
4645 | (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) { | |||
4646 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
4647 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
4648 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); | |||
4649 | ||||
4650 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { | |||
4651 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); | |||
4652 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); | |||
4653 | ||||
4654 | if (Op3CE && Op4CE) { | |||
4655 | uint64_t Op3Val = Op3CE->getValue(); | |||
4656 | uint64_t Op4Val = Op4CE->getValue(); | |||
4657 | ||||
4658 | uint64_t RegWidth = 0; | |||
4659 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
4660 | Op1.getReg())) | |||
4661 | RegWidth = 64; | |||
4662 | else | |||
4663 | RegWidth = 32; | |||
4664 | ||||
4665 | if (Op3Val >= RegWidth) | |||
4666 | return Error(Op3.getStartLoc(), | |||
4667 | "expected integer in range [0, 31]"); | |||
4668 | if (Op4Val < 1 || Op4Val > RegWidth) | |||
4669 | return Error(Op4.getStartLoc(), | |||
4670 | "expected integer in range [1, 32]"); | |||
4671 | ||||
4672 | uint64_t NewOp4Val = Op3Val + Op4Val - 1; | |||
4673 | ||||
4674 | if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) | |||
4675 | return Error(Op4.getStartLoc(), | |||
4676 | "requested extract overflows register"); | |||
4677 | ||||
4678 | const MCExpr *NewOp4 = | |||
4679 | MCConstantExpr::create(NewOp4Val, getContext()); | |||
4680 | Operands[4] = AArch64Operand::CreateImm( | |||
4681 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); | |||
4682 | if (Tok == "bfxil") | |||
4683 | Operands[0] = AArch64Operand::CreateToken( | |||
4684 | "bfm", false, Op.getStartLoc(), getContext()); | |||
4685 | else if (Tok == "sbfx") | |||
4686 | Operands[0] = AArch64Operand::CreateToken( | |||
4687 | "sbfm", false, Op.getStartLoc(), getContext()); | |||
4688 | else if (Tok == "ubfx") | |||
4689 | Operands[0] = AArch64Operand::CreateToken( | |||
4690 | "ubfm", false, Op.getStartLoc(), getContext()); | |||
4691 | else | |||
4692 | llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4692); | |||
4693 | } | |||
4694 | } | |||
4695 | } | |||
4696 | } | |||
4697 | ||||
4698 | // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing | |||
4699 | // instruction for FP registers correctly in some rare circumstances. Convert | |||
4700 | // it to a safe instruction and warn (because silently changing someone's | |||
4701 | // assembly is rude). | |||
4702 | if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] && | |||
4703 | NumOperands == 4 && Tok == "movi") { | |||
4704 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
4705 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); | |||
4706 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
4707 | if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) || | |||
4708 | (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) { | |||
4709 | StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken(); | |||
4710 | if (Suffix.lower() == ".2d" && | |||
4711 | cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) { | |||
4712 | Warning(IDLoc, "instruction movi.2d with immediate #0 may not function" | |||
4713 | " correctly on this CPU, converting to equivalent movi.16b"); | |||
4714 | // Switch the suffix to .16b. | |||
4715 | unsigned Idx = Op1.isToken() ? 1 : 2; | |||
4716 | Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc, | |||
4717 | getContext()); | |||
4718 | } | |||
4719 | } | |||
4720 | } | |||
4721 | ||||
4722 | // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands. | |||
4723 | // InstAlias can't quite handle this since the reg classes aren't | |||
4724 | // subclasses. | |||
4725 | if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) { | |||
4726 | // The source register can be Wn here, but the matcher expects a | |||
4727 | // GPR64. Twiddle it here if necessary. | |||
4728 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); | |||
4729 | if (Op.isScalarReg()) { | |||
4730 | unsigned Reg = getXRegFromWReg(Op.getReg()); | |||
4731 | Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, | |||
4732 | Op.getStartLoc(), Op.getEndLoc(), | |||
4733 | getContext()); | |||
4734 | } | |||
4735 | } | |||
4736 | // FIXME: Likewise for sxt[bh] with a Xd dst operand | |||
4737 | else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) { | |||
4738 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); | |||
4739 | if (Op.isScalarReg() && | |||
4740 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
4741 | Op.getReg())) { | |||
4742 | // The source register can be Wn here, but the matcher expects a | |||
4743 | // GPR64. Twiddle it here if necessary. | |||
4744 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); | |||
4745 | if (Op.isScalarReg()) { | |||
4746 | unsigned Reg = getXRegFromWReg(Op.getReg()); | |||
4747 | Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, | |||
4748 | Op.getStartLoc(), | |||
4749 | Op.getEndLoc(), getContext()); | |||
4750 | } | |||
4751 | } | |||
4752 | } | |||
4753 | // FIXME: Likewise for uxt[bh] with a Xd dst operand | |||
4754 | else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) { | |||
4755 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); | |||
4756 | if (Op.isScalarReg() && | |||
4757 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
4758 | Op.getReg())) { | |||
4759 | // The source register can be Wn here, but the matcher expects a | |||
4760 | // GPR32. Twiddle it here if necessary. | |||
4761 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); | |||
4762 | if (Op.isScalarReg()) { | |||
4763 | unsigned Reg = getWRegFromXReg(Op.getReg()); | |||
4764 | Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, | |||
4765 | Op.getStartLoc(), | |||
4766 | Op.getEndLoc(), getContext()); | |||
4767 | } | |||
4768 | } | |||
4769 | } | |||
4770 | ||||
4771 | MCInst Inst; | |||
4772 | FeatureBitset MissingFeatures; | |||
4773 | // First try to match against the secondary set of tables containing the | |||
4774 | // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2"). | |||
4775 | unsigned MatchResult = | |||
4776 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, | |||
4777 | MatchingInlineAsm, 1); | |||
4778 | ||||
4779 | // If that fails, try against the alternate table containing long-form NEON: | |||
4780 | // "fadd v0.2s, v1.2s, v2.2s" | |||
4781 | if (MatchResult != Match_Success) { | |||
4782 | // But first, save the short-form match result: we can use it in case the | |||
4783 | // long-form match also fails. | |||
4784 | auto ShortFormNEONErrorInfo = ErrorInfo; | |||
4785 | auto ShortFormNEONMatchResult = MatchResult; | |||
4786 | auto ShortFormNEONMissingFeatures = MissingFeatures; | |||
4787 | ||||
4788 | MatchResult = | |||
4789 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, | |||
4790 | MatchingInlineAsm, 0); | |||
4791 | ||||
4792 | // Now, both matches failed, and the long-form match failed on the mnemonic | |||
4793 | // suffix token operand. The short-form match failure is probably more | |||
4794 | // relevant: use it instead. | |||
4795 | if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 && | |||
4796 | Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() && | |||
4797 | ((AArch64Operand &)*Operands[1]).isTokenSuffix()) { | |||
4798 | MatchResult = ShortFormNEONMatchResult; | |||
4799 | ErrorInfo = ShortFormNEONErrorInfo; | |||
4800 | MissingFeatures = ShortFormNEONMissingFeatures; | |||
4801 | } | |||
4802 | } | |||
4803 | ||||
4804 | switch (MatchResult) { | |||
4805 | case Match_Success: { | |||
4806 | // Perform range checking and other semantic validations | |||
4807 | SmallVector<SMLoc, 8> OperandLocs; | |||
4808 | NumOperands = Operands.size(); | |||
4809 | for (unsigned i = 1; i < NumOperands; ++i) | |||
4810 | OperandLocs.push_back(Operands[i]->getStartLoc()); | |||
4811 | if (validateInstruction(Inst, IDLoc, OperandLocs)) | |||
4812 | return true; | |||
4813 | ||||
4814 | Inst.setLoc(IDLoc); | |||
4815 | Out.EmitInstruction(Inst, getSTI()); | |||
4816 | return false; | |||
4817 | } | |||
4818 | case Match_MissingFeature: { | |||
4819 | assert(MissingFeatures.any() && "Unknown missing feature!")((MissingFeatures.any() && "Unknown missing feature!" ) ? static_cast<void> (0) : __assert_fail ("MissingFeatures.any() && \"Unknown missing feature!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 4819, __PRETTY_FUNCTION__)); | |||
4820 | // Special case the error message for the very common case where only | |||
4821 | // a single subtarget feature is missing (neon, e.g.). | |||
4822 | std::string Msg = "instruction requires:"; | |||
4823 | for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) { | |||
4824 | if (MissingFeatures[i]) { | |||
4825 | Msg += " "; | |||
4826 | Msg += getSubtargetFeatureName(i); | |||
4827 | } | |||
4828 | } | |||
4829 | return Error(IDLoc, Msg); | |||
4830 | } | |||
4831 | case Match_MnemonicFail: | |||
4832 | return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands); | |||
4833 | case Match_InvalidOperand: { | |||
4834 | SMLoc ErrorLoc = IDLoc; | |||
4835 | ||||
4836 | if (ErrorInfo != ~0ULL) { | |||
4837 | if (ErrorInfo >= Operands.size()) | |||
4838 | return Error(IDLoc, "too few operands for instruction", | |||
4839 | SMRange(IDLoc, getTok().getLoc())); | |||
4840 | ||||
4841 | ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); | |||
4842 | if (ErrorLoc == SMLoc()) | |||
4843 | ErrorLoc = IDLoc; | |||
4844 | } | |||
4845 | // If the match failed on a suffix token operand, tweak the diagnostic | |||
4846 | // accordingly. | |||
4847 | if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() && | |||
4848 | ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix()) | |||
4849 | MatchResult = Match_InvalidSuffix; | |||
4850 | ||||
4851 | return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands); | |||
4852 | } | |||
4853 | case Match_InvalidTiedOperand: | |||
4854 | case Match_InvalidMemoryIndexed1: | |||
4855 | case Match_InvalidMemoryIndexed2: | |||
4856 | case Match_InvalidMemoryIndexed4: | |||
4857 | case Match_InvalidMemoryIndexed8: | |||
4858 | case Match_InvalidMemoryIndexed16: | |||
4859 | case Match_InvalidCondCode: | |||
4860 | case Match_AddSubRegExtendSmall: | |||
4861 | case Match_AddSubRegExtendLarge: | |||
4862 | case Match_AddSubSecondSource: | |||
4863 | case Match_LogicalSecondSource: | |||
4864 | case Match_AddSubRegShift32: | |||
4865 | case Match_AddSubRegShift64: | |||
4866 | case Match_InvalidMovImm32Shift: | |||
4867 | case Match_InvalidMovImm64Shift: | |||
4868 | case Match_InvalidFPImm: | |||
4869 | case Match_InvalidMemoryWExtend8: | |||
4870 | case Match_InvalidMemoryWExtend16: | |||
4871 | case Match_InvalidMemoryWExtend32: | |||
4872 | case Match_InvalidMemoryWExtend64: | |||
4873 | case Match_InvalidMemoryWExtend128: | |||
4874 | case Match_InvalidMemoryXExtend8: | |||
4875 | case Match_InvalidMemoryXExtend16: | |||
4876 | case Match_InvalidMemoryXExtend32: | |||
4877 | case Match_InvalidMemoryXExtend64: | |||
4878 | case Match_InvalidMemoryXExtend128: | |||
4879 | case Match_InvalidMemoryIndexed1SImm4: | |||
4880 | case Match_InvalidMemoryIndexed2SImm4: | |||
4881 | case Match_InvalidMemoryIndexed3SImm4: | |||
4882 | case Match_InvalidMemoryIndexed4SImm4: | |||
4883 | case Match_InvalidMemoryIndexed1SImm6: | |||
4884 | case Match_InvalidMemoryIndexed16SImm4: | |||
4885 | case Match_InvalidMemoryIndexed4SImm7: | |||
4886 | case Match_InvalidMemoryIndexed8SImm7: | |||
4887 | case Match_InvalidMemoryIndexed16SImm7: | |||
4888 | case Match_InvalidMemoryIndexed8UImm5: | |||
4889 | case Match_InvalidMemoryIndexed4UImm5: | |||
4890 | case Match_InvalidMemoryIndexed2UImm5: | |||
4891 | case Match_InvalidMemoryIndexed1UImm6: | |||
4892 | case Match_InvalidMemoryIndexed2UImm6: | |||
4893 | case Match_InvalidMemoryIndexed4UImm6: | |||
4894 | case Match_InvalidMemoryIndexed8UImm6: | |||
4895 | case Match_InvalidMemoryIndexed16UImm6: | |||
4896 | case Match_InvalidMemoryIndexedSImm6: | |||
4897 | case Match_InvalidMemoryIndexedSImm5: | |||
4898 | case Match_InvalidMemoryIndexedSImm8: | |||
4899 | case Match_InvalidMemoryIndexedSImm9: | |||
4900 | case Match_InvalidMemoryIndexed16SImm9: | |||
4901 | case Match_InvalidMemoryIndexed8SImm10: | |||
4902 | case Match_InvalidImm0_1: | |||
4903 | case Match_InvalidImm0_7: | |||
4904 | case Match_InvalidImm0_15: | |||
4905 | case Match_InvalidImm0_31: | |||
4906 | case Match_InvalidImm0_63: | |||
4907 | case Match_InvalidImm0_127: | |||
4908 | case Match_InvalidImm0_255: | |||
4909 | case Match_InvalidImm0_65535: | |||
4910 | case Match_InvalidImm1_8: | |||
4911 | case Match_InvalidImm1_16: | |||
4912 | case Match_InvalidImm1_32: | |||
4913 | case Match_InvalidImm1_64: | |||
4914 | case Match_InvalidSVEAddSubImm8: | |||
4915 | case Match_InvalidSVEAddSubImm16: | |||
4916 | case Match_InvalidSVEAddSubImm32: | |||
4917 | case Match_InvalidSVEAddSubImm64: | |||
4918 | case Match_InvalidSVECpyImm8: | |||
4919 | case Match_InvalidSVECpyImm16: | |||
4920 | case Match_InvalidSVECpyImm32: | |||
4921 | case Match_InvalidSVECpyImm64: | |||
4922 | case Match_InvalidIndexRange1_1: | |||
4923 | case Match_InvalidIndexRange0_15: | |||
4924 | case Match_InvalidIndexRange0_7: | |||
4925 | case Match_InvalidIndexRange0_3: | |||
4926 | case Match_InvalidIndexRange0_1: | |||
4927 | case Match_InvalidSVEIndexRange0_63: | |||
4928 | case Match_InvalidSVEIndexRange0_31: | |||
4929 | case Match_InvalidSVEIndexRange0_15: | |||
4930 | case Match_InvalidSVEIndexRange0_7: | |||
4931 | case Match_InvalidSVEIndexRange0_3: | |||
4932 | case Match_InvalidLabel: | |||
4933 | case Match_InvalidComplexRotationEven: | |||
4934 | case Match_InvalidComplexRotationOdd: | |||
4935 | case Match_InvalidGPR64shifted8: | |||
4936 | case Match_InvalidGPR64shifted16: | |||
4937 | case Match_InvalidGPR64shifted32: | |||
4938 | case Match_InvalidGPR64shifted64: | |||
4939 | case Match_InvalidGPR64NoXZRshifted8: | |||
4940 | case Match_InvalidGPR64NoXZRshifted16: | |||
4941 | case Match_InvalidGPR64NoXZRshifted32: | |||
4942 | case Match_InvalidGPR64NoXZRshifted64: | |||
4943 | case Match_InvalidZPR32UXTW8: | |||
4944 | case Match_InvalidZPR32UXTW16: | |||
4945 | case Match_InvalidZPR32UXTW32: | |||
4946 | case Match_InvalidZPR32UXTW64: | |||
4947 | case Match_InvalidZPR32SXTW8: | |||
4948 | case Match_InvalidZPR32SXTW16: | |||
4949 | case Match_InvalidZPR32SXTW32: | |||
4950 | case Match_InvalidZPR32SXTW64: | |||
4951 | case Match_InvalidZPR64UXTW8: | |||
4952 | case Match_InvalidZPR64SXTW8: | |||
4953 | case Match_InvalidZPR64UXTW16: | |||
4954 | case Match_InvalidZPR64SXTW16: | |||
4955 | case Match_InvalidZPR64UXTW32: | |||
4956 | case Match_InvalidZPR64SXTW32: | |||
4957 | case Match_InvalidZPR64UXTW64: | |||
4958 | case Match_InvalidZPR64SXTW64: | |||
4959 | case Match_InvalidZPR32LSL8: | |||
4960 | case Match_InvalidZPR32LSL16: | |||
4961 | case Match_InvalidZPR32LSL32: | |||
4962 | case Match_InvalidZPR32LSL64: | |||
4963 | case Match_InvalidZPR64LSL8: | |||
4964 | case Match_InvalidZPR64LSL16: | |||
4965 | case Match_InvalidZPR64LSL32: | |||
4966 | case Match_InvalidZPR64LSL64: | |||
4967 | case Match_InvalidZPR0: | |||
4968 | case Match_InvalidZPR8: | |||
4969 | case Match_InvalidZPR16: | |||
4970 | case Match_InvalidZPR32: | |||
4971 | case Match_InvalidZPR64: | |||
4972 | case Match_InvalidZPR128: | |||
4973 | case Match_InvalidZPR_3b8: | |||
4974 | case Match_InvalidZPR_3b16: | |||
4975 | case Match_InvalidZPR_3b32: | |||
4976 | case Match_InvalidZPR_4b16: | |||
4977 | case Match_InvalidZPR_4b32: | |||
4978 | case Match_InvalidZPR_4b64: | |||
4979 | case Match_InvalidSVEPredicateAnyReg: | |||
4980 | case Match_InvalidSVEPattern: | |||
4981 | case Match_InvalidSVEPredicateBReg: | |||
4982 | case Match_InvalidSVEPredicateHReg: | |||
4983 | case Match_InvalidSVEPredicateSReg: | |||
4984 | case Match_InvalidSVEPredicateDReg: | |||
4985 | case Match_InvalidSVEPredicate3bAnyReg: | |||
4986 | case Match_InvalidSVEPredicate3bBReg: | |||
4987 | case Match_InvalidSVEPredicate3bHReg: | |||
4988 | case Match_InvalidSVEPredicate3bSReg: | |||
4989 | case Match_InvalidSVEPredicate3bDReg: | |||
4990 | case Match_InvalidSVEExactFPImmOperandHalfOne: | |||
4991 | case Match_InvalidSVEExactFPImmOperandHalfTwo: | |||
4992 | case Match_InvalidSVEExactFPImmOperandZeroOne: | |||
4993 | case Match_MSR: | |||
4994 | case Match_MRS: { | |||
4995 | if (ErrorInfo >= Operands.size()) | |||
4996 | return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc())); | |||
4997 | // Any time we get here, there's nothing fancy to do. Just get the | |||
4998 | // operand SMLoc and display the diagnostic. | |||
4999 | SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); | |||
5000 | if (ErrorLoc == SMLoc()) | |||
5001 | ErrorLoc = IDLoc; | |||
5002 | return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands); | |||
5003 | } | |||
5004 | } | |||
5005 | ||||
5006 | llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 5006); | |||
5007 | } | |||
5008 | ||||
5009 | /// ParseDirective parses the arm specific directives | |||
5010 | bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) { | |||
5011 | const MCObjectFileInfo::Environment Format = | |||
5012 | getContext().getObjectFileInfo()->getObjectFileType(); | |||
5013 | bool IsMachO = Format == MCObjectFileInfo::IsMachO; | |||
5014 | ||||
5015 | StringRef IDVal = DirectiveID.getIdentifier(); | |||
5016 | SMLoc Loc = DirectiveID.getLoc(); | |||
5017 | if (IDVal == ".arch") | |||
5018 | parseDirectiveArch(Loc); | |||
5019 | else if (IDVal == ".cpu") | |||
5020 | parseDirectiveCPU(Loc); | |||
5021 | else if (IDVal == ".tlsdesccall") | |||
5022 | parseDirectiveTLSDescCall(Loc); | |||
5023 | else if (IDVal == ".ltorg" || IDVal == ".pool") | |||
5024 | parseDirectiveLtorg(Loc); | |||
5025 | else if (IDVal == ".unreq") | |||
5026 | parseDirectiveUnreq(Loc); | |||
5027 | else if (IDVal == ".inst") | |||
5028 | parseDirectiveInst(Loc); | |||
5029 | else if (IDVal == ".cfi_negate_ra_state") | |||
5030 | parseDirectiveCFINegateRAState(); | |||
5031 | else if (IDVal == ".cfi_b_key_frame") | |||
5032 | parseDirectiveCFIBKeyFrame(); | |||
5033 | else if (IDVal == ".arch_extension") | |||
5034 | parseDirectiveArchExtension(Loc); | |||
5035 | else if (IsMachO) { | |||
5036 | if (IDVal == MCLOHDirectiveName()) | |||
5037 | parseDirectiveLOH(IDVal, Loc); | |||
5038 | else | |||
5039 | return true; | |||
5040 | } else | |||
5041 | return true; | |||
5042 | return false; | |||
5043 | } | |||
5044 | ||||
5045 | static void ExpandCryptoAEK(AArch64::ArchKind ArchKind, | |||
5046 | SmallVector<StringRef, 4> &RequestedExtensions) { | |||
5047 | const bool NoCrypto = | |||
5048 | (std::find(RequestedExtensions.begin(), RequestedExtensions.end(), | |||
5049 | "nocrypto") != std::end(RequestedExtensions)); | |||
5050 | const bool Crypto = | |||
5051 | (std::find(RequestedExtensions.begin(), RequestedExtensions.end(), | |||
5052 | "crypto") != std::end(RequestedExtensions)); | |||
5053 | ||||
5054 | if (!NoCrypto && Crypto) { | |||
5055 | switch (ArchKind) { | |||
5056 | default: | |||
5057 | // Map 'generic' (and others) to sha2 and aes, because | |||
5058 | // that was the traditional meaning of crypto. | |||
5059 | case AArch64::ArchKind::ARMV8_1A: | |||
5060 | case AArch64::ArchKind::ARMV8_2A: | |||
5061 | case AArch64::ArchKind::ARMV8_3A: | |||
5062 | RequestedExtensions.push_back("sha2"); | |||
5063 | RequestedExtensions.push_back("aes"); | |||
5064 | break; | |||
5065 | case AArch64::ArchKind::ARMV8_4A: | |||
5066 | case AArch64::ArchKind::ARMV8_5A: | |||
5067 | RequestedExtensions.push_back("sm4"); | |||
5068 | RequestedExtensions.push_back("sha3"); | |||
5069 | RequestedExtensions.push_back("sha2"); | |||
5070 | RequestedExtensions.push_back("aes"); | |||
5071 | break; | |||
5072 | } | |||
5073 | } else if (NoCrypto) { | |||
5074 | switch (ArchKind) { | |||
5075 | default: | |||
5076 | // Map 'generic' (and others) to sha2 and aes, because | |||
5077 | // that was the traditional meaning of crypto. | |||
5078 | case AArch64::ArchKind::ARMV8_1A: | |||
5079 | case AArch64::ArchKind::ARMV8_2A: | |||
5080 | case AArch64::ArchKind::ARMV8_3A: | |||
5081 | RequestedExtensions.push_back("nosha2"); | |||
5082 | RequestedExtensions.push_back("noaes"); | |||
5083 | break; | |||
5084 | case AArch64::ArchKind::ARMV8_4A: | |||
5085 | case AArch64::ArchKind::ARMV8_5A: | |||
5086 | RequestedExtensions.push_back("nosm4"); | |||
5087 | RequestedExtensions.push_back("nosha3"); | |||
5088 | RequestedExtensions.push_back("nosha2"); | |||
5089 | RequestedExtensions.push_back("noaes"); | |||
5090 | break; | |||
5091 | } | |||
5092 | } | |||
5093 | } | |||
5094 | ||||
5095 | /// parseDirectiveArch | |||
5096 | /// ::= .arch token | |||
5097 | bool AArch64AsmParser::parseDirectiveArch(SMLoc L) { | |||
5098 | SMLoc ArchLoc = getLoc(); | |||
5099 | ||||
5100 | StringRef Arch, ExtensionString; | |||
5101 | std::tie(Arch, ExtensionString) = | |||
5102 | getParser().parseStringToEndOfStatement().trim().split('+'); | |||
5103 | ||||
5104 | AArch64::ArchKind ID = AArch64::parseArch(Arch); | |||
5105 | if (ID == AArch64::ArchKind::INVALID) | |||
5106 | return Error(ArchLoc, "unknown arch name"); | |||
5107 | ||||
5108 | if (parseToken(AsmToken::EndOfStatement)) | |||
5109 | return true; | |||
5110 | ||||
5111 | // Get the architecture and extension features. | |||
5112 | std::vector<StringRef> AArch64Features; | |||
5113 | AArch64::getArchFeatures(ID, AArch64Features); | |||
5114 | AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID), | |||
5115 | AArch64Features); | |||
5116 | ||||
5117 | MCSubtargetInfo &STI = copySTI(); | |||
5118 | std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end()); | |||
5119 | STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ",")); | |||
5120 | ||||
5121 | SmallVector<StringRef, 4> RequestedExtensions; | |||
5122 | if (!ExtensionString.empty()) | |||
5123 | ExtensionString.split(RequestedExtensions, '+'); | |||
5124 | ||||
5125 | ExpandCryptoAEK(ID, RequestedExtensions); | |||
5126 | ||||
5127 | FeatureBitset Features = STI.getFeatureBits(); | |||
5128 | for (auto Name : RequestedExtensions) { | |||
5129 | bool EnableFeature = true; | |||
5130 | ||||
5131 | if (Name.startswith_lower("no")) { | |||
5132 | EnableFeature = false; | |||
5133 | Name = Name.substr(2); | |||
5134 | } | |||
5135 | ||||
5136 | for (const auto &Extension : ExtensionMap) { | |||
5137 | if (Extension.Name != Name) | |||
5138 | continue; | |||
5139 | ||||
5140 | if (Extension.Features.none()) | |||
5141 | report_fatal_error("unsupported architectural extension: " + Name); | |||
5142 | ||||
5143 | FeatureBitset ToggleFeatures = EnableFeature | |||
5144 | ? (~Features & Extension.Features) | |||
5145 | : ( Features & Extension.Features); | |||
5146 | FeatureBitset Features = | |||
5147 | ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); | |||
5148 | setAvailableFeatures(Features); | |||
5149 | break; | |||
5150 | } | |||
5151 | } | |||
5152 | return false; | |||
5153 | } | |||
5154 | ||||
5155 | /// parseDirectiveArchExtension | |||
5156 | /// ::= .arch_extension [no]feature | |||
5157 | bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) { | |||
5158 | SMLoc ExtLoc = getLoc(); | |||
5159 | ||||
5160 | StringRef Name = getParser().parseStringToEndOfStatement().trim(); | |||
5161 | ||||
5162 | if (parseToken(AsmToken::EndOfStatement, | |||
5163 | "unexpected token in '.arch_extension' directive")) | |||
5164 | return true; | |||
5165 | ||||
5166 | bool EnableFeature = true; | |||
5167 | if (Name.startswith_lower("no")) { | |||
5168 | EnableFeature = false; | |||
5169 | Name = Name.substr(2); | |||
5170 | } | |||
5171 | ||||
5172 | MCSubtargetInfo &STI = copySTI(); | |||
5173 | FeatureBitset Features = STI.getFeatureBits(); | |||
5174 | for (const auto &Extension : ExtensionMap) { | |||
5175 | if (Extension.Name != Name) | |||
5176 | continue; | |||
5177 | ||||
5178 | if (Extension.Features.none()) | |||
5179 | return Error(ExtLoc, "unsupported architectural extension: " + Name); | |||
5180 | ||||
5181 | FeatureBitset ToggleFeatures = EnableFeature | |||
5182 | ? (~Features & Extension.Features) | |||
5183 | : (Features & Extension.Features); | |||
5184 | FeatureBitset Features = | |||
5185 | ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); | |||
5186 | setAvailableFeatures(Features); | |||
5187 | return false; | |||
5188 | } | |||
5189 | ||||
5190 | return Error(ExtLoc, "unknown architectural extension: " + Name); | |||
5191 | } | |||
5192 | ||||
5193 | static SMLoc incrementLoc(SMLoc L, int Offset) { | |||
5194 | return SMLoc::getFromPointer(L.getPointer() + Offset); | |||
5195 | } | |||
5196 | ||||
5197 | /// parseDirectiveCPU | |||
5198 | /// ::= .cpu id | |||
5199 | bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) { | |||
5200 | SMLoc CurLoc = getLoc(); | |||
5201 | ||||
5202 | StringRef CPU, ExtensionString; | |||
5203 | std::tie(CPU, ExtensionString) = | |||
5204 | getParser().parseStringToEndOfStatement().trim().split('+'); | |||
5205 | ||||
5206 | if (parseToken(AsmToken::EndOfStatement)) | |||
5207 | return true; | |||
5208 | ||||
5209 | SmallVector<StringRef, 4> RequestedExtensions; | |||
5210 | if (!ExtensionString.empty()) | |||
5211 | ExtensionString.split(RequestedExtensions, '+'); | |||
5212 | ||||
5213 | // FIXME This is using tablegen data, but should be moved to ARMTargetParser | |||
5214 | // once that is tablegen'ed | |||
5215 | if (!getSTI().isCPUStringValid(CPU)) { | |||
5216 | Error(CurLoc, "unknown CPU name"); | |||
5217 | return false; | |||
5218 | } | |||
5219 | ||||
5220 | MCSubtargetInfo &STI = copySTI(); | |||
5221 | STI.setDefaultFeatures(CPU, ""); | |||
5222 | CurLoc = incrementLoc(CurLoc, CPU.size()); | |||
5223 | ||||
5224 | ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions); | |||
5225 | ||||
5226 | FeatureBitset Features = STI.getFeatureBits(); | |||
5227 | for (auto Name : RequestedExtensions) { | |||
5228 | // Advance source location past '+'. | |||
5229 | CurLoc = incrementLoc(CurLoc, 1); | |||
5230 | ||||
5231 | bool EnableFeature = true; | |||
5232 | ||||
5233 | if (Name.startswith_lower("no")) { | |||
5234 | EnableFeature = false; | |||
5235 | Name = Name.substr(2); | |||
5236 | } | |||
5237 | ||||
5238 | bool FoundExtension = false; | |||
5239 | for (const auto &Extension : ExtensionMap) { | |||
5240 | if (Extension.Name != Name) | |||
5241 | continue; | |||
5242 | ||||
5243 | if (Extension.Features.none()) | |||
5244 | report_fatal_error("unsupported architectural extension: " + Name); | |||
5245 | ||||
5246 | FeatureBitset ToggleFeatures = EnableFeature | |||
5247 | ? (~Features & Extension.Features) | |||
5248 | : ( Features & Extension.Features); | |||
5249 | FeatureBitset Features = | |||
5250 | ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); | |||
5251 | setAvailableFeatures(Features); | |||
5252 | FoundExtension = true; | |||
5253 | ||||
5254 | break; | |||
5255 | } | |||
5256 | ||||
5257 | if (!FoundExtension) | |||
5258 | Error(CurLoc, "unsupported architectural extension"); | |||
5259 | ||||
5260 | CurLoc = incrementLoc(CurLoc, Name.size()); | |||
5261 | } | |||
5262 | return false; | |||
5263 | } | |||
5264 | ||||
5265 | /// parseDirectiveInst | |||
5266 | /// ::= .inst opcode [, ...] | |||
5267 | bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) { | |||
5268 | if (getLexer().is(AsmToken::EndOfStatement)) | |||
5269 | return Error(Loc, "expected expression following '.inst' directive"); | |||
5270 | ||||
5271 | auto parseOp = [&]() -> bool { | |||
5272 | SMLoc L = getLoc(); | |||
5273 | const MCExpr *Expr; | |||
5274 | if (check(getParser().parseExpression(Expr), L, "expected expression")) | |||
5275 | return true; | |||
5276 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); | |||
5277 | if (check(!Value, L, "expected constant expression")) | |||
5278 | return true; | |||
5279 | getTargetStreamer().emitInst(Value->getValue()); | |||
5280 | return false; | |||
5281 | }; | |||
5282 | ||||
5283 | if (parseMany(parseOp)) | |||
5284 | return addErrorSuffix(" in '.inst' directive"); | |||
5285 | return false; | |||
5286 | } | |||
5287 | ||||
5288 | // parseDirectiveTLSDescCall: | |||
5289 | // ::= .tlsdesccall symbol | |||
5290 | bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) { | |||
5291 | StringRef Name; | |||
5292 | if (check(getParser().parseIdentifier(Name), L, | |||
5293 | "expected symbol after directive") || | |||
5294 | parseToken(AsmToken::EndOfStatement)) | |||
5295 | return true; | |||
5296 | ||||
5297 | MCSymbol *Sym = getContext().getOrCreateSymbol(Name); | |||
5298 | const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext()); | |||
5299 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext()); | |||
5300 | ||||
5301 | MCInst Inst; | |||
5302 | Inst.setOpcode(AArch64::TLSDESCCALL); | |||
5303 | Inst.addOperand(MCOperand::createExpr(Expr)); | |||
5304 | ||||
5305 | getParser().getStreamer().EmitInstruction(Inst, getSTI()); | |||
5306 | return false; | |||
5307 | } | |||
5308 | ||||
5309 | /// ::= .loh <lohName | lohId> label1, ..., labelN | |||
5310 | /// The number of arguments depends on the loh identifier. | |||
5311 | bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) { | |||
5312 | MCLOHType Kind; | |||
5313 | if (getParser().getTok().isNot(AsmToken::Identifier)) { | |||
5314 | if (getParser().getTok().isNot(AsmToken::Integer)) | |||
5315 | return TokError("expected an identifier or a number in directive"); | |||
5316 | // We successfully get a numeric value for the identifier. | |||
5317 | // Check if it is valid. | |||
5318 | int64_t Id = getParser().getTok().getIntVal(); | |||
5319 | if (Id <= -1U && !isValidMCLOHType(Id)) | |||
5320 | return TokError("invalid numeric identifier in directive"); | |||
5321 | Kind = (MCLOHType)Id; | |||
5322 | } else { | |||
5323 | StringRef Name = getTok().getIdentifier(); | |||
5324 | // We successfully parse an identifier. | |||
5325 | // Check if it is a recognized one. | |||
5326 | int Id = MCLOHNameToId(Name); | |||
5327 | ||||
5328 | if (Id == -1) | |||
5329 | return TokError("invalid identifier in directive"); | |||
5330 | Kind = (MCLOHType)Id; | |||
5331 | } | |||
5332 | // Consume the identifier. | |||
5333 | Lex(); | |||
5334 | // Get the number of arguments of this LOH. | |||
5335 | int NbArgs = MCLOHIdToNbArgs(Kind); | |||
5336 | ||||
5337 | assert(NbArgs != -1 && "Invalid number of arguments")((NbArgs != -1 && "Invalid number of arguments") ? static_cast <void> (0) : __assert_fail ("NbArgs != -1 && \"Invalid number of arguments\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 5337, __PRETTY_FUNCTION__)); | |||
5338 | ||||
5339 | SmallVector<MCSymbol *, 3> Args; | |||
5340 | for (int Idx = 0; Idx < NbArgs; ++Idx) { | |||
5341 | StringRef Name; | |||
5342 | if (getParser().parseIdentifier(Name)) | |||
5343 | return TokError("expected identifier in directive"); | |||
5344 | Args.push_back(getContext().getOrCreateSymbol(Name)); | |||
5345 | ||||
5346 | if (Idx + 1 == NbArgs) | |||
5347 | break; | |||
5348 | if (parseToken(AsmToken::Comma, | |||
5349 | "unexpected token in '" + Twine(IDVal) + "' directive")) | |||
5350 | return true; | |||
5351 | } | |||
5352 | if (parseToken(AsmToken::EndOfStatement, | |||
5353 | "unexpected token in '" + Twine(IDVal) + "' directive")) | |||
5354 | return true; | |||
5355 | ||||
5356 | getStreamer().EmitLOHDirective((MCLOHType)Kind, Args); | |||
5357 | return false; | |||
5358 | } | |||
5359 | ||||
5360 | /// parseDirectiveLtorg | |||
5361 | /// ::= .ltorg | .pool | |||
5362 | bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) { | |||
5363 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) | |||
5364 | return true; | |||
5365 | getTargetStreamer().emitCurrentConstantPool(); | |||
5366 | return false; | |||
5367 | } | |||
5368 | ||||
5369 | /// parseDirectiveReq | |||
5370 | /// ::= name .req registername | |||
5371 | bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { | |||
5372 | MCAsmParser &Parser = getParser(); | |||
5373 | Parser.Lex(); // Eat the '.req' token. | |||
5374 | SMLoc SRegLoc = getLoc(); | |||
5375 | RegKind RegisterKind = RegKind::Scalar; | |||
5376 | unsigned RegNum; | |||
5377 | OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum); | |||
5378 | ||||
5379 | if (ParseRes != MatchOperand_Success) { | |||
5380 | StringRef Kind; | |||
5381 | RegisterKind = RegKind::NeonVector; | |||
5382 | ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector); | |||
5383 | ||||
5384 | if (ParseRes == MatchOperand_ParseFail) | |||
5385 | return true; | |||
5386 | ||||
5387 | if (ParseRes == MatchOperand_Success && !Kind.empty()) | |||
5388 | return Error(SRegLoc, "vector register without type specifier expected"); | |||
5389 | } | |||
5390 | ||||
5391 | if (ParseRes != MatchOperand_Success) { | |||
5392 | StringRef Kind; | |||
5393 | RegisterKind = RegKind::SVEDataVector; | |||
5394 | ParseRes = | |||
5395 | tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector); | |||
5396 | ||||
5397 | if (ParseRes == MatchOperand_ParseFail) | |||
5398 | return true; | |||
5399 | ||||
5400 | if (ParseRes == MatchOperand_Success && !Kind.empty()) | |||
5401 | return Error(SRegLoc, | |||
5402 | "sve vector register without type specifier expected"); | |||
5403 | } | |||
5404 | ||||
5405 | if (ParseRes != MatchOperand_Success) { | |||
5406 | StringRef Kind; | |||
5407 | RegisterKind = RegKind::SVEPredicateVector; | |||
5408 | ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector); | |||
5409 | ||||
5410 | if (ParseRes == MatchOperand_ParseFail) | |||
5411 | return true; | |||
5412 | ||||
5413 | if (ParseRes == MatchOperand_Success && !Kind.empty()) | |||
5414 | return Error(SRegLoc, | |||
5415 | "sve predicate register without type specifier expected"); | |||
5416 | } | |||
5417 | ||||
5418 | if (ParseRes != MatchOperand_Success) | |||
5419 | return Error(SRegLoc, "register name or alias expected"); | |||
5420 | ||||
5421 | // Shouldn't be anything else. | |||
5422 | if (parseToken(AsmToken::EndOfStatement, | |||
5423 | "unexpected input in .req directive")) | |||
5424 | return true; | |||
5425 | ||||
5426 | auto pair = std::make_pair(RegisterKind, (unsigned) RegNum); | |||
5427 | if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair) | |||
5428 | Warning(L, "ignoring redefinition of register alias '" + Name + "'"); | |||
5429 | ||||
5430 | return false; | |||
5431 | } | |||
5432 | ||||
5433 | /// parseDirectiveUneq | |||
5434 | /// ::= .unreq registername | |||
5435 | bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) { | |||
5436 | MCAsmParser &Parser = getParser(); | |||
5437 | if (getTok().isNot(AsmToken::Identifier)) | |||
5438 | return TokError("unexpected input in .unreq directive."); | |||
5439 | RegisterReqs.erase(Parser.getTok().getIdentifier().lower()); | |||
5440 | Parser.Lex(); // Eat the identifier. | |||
5441 | if (parseToken(AsmToken::EndOfStatement)) | |||
5442 | return addErrorSuffix("in '.unreq' directive"); | |||
5443 | return false; | |||
5444 | } | |||
5445 | ||||
5446 | bool AArch64AsmParser::parseDirectiveCFINegateRAState() { | |||
5447 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) | |||
5448 | return true; | |||
5449 | getStreamer().EmitCFINegateRAState(); | |||
5450 | return false; | |||
5451 | } | |||
5452 | ||||
5453 | /// parseDirectiveCFIBKeyFrame | |||
5454 | /// ::= .cfi_b_key | |||
5455 | bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() { | |||
5456 | if (parseToken(AsmToken::EndOfStatement, | |||
5457 | "unexpected token in '.cfi_b_key_frame'")) | |||
5458 | return true; | |||
5459 | getStreamer().EmitCFIBKeyFrame(); | |||
5460 | return false; | |||
5461 | } | |||
5462 | ||||
/// Decompose a symbolic operand into its ELF relocation specifier, Darwin
/// variant kind, and constant addend.
///
/// Returns true when \p Expr has a shape the operand matchers accept: an
/// optional AArch64MCExpr wrapper (":lo12:" etc.) around either a bare
/// symbol reference or "symbol + constant". Expressions that mix ELF and
/// Darwin modifiers, or that subtract a symbol, are rejected.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Defaults for the out-parameters: no modifier, zero addend.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific (ELF) modifier wrapper, if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
5504 | ||||
5505 | /// Force static initialization. | |||
5506 | extern "C" void LLVMInitializeAArch64AsmParser() { | |||
5507 | RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget()); | |||
5508 | RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget()); | |||
5509 | RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target()); | |||
5510 | } | |||
5511 | ||||
5512 | #define GET_REGISTER_MATCHER | |||
5513 | #define GET_SUBTARGET_FEATURE_NAME | |||
5514 | #define GET_MATCHER_IMPLEMENTATION | |||
5515 | #define GET_MNEMONIC_SPELL_CHECKER | |||
5516 | #include "AArch64GenAsmMatcher.inc" | |||
5517 | ||||
5518 | // Define this matcher function after the auto-generated include so we | |||
5519 | // have the match class enum definitions. | |||
5520 | unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, | |||
5521 | unsigned Kind) { | |||
5522 | AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp); | |||
5523 | // If the kind is a token for a literal immediate, check if our asm | |||
5524 | // operand matches. This is for InstAliases which have a fixed-value | |||
5525 | // immediate in the syntax. | |||
5526 | int64_t ExpectedVal; | |||
5527 | switch (Kind) { | |||
5528 | default: | |||
5529 | return Match_InvalidOperand; | |||
5530 | case MCK__35_0: | |||
5531 | ExpectedVal = 0; | |||
5532 | break; | |||
5533 | case MCK__35_1: | |||
5534 | ExpectedVal = 1; | |||
5535 | break; | |||
5536 | case MCK__35_12: | |||
5537 | ExpectedVal = 12; | |||
5538 | break; | |||
5539 | case MCK__35_16: | |||
5540 | ExpectedVal = 16; | |||
5541 | break; | |||
5542 | case MCK__35_2: | |||
5543 | ExpectedVal = 2; | |||
5544 | break; | |||
5545 | case MCK__35_24: | |||
5546 | ExpectedVal = 24; | |||
5547 | break; | |||
5548 | case MCK__35_3: | |||
5549 | ExpectedVal = 3; | |||
5550 | break; | |||
5551 | case MCK__35_32: | |||
5552 | ExpectedVal = 32; | |||
5553 | break; | |||
5554 | case MCK__35_4: | |||
5555 | ExpectedVal = 4; | |||
5556 | break; | |||
5557 | case MCK__35_48: | |||
5558 | ExpectedVal = 48; | |||
5559 | break; | |||
5560 | case MCK__35_6: | |||
5561 | ExpectedVal = 6; | |||
5562 | break; | |||
5563 | case MCK__35_64: | |||
5564 | ExpectedVal = 64; | |||
5565 | break; | |||
5566 | case MCK__35_8: | |||
5567 | ExpectedVal = 8; | |||
5568 | break; | |||
5569 | } | |||
5570 | if (!Op.isImm()) | |||
5571 | return Match_InvalidOperand; | |||
5572 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); | |||
5573 | if (!CE) | |||
5574 | return Match_InvalidOperand; | |||
5575 | if (CE->getValue() == ExpectedVal) | |||
5576 | return Match_Success; | |||
5577 | return Match_InvalidOperand; | |||
5578 | } | |||
5579 | ||||
/// Parse a consecutive even/odd GPR pair operand (e.g. "x0, x1" for CASP).
/// Both registers must be the same size (both W or both X), the first must
/// have an even encoding, and the second must be the next encoding up. On
/// success, pushes the matching sequential-pair super-register.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Determine the pair's width from the first register.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // The first register's hardware encoding must be even.
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // Second register: next encoding up, and the same width as the first.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the first register to the super-register covering the pair.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
5652 | ||||
5653 | template <bool ParseShiftExtend, bool ParseSuffix> | |||
5654 | OperandMatchResultTy | |||
5655 | AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) { | |||
5656 | const SMLoc S = getLoc(); | |||
5657 | // Check for a SVE vector register specifier first. | |||
5658 | unsigned RegNum; | |||
5659 | StringRef Kind; | |||
5660 | ||||
5661 | OperandMatchResultTy Res = | |||
5662 | tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector); | |||
5663 | ||||
5664 | if (Res != MatchOperand_Success) | |||
5665 | return Res; | |||
5666 | ||||
5667 | if (ParseSuffix && Kind.empty()) | |||
5668 | return MatchOperand_NoMatch; | |||
5669 | ||||
5670 | const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector); | |||
5671 | if (!KindRes) | |||
5672 | return MatchOperand_NoMatch; | |||
5673 | ||||
5674 | unsigned ElementWidth = KindRes->second; | |||
5675 | ||||
5676 | // No shift/extend is the default. | |||
5677 | if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) { | |||
5678 | Operands.push_back(AArch64Operand::CreateVectorReg( | |||
5679 | RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext())); | |||
5680 | ||||
5681 | OperandMatchResultTy Res = tryParseVectorIndex(Operands); | |||
5682 | if (Res == MatchOperand_ParseFail) | |||
5683 | return MatchOperand_ParseFail; | |||
5684 | return MatchOperand_Success; | |||
5685 | } | |||
5686 | ||||
5687 | // Eat the comma | |||
5688 | getParser().Lex(); | |||
5689 | ||||
5690 | // Match the shift | |||
5691 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; | |||
5692 | Res = tryParseOptionalShiftExtend(ExtOpnd); | |||
5693 | if (Res != MatchOperand_Success) | |||
5694 | return Res; | |||
5695 | ||||
5696 | auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get()); | |||
5697 | Operands.push_back(AArch64Operand::CreateVectorReg( | |||
5698 | RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(), | |||
5699 | getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(), | |||
5700 | Ext->hasShiftExtendAmount())); | |||
5701 | ||||
5702 | return MatchOperand_Success; | |||
5703 | } | |||
5704 | ||||
5705 | OperandMatchResultTy | |||
5706 | AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) { | |||
5707 | MCAsmParser &Parser = getParser(); | |||
5708 | ||||
5709 | SMLoc SS = getLoc(); | |||
5710 | const AsmToken &TokE = Parser.getTok(); | |||
5711 | bool IsHash = TokE.is(AsmToken::Hash); | |||
5712 | ||||
5713 | if (!IsHash && TokE.isNot(AsmToken::Identifier)) | |||
5714 | return MatchOperand_NoMatch; | |||
5715 | ||||
5716 | int64_t Pattern; | |||
5717 | if (IsHash) { | |||
5718 | Parser.Lex(); // Eat hash | |||
5719 | ||||
5720 | // Parse the immediate operand. | |||
5721 | const MCExpr *ImmVal; | |||
5722 | SS = getLoc(); | |||
5723 | if (Parser.parseExpression(ImmVal)) | |||
5724 | return MatchOperand_ParseFail; | |||
5725 | ||||
5726 | auto *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
5727 | if (!MCE) | |||
5728 | return MatchOperand_ParseFail; | |||
5729 | ||||
5730 | Pattern = MCE->getValue(); | |||
5731 | } else { | |||
5732 | // Parse the pattern | |||
5733 | auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString()); | |||
5734 | if (!Pat) | |||
5735 | return MatchOperand_NoMatch; | |||
5736 | ||||
5737 | Parser.Lex(); | |||
5738 | Pattern = Pat->Encoding; | |||
5739 | assert(Pattern >= 0 && Pattern < 32)((Pattern >= 0 && Pattern < 32) ? static_cast< void> (0) : __assert_fail ("Pattern >= 0 && Pattern < 32" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 5739, __PRETTY_FUNCTION__)); | |||
5740 | } | |||
5741 | ||||
5742 | Operands.push_back( | |||
5743 | AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()), | |||
5744 | SS, getLoc(), getContext())); | |||
5745 | ||||
5746 | return MatchOperand_Success; | |||
5747 | } |