| File: | build/source/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp |
| Warning: | line 4971, column 9: Called C++ object pointer is null |
| 1 | //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | ||||
| 9 | #include "AArch64InstrInfo.h" | |||
| 10 | #include "MCTargetDesc/AArch64AddressingModes.h" | |||
| 11 | #include "MCTargetDesc/AArch64InstPrinter.h" | |||
| 12 | #include "MCTargetDesc/AArch64MCExpr.h" | |||
| 13 | #include "MCTargetDesc/AArch64MCTargetDesc.h" | |||
| 14 | #include "MCTargetDesc/AArch64TargetStreamer.h" | |||
| 15 | #include "TargetInfo/AArch64TargetInfo.h" | |||
| 16 | #include "Utils/AArch64BaseInfo.h" | |||
| 17 | #include "llvm/ADT/APFloat.h" | |||
| 18 | #include "llvm/ADT/APInt.h" | |||
| 19 | #include "llvm/ADT/ArrayRef.h" | |||
| 20 | #include "llvm/ADT/STLExtras.h" | |||
| 21 | #include "llvm/ADT/SmallSet.h" | |||
| 22 | #include "llvm/ADT/SmallVector.h" | |||
| 23 | #include "llvm/ADT/StringExtras.h" | |||
| 24 | #include "llvm/ADT/StringMap.h" | |||
| 25 | #include "llvm/ADT/StringRef.h" | |||
| 26 | #include "llvm/ADT/StringSwitch.h" | |||
| 27 | #include "llvm/ADT/Twine.h" | |||
| 28 | #include "llvm/MC/MCContext.h" | |||
| 29 | #include "llvm/MC/MCExpr.h" | |||
| 30 | #include "llvm/MC/MCInst.h" | |||
| 31 | #include "llvm/MC/MCLinkerOptimizationHint.h" | |||
| 32 | #include "llvm/MC/MCObjectFileInfo.h" | |||
| 33 | #include "llvm/MC/MCParser/MCAsmLexer.h" | |||
| 34 | #include "llvm/MC/MCParser/MCAsmParser.h" | |||
| 35 | #include "llvm/MC/MCParser/MCAsmParserExtension.h" | |||
| 36 | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" | |||
| 37 | #include "llvm/MC/MCParser/MCTargetAsmParser.h" | |||
| 38 | #include "llvm/MC/MCRegisterInfo.h" | |||
| 39 | #include "llvm/MC/MCStreamer.h" | |||
| 40 | #include "llvm/MC/MCSubtargetInfo.h" | |||
| 41 | #include "llvm/MC/MCSymbol.h" | |||
| 42 | #include "llvm/MC/MCTargetOptions.h" | |||
| 43 | #include "llvm/MC/MCValue.h" | |||
| 44 | #include "llvm/MC/SubtargetFeature.h" | |||
| 45 | #include "llvm/MC/TargetRegistry.h" | |||
| 46 | #include "llvm/Support/Casting.h" | |||
| 47 | #include "llvm/Support/Compiler.h" | |||
| 48 | #include "llvm/Support/ErrorHandling.h" | |||
| 49 | #include "llvm/Support/MathExtras.h" | |||
| 50 | #include "llvm/Support/SMLoc.h" | |||
| 51 | #include "llvm/Support/raw_ostream.h" | |||
| 52 | #include "llvm/TargetParser/AArch64TargetParser.h" | |||
| 53 | #include <cassert> | |||
| 54 | #include <cctype> | |||
| 55 | #include <cstdint> | |||
| 56 | #include <cstdio> | |||
| 57 | #include <optional> | |||
| 58 | #include <string> | |||
| 59 | #include <tuple> | |||
| 60 | #include <utility> | |||
| 61 | #include <vector> | |||
| 62 | ||||
| 63 | using namespace llvm; | |||
| 64 | ||||
| 65 | namespace { | |||
| 66 | ||||
| 67 | enum class RegKind { | |||
| 68 | Scalar, | |||
| 69 | NeonVector, | |||
| 70 | SVEDataVector, | |||
| 71 | SVEPredicateAsCounter, | |||
| 72 | SVEPredicateVector, | |||
| 73 | Matrix, | |||
| 74 | LookupTable | |||
| 75 | }; | |||
| 76 | ||||
| 77 | enum class MatrixKind { Array, Tile, Row, Col }; | |||
| 78 | ||||
| 79 | enum RegConstraintEqualityTy { | |||
| 80 | EqualsReg, | |||
| 81 | EqualsSuperReg, | |||
| 82 | EqualsSubReg | |||
| 83 | }; | |||
| 84 | ||||
| 85 | class AArch64AsmParser : public MCTargetAsmParser { | |||
| 86 | private: | |||
| 87 | StringRef Mnemonic; ///< Instruction mnemonic. | |||
| 88 | ||||
| 89 | // Map of register aliases registered via the .req directive. | |||
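| | // For example, after "foo .req x0" has been parsed, the name "foo" resolves | |||
| | // to the scalar register x0 (illustrative usage; see parseDirectiveReq below). | |||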
| 90 | StringMap<std::pair<RegKind, unsigned>> RegisterReqs; | |||
| 91 | ||||
| 92 | class PrefixInfo { | |||
| 93 | public: | |||
| 94 | static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) { | |||
| 95 | PrefixInfo Prefix; | |||
| 96 | switch (Inst.getOpcode()) { | |||
| 97 | case AArch64::MOVPRFX_ZZ: | |||
| 98 | Prefix.Active = true; | |||
| 99 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
| 100 | break; | |||
| 101 | case AArch64::MOVPRFX_ZPmZ_B: | |||
| 102 | case AArch64::MOVPRFX_ZPmZ_H: | |||
| 103 | case AArch64::MOVPRFX_ZPmZ_S: | |||
| 104 | case AArch64::MOVPRFX_ZPmZ_D: | |||
| 105 | Prefix.Active = true; | |||
| 106 | Prefix.Predicated = true; | |||
| 107 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; | |||
| 108 | assert(Prefix.ElementSize != AArch64::ElementSizeNone && | |||
| 109 | "No destructive element size set for movprfx"); | |||
| 110 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
| 111 | Prefix.Pg = Inst.getOperand(2).getReg(); | |||
| 112 | break; | |||
| 113 | case AArch64::MOVPRFX_ZPzZ_B: | |||
| 114 | case AArch64::MOVPRFX_ZPzZ_H: | |||
| 115 | case AArch64::MOVPRFX_ZPzZ_S: | |||
| 116 | case AArch64::MOVPRFX_ZPzZ_D: | |||
| 117 | Prefix.Active = true; | |||
| 118 | Prefix.Predicated = true; | |||
| 119 | Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask; | |||
| 120 | assert(Prefix.ElementSize != AArch64::ElementSizeNone && | |||
| 121 | "No destructive element size set for movprfx"); | |||
| 122 | Prefix.Dst = Inst.getOperand(0).getReg(); | |||
| 123 | Prefix.Pg = Inst.getOperand(1).getReg(); | |||
| 124 | break; | |||
| 125 | default: | |||
| 126 | break; | |||
| 127 | } | |||
| 128 | ||||
| 129 | return Prefix; | |||
| 130 | } | |||
| 131 | ||||
| 132 | PrefixInfo() = default; | |||
| 133 | bool isActive() const { return Active; } | |||
| 134 | bool isPredicated() const { return Predicated; } | |||
| 135 | unsigned getElementSize() const { | |||
| 136 | assert(Predicated); | |||
| 137 | return ElementSize; | |||
| 138 | } | |||
| 139 | unsigned getDstReg() const { return Dst; } | |||
| 140 | unsigned getPgReg() const { | |||
| 141 | assert(Predicated); | |||
| 142 | return Pg; | |||
| 143 | } | |||
| 144 | ||||
| 145 | private: | |||
| 146 | bool Active = false; | |||
| 147 | bool Predicated = false; | |||
| 148 | unsigned ElementSize; | |||
| 149 | unsigned Dst; | |||
| 150 | unsigned Pg; | |||
| 151 | } NextPrefix; | |||
| 152 | ||||
| 153 | AArch64TargetStreamer &getTargetStreamer() { | |||
| 154 | MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); | |||
| 155 | return static_cast<AArch64TargetStreamer &>(TS); | |||
| 156 | } | |||
| 157 | ||||
| 158 | SMLoc getLoc() const { return getParser().getTok().getLoc(); } | |||
| 159 | ||||
| 160 | bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); | |||
| 161 | bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands); | |||
| 162 | void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S); | |||
| 163 | AArch64CC::CondCode parseCondCodeString(StringRef Cond, | |||
| 164 | std::string &Suggestion); | |||
| 165 | bool parseCondCode(OperandVector &Operands, bool invertCondCode); | |||
| 166 | unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind); | |||
| 167 | bool parseRegister(OperandVector &Operands); | |||
| 168 | bool parseSymbolicImmVal(const MCExpr *&ImmVal); | |||
| 169 | bool parseNeonVectorList(OperandVector &Operands); | |||
| 170 | bool parseOptionalMulOperand(OperandVector &Operands); | |||
| 171 | bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup); | |||
| 172 | bool parseKeywordOperand(OperandVector &Operands); | |||
| 173 | bool parseOperand(OperandVector &Operands, bool isCondCode, | |||
| 174 | bool invertCondCode); | |||
| 175 | bool parseImmExpr(int64_t &Out); | |||
| 176 | bool parseComma(); | |||
| 177 | bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First, | |||
| 178 | unsigned Last); | |||
| 179 | ||||
| 180 | bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, | |||
| 181 | OperandVector &Operands); | |||
| 182 | ||||
| 183 | bool parseDirectiveArch(SMLoc L); | |||
| 184 | bool parseDirectiveArchExtension(SMLoc L); | |||
| 185 | bool parseDirectiveCPU(SMLoc L); | |||
| 186 | bool parseDirectiveInst(SMLoc L); | |||
| 187 | ||||
| 188 | bool parseDirectiveTLSDescCall(SMLoc L); | |||
| 189 | ||||
| 190 | bool parseDirectiveLOH(StringRef LOH, SMLoc L); | |||
| 191 | bool parseDirectiveLtorg(SMLoc L); | |||
| 192 | ||||
| 193 | bool parseDirectiveReq(StringRef Name, SMLoc L); | |||
| 194 | bool parseDirectiveUnreq(SMLoc L); | |||
| 195 | bool parseDirectiveCFINegateRAState(); | |||
| 196 | bool parseDirectiveCFIBKeyFrame(); | |||
| 197 | bool parseDirectiveCFIMTETaggedFrame(); | |||
| 198 | ||||
| 199 | bool parseDirectiveVariantPCS(SMLoc L); | |||
| 200 | ||||
| 201 | bool parseDirectiveSEHAllocStack(SMLoc L); | |||
| 202 | bool parseDirectiveSEHPrologEnd(SMLoc L); | |||
| 203 | bool parseDirectiveSEHSaveR19R20X(SMLoc L); | |||
| 204 | bool parseDirectiveSEHSaveFPLR(SMLoc L); | |||
| 205 | bool parseDirectiveSEHSaveFPLRX(SMLoc L); | |||
| 206 | bool parseDirectiveSEHSaveReg(SMLoc L); | |||
| 207 | bool parseDirectiveSEHSaveRegX(SMLoc L); | |||
| 208 | bool parseDirectiveSEHSaveRegP(SMLoc L); | |||
| 209 | bool parseDirectiveSEHSaveRegPX(SMLoc L); | |||
| 210 | bool parseDirectiveSEHSaveLRPair(SMLoc L); | |||
| 211 | bool parseDirectiveSEHSaveFReg(SMLoc L); | |||
| 212 | bool parseDirectiveSEHSaveFRegX(SMLoc L); | |||
| 213 | bool parseDirectiveSEHSaveFRegP(SMLoc L); | |||
| 214 | bool parseDirectiveSEHSaveFRegPX(SMLoc L); | |||
| 215 | bool parseDirectiveSEHSetFP(SMLoc L); | |||
| 216 | bool parseDirectiveSEHAddFP(SMLoc L); | |||
| 217 | bool parseDirectiveSEHNop(SMLoc L); | |||
| 218 | bool parseDirectiveSEHSaveNext(SMLoc L); | |||
| 219 | bool parseDirectiveSEHEpilogStart(SMLoc L); | |||
| 220 | bool parseDirectiveSEHEpilogEnd(SMLoc L); | |||
| 221 | bool parseDirectiveSEHTrapFrame(SMLoc L); | |||
| 222 | bool parseDirectiveSEHMachineFrame(SMLoc L); | |||
| 223 | bool parseDirectiveSEHContext(SMLoc L); | |||
| 224 | bool parseDirectiveSEHClearUnwoundToCall(SMLoc L); | |||
| 225 | bool parseDirectiveSEHPACSignLR(SMLoc L); | |||
| 226 | bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback); | |||
| 227 | ||||
| 228 | bool validateInstruction(MCInst &Inst, SMLoc &IDLoc, | |||
| 229 | SmallVectorImpl<SMLoc> &Loc); | |||
| 230 | unsigned getNumRegsForRegKind(RegKind K); | |||
| 231 | bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, | |||
| 232 | OperandVector &Operands, MCStreamer &Out, | |||
| 233 | uint64_t &ErrorInfo, | |||
| 234 | bool MatchingInlineAsm) override; | |||
| 235 | /// @name Auto-generated Match Functions | |||
| 236 | /// { | |||
| 237 | ||||
| 238 | #define GET_ASSEMBLER_HEADER | |||
| 239 | #include "AArch64GenAsmMatcher.inc" | |||
| 240 | ||||
| 241 | /// } | |||
| 242 | ||||
| 243 | OperandMatchResultTy tryParseScalarRegister(MCRegister &Reg); | |||
| 244 | OperandMatchResultTy tryParseVectorRegister(MCRegister &Reg, StringRef &Kind, | |||
| 245 | RegKind MatchKind); | |||
| 246 | OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands); | |||
| 247 | OperandMatchResultTy tryParseSVCR(OperandVector &Operands); | |||
| 248 | OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands); | |||
| 249 | OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands); | |||
| 250 | OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands); | |||
| 251 | OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands); | |||
| 252 | OperandMatchResultTy tryParseSysReg(OperandVector &Operands); | |||
| 253 | OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands); | |||
| 254 | template <bool IsSVEPrefetch = false> | |||
| 255 | OperandMatchResultTy tryParsePrefetch(OperandVector &Operands); | |||
| 256 | OperandMatchResultTy tryParseRPRFMOperand(OperandVector &Operands); | |||
| 257 | OperandMatchResultTy tryParsePSBHint(OperandVector &Operands); | |||
| 258 | OperandMatchResultTy tryParseBTIHint(OperandVector &Operands); | |||
| 259 | OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands); | |||
| 260 | OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands); | |||
| 261 | template<bool AddFPZeroAsLiteral> | |||
| 262 | OperandMatchResultTy tryParseFPImm(OperandVector &Operands); | |||
| 263 | OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands); | |||
| 264 | OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands); | |||
| 265 | bool tryParseNeonVectorRegister(OperandVector &Operands); | |||
| 266 | OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands); | |||
| 267 | OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands); | |||
| 268 | OperandMatchResultTy tryParseSyspXzrPair(OperandVector &Operands); | |||
| 269 | template <bool ParseShiftExtend, | |||
| 270 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg> | |||
| 271 | OperandMatchResultTy tryParseGPROperand(OperandVector &Operands); | |||
| 272 | OperandMatchResultTy tryParseZTOperand(OperandVector &Operands); | |||
| 273 | template <bool ParseShiftExtend, bool ParseSuffix> | |||
| 274 | OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands); | |||
| 275 | template <RegKind RK> | |||
| 276 | OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands); | |||
| 277 | template <RegKind VectorKind> | |||
| 278 | OperandMatchResultTy tryParseVectorList(OperandVector &Operands, | |||
| 279 | bool ExpectMatch = false); | |||
| 280 | OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands); | |||
| 281 | OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands); | |||
| 282 | OperandMatchResultTy tryParseSVEVecLenSpecifier(OperandVector &Operands); | |||
| 283 | OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands); | |||
| 284 | OperandMatchResultTy tryParseImmRange(OperandVector &Operands); | |||
| 285 | ||||
| 286 | public: | |||
| 287 | enum AArch64MatchResultTy { | |||
| 288 | Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY, | |||
| 289 | #define GET_OPERAND_DIAGNOSTIC_TYPES | |||
| 290 | #include "AArch64GenAsmMatcher.inc" | |||
| 291 | }; | |||
| 292 | bool IsILP32; | |||
| 293 | ||||
| 294 | AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, | |||
| 295 | const MCInstrInfo &MII, const MCTargetOptions &Options) | |||
| 296 | : MCTargetAsmParser(Options, STI, MII) { | |||
| 297 | IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32; | |||
| 298 | MCAsmParserExtension::Initialize(Parser); | |||
| 299 | MCStreamer &S = getParser().getStreamer(); | |||
| 300 | if (S.getTargetStreamer() == nullptr) | |||
| 301 | new AArch64TargetStreamer(S); | |||
| 302 | ||||
| 303 | // Alias .hword/.word/.[dx]word to the target-independent | |||
| 304 | // .2byte/.4byte/.8byte directives as they have the same form and | |||
| 305 | // semantics: | |||
| 306 | /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ] | |||
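| | // For example, a ".xword foo" directive is handled exactly as if ".8byte foo" | |||
| | // had been written. | |||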
| 307 | Parser.addAliasForDirective(".hword", ".2byte"); | |||
| 308 | Parser.addAliasForDirective(".word", ".4byte"); | |||
| 309 | Parser.addAliasForDirective(".dword", ".8byte"); | |||
| 310 | Parser.addAliasForDirective(".xword", ".8byte"); | |||
| 311 | ||||
| 312 | // Initialize the set of available features. | |||
| 313 | setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits())); | |||
| 314 | } | |||
| 315 | ||||
| 316 | bool areEqualRegs(const MCParsedAsmOperand &Op1, | |||
| 317 | const MCParsedAsmOperand &Op2) const override; | |||
| 318 | bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, | |||
| 319 | SMLoc NameLoc, OperandVector &Operands) override; | |||
| 320 | bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc, | |||
| 321 | SMLoc &EndLoc) override; | |||
| 322 | OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc, | |||
| 323 | SMLoc &EndLoc) override; | |||
| 324 | bool ParseDirective(AsmToken DirectiveID) override; | |||
| 325 | unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, | |||
| 326 | unsigned Kind) override; | |||
| 327 | ||||
| 328 | static bool classifySymbolRef(const MCExpr *Expr, | |||
| 329 | AArch64MCExpr::VariantKind &ELFRefKind, | |||
| 330 | MCSymbolRefExpr::VariantKind &DarwinRefKind, | |||
| 331 | int64_t &Addend); | |||
| 332 | }; | |||
| 333 | ||||
| 334 | /// AArch64Operand - Instances of this class represent a parsed AArch64 machine | |||
| 335 | /// instruction. | |||
| 336 | class AArch64Operand : public MCParsedAsmOperand { | |||
| 337 | private: | |||
| 338 | enum KindTy { | |||
| 339 | k_Immediate, | |||
| 340 | k_ShiftedImm, | |||
| 341 | k_ImmRange, | |||
| 342 | k_CondCode, | |||
| 343 | k_Register, | |||
| 344 | k_MatrixRegister, | |||
| 345 | k_MatrixTileList, | |||
| 346 | k_SVCR, | |||
| 347 | k_VectorList, | |||
| 348 | k_VectorIndex, | |||
| 349 | k_Token, | |||
| 350 | k_SysReg, | |||
| 351 | k_SysCR, | |||
| 352 | k_Prefetch, | |||
| 353 | k_ShiftExtend, | |||
| 354 | k_FPImm, | |||
| 355 | k_Barrier, | |||
| 356 | k_PSBHint, | |||
| 357 | k_BTIHint, | |||
| 358 | } Kind; | |||
| 359 | ||||
| 360 | SMLoc StartLoc, EndLoc; | |||
| 361 | ||||
| 362 | struct TokOp { | |||
| 363 | const char *Data; | |||
| 364 | unsigned Length; | |||
| 365 | bool IsSuffix; // Is the operand actually a suffix on the mnemonic. | |||
| 366 | }; | |||
| 367 | ||||
| 368 | // Separate shift/extend operand. | |||
| 369 | struct ShiftExtendOp { | |||
| 370 | AArch64_AM::ShiftExtendType Type; | |||
| 371 | unsigned Amount; | |||
| 372 | bool HasExplicitAmount; | |||
| 373 | }; | |||
| 374 | ||||
| 375 | struct RegOp { | |||
| 376 | unsigned RegNum; | |||
| 377 | RegKind Kind; | |||
| 378 | int ElementWidth; | |||
| 379 | ||||
| 380 | // The register may be allowed as a different register class, | |||
| 381 | // e.g. for GPR64as32 or GPR32as64. | |||
| 382 | RegConstraintEqualityTy EqualityTy; | |||
| 383 | ||||
| 384 | // In some cases the shift/extend needs to be explicitly parsed together | |||
| 385 | // with the register, rather than as a separate operand. This is needed | |||
| 386 | // for addressing modes where the instruction as a whole dictates the | |||
| 387 | // scaling/extend, rather than specific bits in the instruction. | |||
| 388 | // By parsing them as a single operand, we avoid the need to pass an | |||
| 389 | // extra operand in all CodeGen patterns (because all operands need to | |||
| 390 | // have an associated value), and we avoid the need to update TableGen to | |||
| 391 | // accept operands that have no associated bits in the instruction. | |||
| 392 | // | |||
| 393 | // An added benefit of parsing them together is that the assembler | |||
| 394 | // can give a sensible diagnostic if the scaling is not correct. | |||
| 395 | // | |||
| 396 | // The default is 'lsl #0' (HasExplicitAmount = false) if no | |||
| 397 | // ShiftExtend is specified. | |||
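| | // For example, in "ldr x0, [x1, w2, uxtw #2]" the "uxtw #2" extend is parsed | |||
| | // as part of the w2 register operand rather than as a separate operand | |||
| | // (illustrative example). | |||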
| 398 | ShiftExtendOp ShiftExtend; | |||
| 399 | }; | |||
| 400 | ||||
| 401 | struct MatrixRegOp { | |||
| 402 | unsigned RegNum; | |||
| 403 | unsigned ElementWidth; | |||
| 404 | MatrixKind Kind; | |||
| 405 | }; | |||
| 406 | ||||
| 407 | struct MatrixTileListOp { | |||
| 408 | unsigned RegMask = 0; | |||
| 409 | }; | |||
| 410 | ||||
| 411 | struct VectorListOp { | |||
| 412 | unsigned RegNum; | |||
| 413 | unsigned Count; | |||
| 414 | unsigned Stride; | |||
| 415 | unsigned NumElements; | |||
| 416 | unsigned ElementWidth; | |||
| 417 | RegKind RegisterKind; | |||
| 418 | }; | |||
| 419 | ||||
| 420 | struct VectorIndexOp { | |||
| 421 | int Val; | |||
| 422 | }; | |||
| 423 | ||||
| 424 | struct ImmOp { | |||
| 425 | const MCExpr *Val; | |||
| 426 | }; | |||
| 427 | ||||
| 428 | struct ShiftedImmOp { | |||
| 429 | const MCExpr *Val; | |||
| 430 | unsigned ShiftAmount; | |||
| 431 | }; | |||
| 432 | ||||
| 433 | struct ImmRangeOp { | |||
| 434 | unsigned First; | |||
| 435 | unsigned Last; | |||
| 436 | }; | |||
| 437 | ||||
| 438 | struct CondCodeOp { | |||
| 439 | AArch64CC::CondCode Code; | |||
| 440 | }; | |||
| 441 | ||||
| 442 | struct FPImmOp { | |||
| 443 | uint64_t Val; // APFloat value bitcasted to uint64_t. | |||
| 444 | bool IsExact; // describes whether parsed value was exact. | |||
| 445 | }; | |||
| 446 | ||||
| 447 | struct BarrierOp { | |||
| 448 | const char *Data; | |||
| 449 | unsigned Length; | |||
| 450 | unsigned Val; // Not the enum since not all values have names. | |||
| 451 | bool HasnXSModifier; | |||
| 452 | }; | |||
| 453 | ||||
| 454 | struct SysRegOp { | |||
| 455 | const char *Data; | |||
| 456 | unsigned Length; | |||
| 457 | uint32_t MRSReg; | |||
| 458 | uint32_t MSRReg; | |||
| 459 | uint32_t PStateField; | |||
| 460 | }; | |||
| 461 | ||||
| 462 | struct SysCRImmOp { | |||
| 463 | unsigned Val; | |||
| 464 | }; | |||
| 465 | ||||
| 466 | struct PrefetchOp { | |||
| 467 | const char *Data; | |||
| 468 | unsigned Length; | |||
| 469 | unsigned Val; | |||
| 470 | }; | |||
| 471 | ||||
| 472 | struct PSBHintOp { | |||
| 473 | const char *Data; | |||
| 474 | unsigned Length; | |||
| 475 | unsigned Val; | |||
| 476 | }; | |||
| 477 | ||||
| 478 | struct BTIHintOp { | |||
| 479 | const char *Data; | |||
| 480 | unsigned Length; | |||
| 481 | unsigned Val; | |||
| 482 | }; | |||
| 483 | ||||
| 484 | struct SVCROp { | |||
| 485 | const char *Data; | |||
| 486 | unsigned Length; | |||
| 487 | unsigned PStateField; | |||
| 488 | }; | |||
| 489 | ||||
| 490 | union { | |||
| 491 | struct TokOp Tok; | |||
| 492 | struct RegOp Reg; | |||
| 493 | struct MatrixRegOp MatrixReg; | |||
| 494 | struct MatrixTileListOp MatrixTileList; | |||
| 495 | struct VectorListOp VectorList; | |||
| 496 | struct VectorIndexOp VectorIndex; | |||
| 497 | struct ImmOp Imm; | |||
| 498 | struct ShiftedImmOp ShiftedImm; | |||
| 499 | struct ImmRangeOp ImmRange; | |||
| 500 | struct CondCodeOp CondCode; | |||
| 501 | struct FPImmOp FPImm; | |||
| 502 | struct BarrierOp Barrier; | |||
| 503 | struct SysRegOp SysReg; | |||
| 504 | struct SysCRImmOp SysCRImm; | |||
| 505 | struct PrefetchOp Prefetch; | |||
| 506 | struct PSBHintOp PSBHint; | |||
| 507 | struct BTIHintOp BTIHint; | |||
| 508 | struct ShiftExtendOp ShiftExtend; | |||
| 509 | struct SVCROp SVCR; | |||
| 510 | }; | |||
| 511 | ||||
| 512 | // Keep the MCContext around as the MCExprs may need to be manipulated during | |||
| 513 | // the add<>Operands() calls. | |||
| 514 | MCContext &Ctx; | |||
| 515 | ||||
| 516 | public: | |||
| 517 | AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {} | |||
| 518 | ||||
| 519 | AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) { | |||
| 520 | Kind = o.Kind; | |||
| 521 | StartLoc = o.StartLoc; | |||
| 522 | EndLoc = o.EndLoc; | |||
| 523 | switch (Kind) { | |||
| 524 | case k_Token: | |||
| 525 | Tok = o.Tok; | |||
| 526 | break; | |||
| 527 | case k_Immediate: | |||
| 528 | Imm = o.Imm; | |||
| 529 | break; | |||
| 530 | case k_ShiftedImm: | |||
| 531 | ShiftedImm = o.ShiftedImm; | |||
| 532 | break; | |||
| 533 | case k_ImmRange: | |||
| 534 | ImmRange = o.ImmRange; | |||
| 535 | break; | |||
| 536 | case k_CondCode: | |||
| 537 | CondCode = o.CondCode; | |||
| 538 | break; | |||
| 539 | case k_FPImm: | |||
| 540 | FPImm = o.FPImm; | |||
| 541 | break; | |||
| 542 | case k_Barrier: | |||
| 543 | Barrier = o.Barrier; | |||
| 544 | break; | |||
| 545 | case k_Register: | |||
| 546 | Reg = o.Reg; | |||
| 547 | break; | |||
| 548 | case k_MatrixRegister: | |||
| 549 | MatrixReg = o.MatrixReg; | |||
| 550 | break; | |||
| 551 | case k_MatrixTileList: | |||
| 552 | MatrixTileList = o.MatrixTileList; | |||
| 553 | break; | |||
| 554 | case k_VectorList: | |||
| 555 | VectorList = o.VectorList; | |||
| 556 | break; | |||
| 557 | case k_VectorIndex: | |||
| 558 | VectorIndex = o.VectorIndex; | |||
| 559 | break; | |||
| 560 | case k_SysReg: | |||
| 561 | SysReg = o.SysReg; | |||
| 562 | break; | |||
| 563 | case k_SysCR: | |||
| 564 | SysCRImm = o.SysCRImm; | |||
| 565 | break; | |||
| 566 | case k_Prefetch: | |||
| 567 | Prefetch = o.Prefetch; | |||
| 568 | break; | |||
| 569 | case k_PSBHint: | |||
| 570 | PSBHint = o.PSBHint; | |||
| 571 | break; | |||
| 572 | case k_BTIHint: | |||
| 573 | BTIHint = o.BTIHint; | |||
| 574 | break; | |||
| 575 | case k_ShiftExtend: | |||
| 576 | ShiftExtend = o.ShiftExtend; | |||
| 577 | break; | |||
| 578 | case k_SVCR: | |||
| 579 | SVCR = o.SVCR; | |||
| 580 | break; | |||
| 581 | } | |||
| 582 | } | |||
| 583 | ||||
| 584 | /// getStartLoc - Get the location of the first token of this operand. | |||
| 585 | SMLoc getStartLoc() const override { return StartLoc; } | |||
| 586 | /// getEndLoc - Get the location of the last token of this operand. | |||
| 587 | SMLoc getEndLoc() const override { return EndLoc; } | |||
| 588 | ||||
| 589 | StringRef getToken() const { | |||
| 590 | assert(Kind == k_Token && "Invalid access!"); | |||
| 591 | return StringRef(Tok.Data, Tok.Length); | |||
| 592 | } | |||
| 593 | ||||
| 594 | bool isTokenSuffix() const { | |||
| 595 | assert(Kind == k_Token && "Invalid access!"); | |||
| 596 | return Tok.IsSuffix; | |||
| 597 | } | |||
| 598 | ||||
| 599 | const MCExpr *getImm() const { | |||
| 600 | assert(Kind == k_Immediate && "Invalid access!"); | |||
| 601 | return Imm.Val; | |||
| 602 | } | |||
| 603 | ||||
| 604 | const MCExpr *getShiftedImmVal() const { | |||
| 605 | assert(Kind == k_ShiftedImm && "Invalid access!"); | |||
| 606 | return ShiftedImm.Val; | |||
| 607 | } | |||
| 608 | ||||
| 609 | unsigned getShiftedImmShift() const { | |||
| 610 | assert(Kind == k_ShiftedImm && "Invalid access!"); | |||
| 611 | return ShiftedImm.ShiftAmount; | |||
| 612 | } | |||
| 613 | ||||
| 614 | unsigned getFirstImmVal() const { | |||
| 615 | assert(Kind == k_ImmRange && "Invalid access!"); | |||
| 616 | return ImmRange.First; | |||
| 617 | } | |||
| 618 | ||||
| 619 | unsigned getLastImmVal() const { | |||
| 620 | assert(Kind == k_ImmRange && "Invalid access!"); | |||
| 621 | return ImmRange.Last; | |||
| 622 | } | |||
| 623 | ||||
| 624 | AArch64CC::CondCode getCondCode() const { | |||
| 625 | assert(Kind == k_CondCode && "Invalid access!"); | |||
| 626 | return CondCode.Code; | |||
| 627 | } | |||
| 628 | ||||
| 629 | APFloat getFPImm() const { | |||
| 630 | assert(Kind == k_FPImm && "Invalid access!"); | |||
| 631 | return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true)); | |||
| 632 | } | |||
| 633 | ||||
| 634 | bool getFPImmIsExact() const { | |||
| 635 | assert(Kind == k_FPImm && "Invalid access!"); | |||
| 636 | return FPImm.IsExact; | |||
| 637 | } | |||
| 638 | ||||
| 639 | unsigned getBarrier() const { | |||
| 640 | assert(Kind == k_Barrier && "Invalid access!"); | |||
| 641 | return Barrier.Val; | |||
| 642 | } | |||
| 643 | ||||
| 644 | StringRef getBarrierName() const { | |||
| 645 | assert(Kind == k_Barrier && "Invalid access!"); | |||
| 646 | return StringRef(Barrier.Data, Barrier.Length); | |||
| 647 | } | |||
| 648 | ||||
| 649 | bool getBarriernXSModifier() const { | |||
| 650 | assert(Kind == k_Barrier && "Invalid access!"); | |||
| 651 | return Barrier.HasnXSModifier; | |||
| 652 | } | |||
| 653 | ||||
| 654 | unsigned getReg() const override { | |||
| 655 | assert(Kind == k_Register && "Invalid access!"); | |||
| 656 | return Reg.RegNum; | |||
| 657 | } | |||
| 658 | ||||
| 659 | unsigned getMatrixReg() const { | |||
| 660 | assert(Kind == k_MatrixRegister && "Invalid access!"); | |||
| 661 | return MatrixReg.RegNum; | |||
| 662 | } | |||
| 663 | ||||
| 664 | unsigned getMatrixElementWidth() const { | |||
| 665 | assert(Kind == k_MatrixRegister && "Invalid access!"); | |||
| 666 | return MatrixReg.ElementWidth; | |||
| 667 | } | |||
| 668 | ||||
| 669 | MatrixKind getMatrixKind() const { | |||
| 670 | assert(Kind == k_MatrixRegister && "Invalid access!"); | |||
| 671 | return MatrixReg.Kind; | |||
| 672 | } | |||
| 673 | ||||
| 674 | unsigned getMatrixTileListRegMask() const { | |||
| 675 | assert(isMatrixTileList() && "Invalid access!"); | |||
| 676 | return MatrixTileList.RegMask; | |||
| 677 | } | |||
| 678 | ||||
| 679 | RegConstraintEqualityTy getRegEqualityTy() const { | |||
| 680 | assert(Kind == k_Register && "Invalid access!"); | |||
| 681 | return Reg.EqualityTy; | |||
| 682 | } | |||
| 683 | ||||
| 684 | unsigned getVectorListStart() const { | |||
| 685 | assert(Kind == k_VectorList && "Invalid access!"); | |||
| 686 | return VectorList.RegNum; | |||
| 687 | } | |||
| 688 | ||||
| 689 | unsigned getVectorListCount() const { | |||
| 690 | assert(Kind == k_VectorList && "Invalid access!"); | |||
| 691 | return VectorList.Count; | |||
| 692 | } | |||
| 693 | ||||
| 694 | unsigned getVectorListStride() const { | |||
| 695 | assert(Kind == k_VectorList && "Invalid access!"); | |||
| 696 | return VectorList.Stride; | |||
| 697 | } | |||
| 698 | ||||
| 699 | int getVectorIndex() const { | |||
| 700 | assert(Kind == k_VectorIndex && "Invalid access!"); | |||
| 701 | return VectorIndex.Val; | |||
| 702 | } | |||
| 703 | ||||
| 704 | StringRef getSysReg() const { | |||
| 705 | assert(Kind == k_SysReg && "Invalid access!"); | |||
| 706 | return StringRef(SysReg.Data, SysReg.Length); | |||
| 707 | } | |||
| 708 | ||||
| 709 | unsigned getSysCR() const { | |||
| 710 | assert(Kind == k_SysCR && "Invalid access!"); | |||
| 711 | return SysCRImm.Val; | |||
| 712 | } | |||
| 713 | ||||
| 714 | unsigned getPrefetch() const { | |||
| 715 | assert(Kind == k_Prefetch && "Invalid access!"); | |||
| 716 | return Prefetch.Val; | |||
| 717 | } | |||
| 718 | ||||
| 719 | unsigned getPSBHint() const { | |||
| 720 | assert(Kind == k_PSBHint && "Invalid access!"); | |||
| 721 | return PSBHint.Val; | |||
| 722 | } | |||
| 723 | ||||
| 724 | StringRef getPSBHintName() const { | |||
| 725 | assert(Kind == k_PSBHint && "Invalid access!"); | |||
| 726 | return StringRef(PSBHint.Data, PSBHint.Length); | |||
| 727 | } | |||
| 728 | ||||
| 729 | unsigned getBTIHint() const { | |||
| 730 | assert(Kind == k_BTIHint && "Invalid access!"); | |||
| 731 | return BTIHint.Val; | |||
| 732 | } | |||
| 733 | ||||
| 734 | StringRef getBTIHintName() const { | |||
| 735 | assert(Kind == k_BTIHint && "Invalid access!"); | |||
| 736 | return StringRef(BTIHint.Data, BTIHint.Length); | |||
| 737 | } | |||
| 738 | ||||
| 739 | StringRef getSVCR() const { | |||
| 740 | assert(Kind == k_SVCR && "Invalid access!"); | |||
| 741 | return StringRef(SVCR.Data, SVCR.Length); | |||
| 742 | } | |||
| 743 | ||||
| 744 | StringRef getPrefetchName() const { | |||
| 745 | assert(Kind == k_Prefetch && "Invalid access!"); | |||
| 746 | return StringRef(Prefetch.Data, Prefetch.Length); | |||
| 747 | } | |||
| 748 | ||||
| 749 | AArch64_AM::ShiftExtendType getShiftExtendType() const { | |||
| 750 | if (Kind == k_ShiftExtend) | |||
| 751 | return ShiftExtend.Type; | |||
| 752 | if (Kind == k_Register) | |||
| 753 | return Reg.ShiftExtend.Type; | |||
| 754 | llvm_unreachable("Invalid access!"); | |||
| 755 | } | |||
| 756 | ||||
| 757 | unsigned getShiftExtendAmount() const { | |||
| 758 | if (Kind == k_ShiftExtend) | |||
| 759 | return ShiftExtend.Amount; | |||
| 760 | if (Kind == k_Register) | |||
| 761 | return Reg.ShiftExtend.Amount; | |||
| 762 | llvm_unreachable("Invalid access!"); | |||
| 763 | } | |||
| 764 | ||||
| 765 | bool hasShiftExtendAmount() const { | |||
| 766 | if (Kind == k_ShiftExtend) | |||
| 767 | return ShiftExtend.HasExplicitAmount; | |||
| 768 | if (Kind == k_Register) | |||
| 769 | return Reg.ShiftExtend.HasExplicitAmount; | |||
| 770 | llvm_unreachable("Invalid access!"); | |||
| 771 | } | |||
| 772 | ||||
| 773 | bool isImm() const override { return Kind == k_Immediate; } | |||
| 774 | bool isMem() const override { return false; } | |||
| 775 | ||||
| 776 | bool isUImm6() const { | |||
| 777 | if (!isImm()) | |||
| 778 | return false; | |||
| 779 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 780 | if (!MCE) | |||
| 781 | return false; | |||
| 782 | int64_t Val = MCE->getValue(); | |||
| 783 | return (Val >= 0 && Val < 64); | |||
| 784 | } | |||
| 785 | ||||
| 786 | template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); } | |||
| 787 | ||||
| 788 | template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const { | |||
| 789 | return isImmScaled<Bits, Scale>(true); | |||
| 790 | } | |||
| 791 | ||||
| 792 | template <int Bits, int Scale, int Offset = 0, bool IsRange = false> | |||
| 793 | DiagnosticPredicate isUImmScaled() const { | |||
| 794 | if (IsRange && isImmRange() && | |||
| 795 | (getLastImmVal() != getFirstImmVal() + Offset)) | |||
| 796 | return DiagnosticPredicateTy::NoMatch; | |||
| 797 | ||||
| 798 | return isImmScaled<Bits, Scale, IsRange>(false); | |||
| 799 | } | |||
| 800 | ||||
| 801 | template <int Bits, int Scale, bool IsRange = false> | |||
| 802 | DiagnosticPredicate isImmScaled(bool Signed) const { | |||
| 803 | if ((!isImm() && !isImmRange()) || (isImm() && IsRange) || | |||
| 804 | (isImmRange() && !IsRange)) | |||
| 805 | return DiagnosticPredicateTy::NoMatch; | |||
| 806 | ||||
| 807 | int64_t Val; | |||
| 808 | if (isImmRange()) | |||
| 809 | Val = getFirstImmVal(); | |||
| 810 | else { | |||
| 811 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 812 | if (!MCE) | |||
| 813 | return DiagnosticPredicateTy::NoMatch; | |||
| 814 | Val = MCE->getValue(); | |||
| 815 | } | |||
| 816 | ||||
| 817 | int64_t MinVal, MaxVal; | |||
| 818 | if (Signed) { | |||
| 819 | int64_t Shift = Bits - 1; | |||
| 820 | MinVal = (int64_t(1) << Shift) * -Scale; | |||
| 821 | MaxVal = ((int64_t(1) << Shift) - 1) * Scale; | |||
| 822 | } else { | |||
| 823 | MinVal = 0; | |||
| 824 | MaxVal = ((int64_t(1) << Bits) - 1) * Scale; | |||
| 825 | } | |||
| 826 | ||||
| 827 | if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0) | |||
| 828 | return DiagnosticPredicateTy::Match; | |||
| 829 | ||||
| 830 | return DiagnosticPredicateTy::NearMatch; | |||
| 831 | } | |||
| 832 | ||||
| 833 | DiagnosticPredicate isSVEPattern() const { | |||
| 834 | if (!isImm()) | |||
| 835 | return DiagnosticPredicateTy::NoMatch; | |||
| 836 | auto *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 837 | if (!MCE) | |||
| 838 | return DiagnosticPredicateTy::NoMatch; | |||
| 839 | int64_t Val = MCE->getValue(); | |||
| 840 | if (Val >= 0 && Val < 32) | |||
| 841 | return DiagnosticPredicateTy::Match; | |||
| 842 | return DiagnosticPredicateTy::NearMatch; | |||
| 843 | } | |||
| 844 | ||||
| 845 | DiagnosticPredicate isSVEVecLenSpecifier() const { | |||
| 846 | if (!isImm()) | |||
| 847 | return DiagnosticPredicateTy::NoMatch; | |||
| 848 | auto *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 849 | if (!MCE) | |||
| 850 | return DiagnosticPredicateTy::NoMatch; | |||
| 851 | int64_t Val = MCE->getValue(); | |||
| 852 | if (Val >= 0 && Val <= 1) | |||
| 853 | return DiagnosticPredicateTy::Match; | |||
| 854 | return DiagnosticPredicateTy::NearMatch; | |||
| 855 | } | |||
| 856 | ||||
| 857 | bool isSymbolicUImm12Offset(const MCExpr *Expr) const { | |||
| 858 | AArch64MCExpr::VariantKind ELFRefKind; | |||
| 859 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
| 860 | int64_t Addend; | |||
| 861 | if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, | |||
| 862 | Addend)) { | |||
| 863 | // If we don't understand the expression, assume the best and | |||
| 864 | // let the fixup and relocation code deal with it. | |||
| 865 | return true; | |||
| 866 | } | |||
| 867 | ||||
| 868 | if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || | |||
| 869 | ELFRefKind == AArch64MCExpr::VK_LO12 || | |||
| 870 | ELFRefKind == AArch64MCExpr::VK_GOT_LO12 || | |||
| 871 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || | |||
| 872 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || | |||
| 873 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || | |||
| 874 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || | |||
| 875 | ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC || | |||
| 876 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || | |||
| 877 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || | |||
| 878 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 || | |||
| 879 | ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) { | |||
| 880 | // Note that we don't range-check the addend. It's adjusted modulo page | |||
| 881 | // size when converted, so there is no "out of range" condition when using | |||
| 882 | // @pageoff. | |||
| 883 | return true; | |||
| 884 | } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF || | |||
| 885 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) { | |||
| 886 | // @gotpageoff/@tlvppageoff can only be used directly, not with an addend. | |||
| 887 | return Addend == 0; | |||
| 888 | } | |||
| 889 | ||||
| 890 | return false; | |||
| 891 | } | |||
| 892 | ||||
| 893 | template <int Scale> bool isUImm12Offset() const { | |||
| 894 | if (!isImm()) | |||
| 895 | return false; | |||
| 896 | ||||
| 897 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 898 | if (!MCE) | |||
| 899 | return isSymbolicUImm12Offset(getImm()); | |||
| 900 | ||||
| 901 | int64_t Val = MCE->getValue(); | |||
| 902 | return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000; | |||
| 903 | } | |||
| 904 | ||||
| 905 | template <int N, int M> | |||
| 906 | bool isImmInRange() const { | |||
| 907 | if (!isImm()) | |||
| 908 | return false; | |||
| 909 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 910 | if (!MCE) | |||
| 911 | return false; | |||
| 912 | int64_t Val = MCE->getValue(); | |||
| 913 | return (Val >= N && Val <= M); | |||
| 914 | } | |||
| 915 | ||||
| 916 | // NOTE: Also used for isLogicalImmNot as anything that can be represented as | |||
| 917 | // a logical immediate can always be represented when inverted. | |||
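| | // For example, 0x00ff00ff00ff00ff is a valid 64-bit logical immediate and so | |||
| | // is its bitwise NOT, 0xff00ff00ff00ff00 (illustrative example). | |||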
| 918 | template <typename T> | |||
| 919 | bool isLogicalImm() const { | |||
| 920 | if (!isImm()) | |||
| 921 | return false; | |||
| 922 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 923 | if (!MCE) | |||
| 924 | return false; | |||
| 925 | ||||
| 926 | int64_t Val = MCE->getValue(); | |||
| 927 | // Avoid left shift by 64 directly. | |||
| 928 | uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4); | |||
| 929 | // Allow all-0 or all-1 in top bits to permit bitwise NOT. | |||
| 930 | if ((Val & Upper) && (Val & Upper) != Upper) | |||
| 931 | return false; | |||
| 932 | ||||
| 933 | return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8); | |||
| 934 | } | |||
| 935 | ||||
| 936 | bool isShiftedImm() const { return Kind == k_ShiftedImm; } | |||
| 937 | ||||
| 938 | bool isImmRange() const { return Kind == k_ImmRange; } | |||
| 939 | ||||
| 940 | /// Returns the immediate value as a pair of (imm, shift) if the immediate is | |||
| 941 | /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted | |||
| 942 | /// immediate that can be shifted by 'Shift'. | |||
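| | /// For example, getShiftedVal<12>() returns (0x3, 12) for the constant 0x3000 | |||
| | /// and (0x3, 0) for the constant 0x3. | |||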
| 943 | template <unsigned Width> | |||
| 944 | std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const { | |||
| 945 | if (isShiftedImm() && Width == getShiftedImmShift()) | |||
| 946 | if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal())) | |||
| 947 | return std::make_pair(CE->getValue(), Width); | |||
| 948 | ||||
| 949 | if (isImm()) | |||
| 950 | if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) { | |||
| 951 | int64_t Val = CE->getValue(); | |||
| 952 | if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val)) | |||
| 953 | return std::make_pair(Val >> Width, Width); | |||
| 954 | else | |||
| 955 | return std::make_pair(Val, 0u); | |||
| 956 | } | |||
| 957 | ||||
| 958 | return {}; | |||
| 959 | } | |||
| 960 | ||||
| 961 | bool isAddSubImm() const { | |||
| 962 | if (!isShiftedImm() && !isImm()) | |||
| 963 | return false; | |||
| 964 | ||||
| 965 | const MCExpr *Expr; | |||
| 966 | ||||
| 967 | // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'. | |||
| 968 | if (isShiftedImm()) { | |||
| 969 | unsigned Shift = ShiftedImm.ShiftAmount; | |||
| 970 | Expr = ShiftedImm.Val; | |||
| 971 | if (Shift != 0 && Shift != 12) | |||
| 972 | return false; | |||
| 973 | } else { | |||
| 974 | Expr = getImm(); | |||
| 975 | } | |||
| 976 | ||||
| 977 | AArch64MCExpr::VariantKind ELFRefKind; | |||
| 978 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
| 979 | int64_t Addend; | |||
| 980 | if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, | |||
| 981 | DarwinRefKind, Addend)) { | |||
| 982 | return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF | |||
| 983 | || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF | |||
| 984 | || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0) | |||
| 985 | || ELFRefKind == AArch64MCExpr::VK_LO12 | |||
| 986 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 | |||
| 987 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 | |||
| 988 | || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC | |||
| 989 | || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 | |||
| 990 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 | |||
| 991 | || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC | |||
| 992 | || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 | |||
| 993 | || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 | |||
| 994 | || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12; | |||
| 995 | } | |||
| 996 | ||||
| 997 | // If it's a constant, it should be a real immediate in range. | |||
| 998 | if (auto ShiftedVal = getShiftedVal<12>()) | |||
| 999 | return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff; | |||
| 1000 | ||||
| 1001 | // If it's an expression, we hope for the best and let the fixup/relocation | |||
| 1002 | // code deal with it. | |||
| 1003 | return true; | |||
| 1004 | } | |||
| 1005 | ||||
| 1006 | bool isAddSubImmNeg() const { | |||
| 1007 | if (!isShiftedImm() && !isImm()) | |||
| 1008 | return false; | |||
| 1009 | ||||
| 1010 | // Otherwise it should be a real negative immediate in range. | |||
| 1011 | if (auto ShiftedVal = getShiftedVal<12>()) | |||
| 1012 | return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff; | |||
| 1013 | ||||
| 1014 | return false; | |||
| 1015 | } | |||
| 1016 | ||||
| 1017 | // Signed value in the range -128 to +127. For element widths of | |||
| 1018 | // 16 bits or higher it may also be a signed multiple of 256 in the | |||
| 1019 | // range -32768 to +32512. | |||
| 1020 | // For element-width of 8 bits a range of -128 to 255 is accepted, | |||
| 1021 | // since a copy of a byte can be either signed/unsigned. | |||
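| | // For example, "cpy z0.h, p0/z, #-32768" is accepted and encoded as the | |||
| | // immediate #-128 with an "lsl #8" shift (illustrative example). | |||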
| 1022 | template <typename T> | |||
| 1023 | DiagnosticPredicate isSVECpyImm() const { | |||
| 1024 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) | |||
| 1025 | return DiagnosticPredicateTy::NoMatch; | |||
| 1026 | ||||
| 1027 | bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value || | |||
| 1028 | std::is_same<int8_t, T>::value; | |||
| 1029 | if (auto ShiftedImm = getShiftedVal<8>()) | |||
| 1030 | if (!(IsByte && ShiftedImm->second) && | |||
| 1031 | AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first) | |||
| 1032 | << ShiftedImm->second)) | |||
| 1033 | return DiagnosticPredicateTy::Match; | |||
| 1034 | ||||
| 1035 | return DiagnosticPredicateTy::NearMatch; | |||
| 1036 | } | |||
| 1037 | ||||
| 1038 | // Unsigned value in the range 0 to 255. For element widths of | |||
| 1039 | // 16 bits or higher it may also be a signed multiple of 256 in the | |||
| 1040 | // range 0 to 65280. | |||
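| | // For example, "add z0.h, z0.h, #65280" is accepted as the immediate #255 | |||
| | // with an "lsl #8" shift (illustrative example). | |||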
| 1041 | template <typename T> DiagnosticPredicate isSVEAddSubImm() const { | |||
| 1042 | if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm()))) | |||
| 1043 | return DiagnosticPredicateTy::NoMatch; | |||
| 1044 | ||||
| 1045 | bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value || | |||
| 1046 | std::is_same<int8_t, T>::value; | |||
| 1047 | if (auto ShiftedImm = getShiftedVal<8>()) | |||
| 1048 | if (!(IsByte && ShiftedImm->second) && | |||
| 1049 | AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first | |||
| 1050 | << ShiftedImm->second)) | |||
| 1051 | return DiagnosticPredicateTy::Match; | |||
| 1052 | ||||
| 1053 | return DiagnosticPredicateTy::NearMatch; | |||
| 1054 | } | |||
| 1055 | ||||
| 1056 | template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const { | |||
| 1057 | if (isLogicalImm<T>() && !isSVECpyImm<T>()) | |||
| 1058 | return DiagnosticPredicateTy::Match; | |||
| 1059 | return DiagnosticPredicateTy::NoMatch; | |||
| 1060 | } | |||
| 1061 | ||||
| 1062 | bool isCondCode() const { return Kind == k_CondCode; } | |||
| 1063 | ||||
| 1064 | bool isSIMDImmType10() const { | |||
| 1065 | if (!isImm()) | |||
| 1066 | return false; | |||
| 1067 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1068 | if (!MCE) | |||
| 1069 | return false; | |||
| 1070 | return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue()); | |||
| 1071 | } | |||
| 1072 | ||||
| 1073 | template<int N> | |||
| 1074 | bool isBranchTarget() const { | |||
| 1075 | if (!isImm()) | |||
| 1076 | return false; | |||
| 1077 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1078 | if (!MCE) | |||
| 1079 | return true; | |||
| 1080 | int64_t Val = MCE->getValue(); | |||
| 1081 | if (Val & 0x3) | |||
| 1082 | return false; | |||
| 1083 | assert(N > 0 && "Branch target immediate cannot be 0 bits!"); | |||
| 1084 | return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2)); | |||
| 1085 | } | |||
| 1086 | ||||
| 1087 | bool | |||
| 1088 | isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const { | |||
| 1089 | if (!isImm()) | |||
| 1090 | return false; | |||
| 1091 | ||||
| 1092 | AArch64MCExpr::VariantKind ELFRefKind; | |||
| 1093 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
| 1094 | int64_t Addend; | |||
| 1095 | if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind, | |||
| 1096 | DarwinRefKind, Addend)) { | |||
| 1097 | return false; | |||
| 1098 | } | |||
| 1099 | if (DarwinRefKind != MCSymbolRefExpr::VK_None) | |||
| 1100 | return false; | |||
| 1101 | ||||
| 1102 | return llvm::is_contained(AllowedModifiers, ELFRefKind); | |||
| 1103 | } | |||
| 1104 | ||||
| 1105 | bool isMovWSymbolG3() const { | |||
| 1106 | return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3}); | |||
| 1107 | } | |||
| 1108 | ||||
| 1109 | bool isMovWSymbolG2() const { | |||
| 1110 | return isMovWSymbol( | |||
| 1111 | {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S, | |||
| 1112 | AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2, | |||
| 1113 | AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2, | |||
| 1114 | AArch64MCExpr::VK_DTPREL_G2}); | |||
| 1115 | } | |||
| 1116 | ||||
| 1117 | bool isMovWSymbolG1() const { | |||
| 1118 | return isMovWSymbol( | |||
| 1119 | {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S, | |||
| 1120 | AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1, | |||
| 1121 | AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1, | |||
| 1122 | AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC, | |||
| 1123 | AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC}); | |||
| 1124 | } | |||
| 1125 | ||||
| 1126 | bool isMovWSymbolG0() const { | |||
| 1127 | return isMovWSymbol( | |||
| 1128 | {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S, | |||
| 1129 | AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0, | |||
| 1130 | AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC, | |||
| 1131 | AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC, | |||
| 1132 | AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC}); | |||
| 1133 | } | |||
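| | // These predicates match the :modifier: syntax on MOVZ/MOVK operands, e.g. | |||
| | // (illustrative): | |||
| | //   movz x0, #:abs_g1:sym     // bits [31:16] of sym, matched via VK_ABS_G1 | |||
| | //   movk x0, #:abs_g0_nc:sym  // bits [15:0], no overflow check (VK_ABS_G0_NC) | |||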
| 1134 | ||||
| 1135 | template<int RegWidth, int Shift> | |||
| 1136 | bool isMOVZMovAlias() const { | |||
| 1137 | if (!isImm()) return false; | |||
| 1138 | ||||
| 1139 | const MCExpr *E = getImm(); | |||
| 1140 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) { | |||
| 1141 | uint64_t Value = CE->getValue(); | |||
| 1142 | ||||
| 1143 | return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth); | |||
| 1144 | } | |||
| 1145 | // Only supports the case of Shift being 0 if an expression is used as an | |||
| 1146 | // operand | |||
| 1147 | return !Shift && E; | |||
| 1148 | } | |||
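| | // For example (illustrative): "mov w0, #0x120000" satisfies | |||
| | // isMOVZMovAlias<32, 16> and is equivalent to "movz w0, #0x12, lsl #16". | |||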
| 1149 | ||||
| 1150 | template<int RegWidth, int Shift> | |||
| 1151 | bool isMOVNMovAlias() const { | |||
| 1152 | if (!isImm()) return false; | |||
| 1153 | ||||
| 1154 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1155 | if (!CE) return false; | |||
| 1156 | uint64_t Value = CE->getValue(); | |||
| 1157 | ||||
| 1158 | return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth); | |||
| 1159 | } | |||
| 1160 | ||||
| 1161 | bool isFPImm() const { | |||
| 1162 | return Kind == k_FPImm && | |||
| 1163 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1; | |||
| 1164 | } | |||
| 1165 | ||||
| 1166 | bool isBarrier() const { | |||
| 1167 | return Kind == k_Barrier && !getBarriernXSModifier(); | |||
| 1168 | } | |||
| 1169 | bool isBarriernXS() const { | |||
| 1170 | return Kind == k_Barrier && getBarriernXSModifier(); | |||
| 1171 | } | |||
| 1172 | bool isSysReg() const { return Kind == k_SysReg; } | |||
| 1173 | ||||
| 1174 | bool isMRSSystemRegister() const { | |||
| 1175 | if (!isSysReg()) return false; | |||
| 1176 | ||||
| 1177 | return SysReg.MRSReg != -1U; | |||
| 1178 | } | |||
| 1179 | ||||
| 1180 | bool isMSRSystemRegister() const { | |||
| 1181 | if (!isSysReg()) return false; | |||
| 1182 | return SysReg.MSRReg != -1U; | |||
| 1183 | } | |||
| 1184 | ||||
| 1185 | bool isSystemPStateFieldWithImm0_1() const { | |||
| 1186 | if (!isSysReg()) return false; | |||
| 1187 | return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField); | |||
| 1188 | } | |||
| 1189 | ||||
| 1190 | bool isSystemPStateFieldWithImm0_15() const { | |||
| 1191 | if (!isSysReg()) | |||
| 1192 | return false; | |||
| 1193 | return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField); | |||
| 1194 | } | |||
| 1195 | ||||
| 1196 | bool isSVCR() const { | |||
| 1197 | if (Kind != k_SVCR) | |||
| 1198 | return false; | |||
| 1199 | return SVCR.PStateField != -1U; | |||
| 1200 | } | |||
| 1201 | ||||
| 1202 | bool isReg() const override { | |||
| 1203 | return Kind == k_Register; | |||
| 1204 | } | |||
| 1205 | ||||
| 1206 | bool isVectorList() const { return Kind == k_VectorList; } | |||
| 1207 | ||||
| 1208 | bool isScalarReg() const { | |||
| 1209 | return Kind == k_Register && Reg.Kind == RegKind::Scalar; | |||
| 1210 | } | |||
| 1211 | ||||
| 1212 | bool isNeonVectorReg() const { | |||
| 1213 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector; | |||
| 1214 | } | |||
| 1215 | ||||
| 1216 | bool isNeonVectorRegLo() const { | |||
| 1217 | return Kind == k_Register && Reg.Kind == RegKind::NeonVector && | |||
| 1218 | (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains( | |||
| 1219 | Reg.RegNum) || | |||
| 1220 | AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains( | |||
| 1221 | Reg.RegNum)); | |||
| 1222 | } | |||
| 1223 | ||||
| 1224 | bool isMatrix() const { return Kind == k_MatrixRegister; } | |||
| 1225 | bool isMatrixTileList() const { return Kind == k_MatrixTileList; } | |||
| 1226 | ||||
| 1227 | template <unsigned Class> bool isSVEPredicateAsCounterReg() const { | |||
| 1228 | RegKind RK; | |||
| 1229 | switch (Class) { | |||
| 1230 | case AArch64::PPRRegClassID: | |||
| 1231 | case AArch64::PPR_3bRegClassID: | |||
| 1232 | case AArch64::PPR_p8to15RegClassID: | |||
| 1233 | RK = RegKind::SVEPredicateAsCounter; | |||
| 1234 | break; | |||
| 1235 | default: | |||
| 1236 | llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1236 ); | |||
| 1237 | } | |||
| 1238 | ||||
| 1239 | return (Kind == k_Register && Reg.Kind == RK) && | |||
| 1240 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
| 1241 | } | |||
| 1242 | ||||
| 1243 | template <unsigned Class> bool isSVEVectorReg() const { | |||
| 1244 | RegKind RK; | |||
| 1245 | switch (Class) { | |||
| 1246 | case AArch64::ZPRRegClassID: | |||
| 1247 | case AArch64::ZPR_3bRegClassID: | |||
| 1248 | case AArch64::ZPR_4bRegClassID: | |||
| 1249 | RK = RegKind::SVEDataVector; | |||
| 1250 | break; | |||
| 1251 | case AArch64::PPRRegClassID: | |||
| 1252 | case AArch64::PPR_3bRegClassID: | |||
| 1253 | RK = RegKind::SVEPredicateVector; | |||
| 1254 | break; | |||
| 1255 | default: | |||
| 1256 | llvm_unreachable("Unsupport register class")::llvm::llvm_unreachable_internal("Unsupport register class", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1256 ); | |||
| 1257 | } | |||
| 1258 | ||||
| 1259 | return (Kind == k_Register && Reg.Kind == RK) && | |||
| 1260 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
| 1261 | } | |||
| 1262 | ||||
| 1263 | template <unsigned Class> bool isFPRasZPR() const { | |||
| 1264 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1265 | AArch64MCRegisterClasses[Class].contains(getReg()); | |||
| 1266 | } | |||
| 1267 | ||||
| 1268 | template <int ElementWidth, unsigned Class> | |||
| 1269 | DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const { | |||
| 1270 | if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector) | |||
| 1271 | return DiagnosticPredicateTy::NoMatch; | |||
| 1272 | ||||
| 1273 | if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth)) | |||
| 1274 | return DiagnosticPredicateTy::Match; | |||
| 1275 | ||||
| 1276 | return DiagnosticPredicateTy::NearMatch; | |||
| 1277 | } | |||
| 1278 | ||||
| 1279 | template <int ElementWidth, unsigned Class> | |||
| 1280 | DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const { | |||
| 1281 | if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter) | |||
| 1282 | return DiagnosticPredicateTy::NoMatch; | |||
| 1283 | ||||
| 1284 | if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth)) | |||
| 1285 | return DiagnosticPredicateTy::Match; | |||
| 1286 | ||||
| 1287 | return DiagnosticPredicateTy::NearMatch; | |||
| 1288 | } | |||
| 1289 | ||||
| 1290 | template <int ElementWidth, unsigned Class> | |||
| 1291 | DiagnosticPredicate isSVEDataVectorRegOfWidth() const { | |||
| 1292 | if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector) | |||
| 1293 | return DiagnosticPredicateTy::NoMatch; | |||
| 1294 | ||||
| 1295 | if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth) | |||
| 1296 | return DiagnosticPredicateTy::Match; | |||
| 1297 | ||||
| 1298 | return DiagnosticPredicateTy::NearMatch; | |||
| 1299 | } | |||
| 1300 | ||||
| 1301 | template <int ElementWidth, unsigned Class, | |||
| 1302 | AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth, | |||
| 1303 | bool ShiftWidthAlwaysSame> | |||
| 1304 | DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const { | |||
| 1305 | auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>(); | |||
| 1306 | if (!VectorMatch.isMatch()) | |||
| 1307 | return DiagnosticPredicateTy::NoMatch; | |||
| 1308 | ||||
| 1309 | // Give a more specific diagnostic when the user has explicitly typed in | |||
| 1310 | // a shift-amount that does not match what is expected, but for which | |||
| 1311 | // there is also an unscaled addressing mode (e.g. sxtw/uxtw). | |||
| 1312 | bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8); | |||
| 1313 | if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW || | |||
| 1314 | ShiftExtendTy == AArch64_AM::SXTW) && | |||
| 1315 | !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8) | |||
| 1316 | return DiagnosticPredicateTy::NoMatch; | |||
| 1317 | ||||
| 1318 | if (MatchShift && ShiftExtendTy == getShiftExtendType()) | |||
| 1319 | return DiagnosticPredicateTy::Match; | |||
| 1320 | ||||
| 1321 | return DiagnosticPredicateTy::NearMatch; | |||
| 1322 | } | |||
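| | // For example (illustrative): a 32-bit scaled offset (ShiftWidth == 32) expects | |||
| | // a shift amount of Log2_32(32 / 8) == 2, as written in "[x0, z1.s, uxtw #2]"; | |||
| | // an explicit "uxtw #1" falls through to the near-match diagnostic instead. | |||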
| 1323 | ||||
| 1324 | bool isGPR32as64() const { | |||
| 1325 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1326 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum); | |||
| 1327 | } | |||
| 1328 | ||||
| 1329 | bool isGPR64as32() const { | |||
| 1330 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1331 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum); | |||
| 1332 | } | |||
| 1333 | ||||
| 1334 | bool isGPR64x8() const { | |||
| 1335 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1336 | AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains( | |||
| 1337 | Reg.RegNum); | |||
| 1338 | } | |||
| 1339 | ||||
| 1340 | bool isWSeqPair() const { | |||
| 1341 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1342 | AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains( | |||
| 1343 | Reg.RegNum); | |||
| 1344 | } | |||
| 1345 | ||||
| 1346 | bool isXSeqPair() const { | |||
| 1347 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1348 | AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains( | |||
| 1349 | Reg.RegNum); | |||
| 1350 | } | |||
| 1351 | ||||
| 1352 | bool isSyspXzrPair() const { | |||
| 1353 | return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR; | |||
| 1354 | } | |||
| 1355 | ||||
| 1356 | template<int64_t Angle, int64_t Remainder> | |||
| 1357 | DiagnosticPredicate isComplexRotation() const { | |||
| 1358 | if (!isImm()) return DiagnosticPredicateTy::NoMatch; | |||
| 1359 | ||||
| 1360 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1361 | if (!CE) return DiagnosticPredicateTy::NoMatch; | |||
| 1362 | uint64_t Value = CE->getValue(); | |||
| 1363 | ||||
| 1364 | if (Value % Angle == Remainder && Value <= 270) | |||
| 1365 | return DiagnosticPredicateTy::Match; | |||
| 1366 | return DiagnosticPredicateTy::NearMatch; | |||
| 1367 | } | |||
| 1368 | ||||
| 1369 | template <unsigned RegClassID> bool isGPR64() const { | |||
| 1370 | return Kind == k_Register && Reg.Kind == RegKind::Scalar && | |||
| 1371 | AArch64MCRegisterClasses[RegClassID].contains(getReg()); | |||
| 1372 | } | |||
| 1373 | ||||
| 1374 | template <unsigned RegClassID, int ExtWidth> | |||
| 1375 | DiagnosticPredicate isGPR64WithShiftExtend() const { | |||
| 1376 | if (Kind != k_Register || Reg.Kind != RegKind::Scalar) | |||
| 1377 | return DiagnosticPredicateTy::NoMatch; | |||
| 1378 | ||||
| 1379 | if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL && | |||
| 1380 | getShiftExtendAmount() == Log2_32(ExtWidth / 8)) | |||
| 1381 | return DiagnosticPredicateTy::Match; | |||
| 1382 | return DiagnosticPredicateTy::NearMatch; | |||
| 1383 | } | |||
| 1384 | ||||
| 1385 | /// Is this a vector list with the type implicit (presumably attached to the | |||
| 1386 | /// instruction itself)? | |||
| 1387 | template <RegKind VectorKind, unsigned NumRegs> | |||
| 1388 | bool isImplicitlyTypedVectorList() const { | |||
| 1389 | return Kind == k_VectorList && VectorList.Count == NumRegs && | |||
| 1390 | VectorList.NumElements == 0 && | |||
| 1391 | VectorList.RegisterKind == VectorKind; | |||
| 1392 | } | |||
| 1393 | ||||
| 1394 | template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements, | |||
| 1395 | unsigned ElementWidth, unsigned Stride = 1> | |||
| 1396 | bool isTypedVectorList() const { | |||
| 1397 | if (Kind != k_VectorList) | |||
| 1398 | return false; | |||
| 1399 | if (VectorList.Count != NumRegs) | |||
| 1400 | return false; | |||
| 1401 | if (VectorList.RegisterKind != VectorKind) | |||
| 1402 | return false; | |||
| 1403 | if (VectorList.ElementWidth != ElementWidth) | |||
| 1404 | return false; | |||
| 1405 | if (VectorList.Stride != Stride) | |||
| 1406 | return false; | |||
| 1407 | return VectorList.NumElements == NumElements; | |||
| 1408 | } | |||
| 1409 | ||||
| 1410 | template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements, | |||
| 1411 | unsigned ElementWidth> | |||
| 1412 | DiagnosticPredicate isTypedVectorListMultiple() const { | |||
| 1413 | bool Res = | |||
| 1414 | isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>(); | |||
| 1415 | if (!Res) | |||
| 1416 | return DiagnosticPredicateTy::NoMatch; | |||
| 1417 | if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0) | |||
| 1418 | return DiagnosticPredicateTy::NearMatch; | |||
| 1419 | return DiagnosticPredicateTy::Match; | |||
| 1420 | } | |||
| 1421 | ||||
| 1422 | template <RegKind VectorKind, unsigned NumRegs, unsigned Stride, | |||
| 1423 | unsigned ElementWidth> | |||
| 1424 | DiagnosticPredicate isTypedVectorListStrided() const { | |||
| 1425 | bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0, | |||
| 1426 | ElementWidth, Stride>(); | |||
| 1427 | if (!Res) | |||
| 1428 | return DiagnosticPredicateTy::NoMatch; | |||
| 1429 | if ((VectorList.RegNum < (AArch64::Z0 + Stride)) || | |||
| 1430 | ((VectorList.RegNum >= AArch64::Z16) && | |||
| 1431 | (VectorList.RegNum < (AArch64::Z16 + Stride)))) | |||
| 1432 | return DiagnosticPredicateTy::Match; | |||
| 1433 | return DiagnosticPredicateTy::NoMatch; | |||
| 1434 | } | |||
| 1435 | ||||
| 1436 | template <int Min, int Max> | |||
| 1437 | DiagnosticPredicate isVectorIndex() const { | |||
| 1438 | if (Kind != k_VectorIndex) | |||
| 1439 | return DiagnosticPredicateTy::NoMatch; | |||
| 1440 | if (VectorIndex.Val >= Min && VectorIndex.Val <= Max) | |||
| 1441 | return DiagnosticPredicateTy::Match; | |||
| 1442 | return DiagnosticPredicateTy::NearMatch; | |||
| 1443 | } | |||
| 1444 | ||||
| 1445 | bool isToken() const override { return Kind == k_Token; } | |||
| 1446 | ||||
| 1447 | bool isTokenEqual(StringRef Str) const { | |||
| 1448 | return Kind == k_Token && getToken() == Str; | |||
| 1449 | } | |||
| 1450 | bool isSysCR() const { return Kind == k_SysCR; } | |||
| 1451 | bool isPrefetch() const { return Kind == k_Prefetch; } | |||
| 1452 | bool isPSBHint() const { return Kind == k_PSBHint; } | |||
| 1453 | bool isBTIHint() const { return Kind == k_BTIHint; } | |||
| 1454 | bool isShiftExtend() const { return Kind == k_ShiftExtend; } | |||
| 1455 | bool isShifter() const { | |||
| 1456 | if (!isShiftExtend()) | |||
| 1457 | return false; | |||
| 1458 | ||||
| 1459 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
| 1460 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
| 1461 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR || | |||
| 1462 | ST == AArch64_AM::MSL); | |||
| 1463 | } | |||
| 1464 | ||||
| 1465 | template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const { | |||
| 1466 | if (Kind != k_FPImm) | |||
| 1467 | return DiagnosticPredicateTy::NoMatch; | |||
| 1468 | ||||
| 1469 | if (getFPImmIsExact()) { | |||
| 1470 | // Lookup the immediate from table of supported immediates. | |||
| 1471 | auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum); | |||
| 1472 | assert(Desc && "Unknown enum value"); | |||
| 1473 | ||||
| 1474 | // Calculate its FP value. | |||
| 1475 | APFloat RealVal(APFloat::IEEEdouble()); | |||
| 1476 | auto StatusOrErr = | |||
| 1477 | RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero); | |||
| 1478 | if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK) | |||
| 1479 | llvm_unreachable("FP immediate is not exact")::llvm::llvm_unreachable_internal("FP immediate is not exact" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1479 ); | |||
| 1480 | ||||
| 1481 | if (getFPImm().bitwiseIsEqual(RealVal)) | |||
| 1482 | return DiagnosticPredicateTy::Match; | |||
| 1483 | } | |||
| 1484 | ||||
| 1485 | return DiagnosticPredicateTy::NearMatch; | |||
| 1486 | } | |||
| 1487 | ||||
| 1488 | template <unsigned ImmA, unsigned ImmB> | |||
| 1489 | DiagnosticPredicate isExactFPImm() const { | |||
| 1490 | DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch; | |||
| 1491 | if ((Res = isExactFPImm<ImmA>())) | |||
| 1492 | return DiagnosticPredicateTy::Match; | |||
| 1493 | if ((Res = isExactFPImm<ImmB>())) | |||
| 1494 | return DiagnosticPredicateTy::Match; | |||
| 1495 | return Res; | |||
| 1496 | } | |||
| 1497 | ||||
| 1498 | bool isExtend() const { | |||
| 1499 | if (!isShiftExtend()) | |||
| 1500 | return false; | |||
| 1501 | ||||
| 1502 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 1503 | return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || | |||
| 1504 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || | |||
| 1505 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW || | |||
| 1506 | ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || | |||
| 1507 | ET == AArch64_AM::LSL) && | |||
| 1508 | getShiftExtendAmount() <= 4; | |||
| 1509 | } | |||
| 1510 | ||||
| 1511 | bool isExtend64() const { | |||
| 1512 | if (!isExtend()) | |||
| 1513 | return false; | |||
| 1514 | // Make sure the extend expects a 32-bit source register. | |||
| 1515 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 1516 | return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB || | |||
| 1517 | ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH || | |||
| 1518 | ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW; | |||
| 1519 | } | |||
| 1520 | ||||
| 1521 | bool isExtendLSL64() const { | |||
| 1522 | if (!isExtend()) | |||
| 1523 | return false; | |||
| 1524 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 1525 | return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX || | |||
| 1526 | ET == AArch64_AM::LSL) && | |||
| 1527 | getShiftExtendAmount() <= 4; | |||
| 1528 | } | |||
| 1529 | ||||
| 1530 | template<int Width> bool isMemXExtend() const { | |||
| 1531 | if (!isExtend()) | |||
| 1532 | return false; | |||
| 1533 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 1534 | return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) && | |||
| 1535 | (getShiftExtendAmount() == Log2_32(Width / 8) || | |||
| 1536 | getShiftExtendAmount() == 0); | |||
| 1537 | } | |||
| 1538 | ||||
| 1539 | template<int Width> bool isMemWExtend() const { | |||
| 1540 | if (!isExtend()) | |||
| 1541 | return false; | |||
| 1542 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 1543 | return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) && | |||
| 1544 | (getShiftExtendAmount() == Log2_32(Width / 8) || | |||
| 1545 | getShiftExtendAmount() == 0); | |||
| 1546 | } | |||
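| | // For example (illustrative): with Width == 64, "ldr x0, [x1, x2, lsl #3]" and | |||
| | // "ldr x0, [x1, x2, sxtx]" satisfy isMemXExtend<64> (amount 3 == Log2_32(64/8), | |||
| | // or 0), while "ldr x0, [x1, w2, uxtw #3]" is covered by isMemWExtend<64>. | |||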
| 1547 | ||||
| 1548 | template <unsigned width> | |||
| 1549 | bool isArithmeticShifter() const { | |||
| 1550 | if (!isShifter()) | |||
| 1551 | return false; | |||
| 1552 | ||||
| 1553 | // An arithmetic shifter is LSL, LSR, or ASR. | |||
| 1554 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
| 1555 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
| 1556 | ST == AArch64_AM::ASR) && getShiftExtendAmount() < width; | |||
| 1557 | } | |||
| 1558 | ||||
| 1559 | template <unsigned width> | |||
| 1560 | bool isLogicalShifter() const { | |||
| 1561 | if (!isShifter()) | |||
| 1562 | return false; | |||
| 1563 | ||||
| 1564 | // A logical shifter is LSL, LSR, ASR or ROR. | |||
| 1565 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
| 1566 | return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR || | |||
| 1567 | ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) && | |||
| 1568 | getShiftExtendAmount() < width; | |||
| 1569 | } | |||
| 1570 | ||||
| 1571 | bool isMovImm32Shifter() const { | |||
| 1572 | if (!isShifter()) | |||
| 1573 | return false; | |||
| 1574 | ||||
| 1575 | // A 32-bit MOVi shifter is LSL of 0 or 16. | |||
| 1576 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
| 1577 | if (ST != AArch64_AM::LSL) | |||
| 1578 | return false; | |||
| 1579 | uint64_t Val = getShiftExtendAmount(); | |||
| 1580 | return (Val == 0 || Val == 16); | |||
| 1581 | } | |||
| 1582 | ||||
| 1583 | bool isMovImm64Shifter() const { | |||
| 1584 | if (!isShifter()) | |||
| 1585 | return false; | |||
| 1586 | ||||
| 1587 | // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48. | |||
| 1588 | AArch64_AM::ShiftExtendType ST = getShiftExtendType(); | |||
| 1589 | if (ST != AArch64_AM::LSL) | |||
| 1590 | return false; | |||
| 1591 | uint64_t Val = getShiftExtendAmount(); | |||
| 1592 | return (Val == 0 || Val == 16 || Val == 32 || Val == 48); | |||
| 1593 | } | |||
| 1594 | ||||
| 1595 | bool isLogicalVecShifter() const { | |||
| 1596 | if (!isShifter()) | |||
| 1597 | return false; | |||
| 1598 | ||||
| 1599 | // A logical vector shifter is a left shift by 0, 8, 16, or 24. | |||
| 1600 | unsigned Shift = getShiftExtendAmount(); | |||
| 1601 | return getShiftExtendType() == AArch64_AM::LSL && | |||
| 1602 | (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24); | |||
| 1603 | } | |||
| 1604 | ||||
| 1605 | bool isLogicalVecHalfWordShifter() const { | |||
| 1606 | if (!isLogicalVecShifter()) | |||
| 1607 | return false; | |||
| 1608 | ||||
| 1609 | // A logical vector half-word shifter is a left shift by 0 or 8. | |||
| 1610 | unsigned Shift = getShiftExtendAmount(); | |||
| 1611 | return getShiftExtendType() == AArch64_AM::LSL && | |||
| 1612 | (Shift == 0 || Shift == 8); | |||
| 1613 | } | |||
| 1614 | ||||
| 1615 | bool isMoveVecShifter() const { | |||
| 1616 | if (!isShiftExtend()) | |||
| 1617 | return false; | |||
| 1618 | ||||
| 1619 | // A move vector shifter is an MSL shift of 8 or 16. | |||
| 1620 | unsigned Shift = getShiftExtendAmount(); | |||
| 1621 | return getShiftExtendType() == AArch64_AM::MSL && | |||
| 1622 | (Shift == 8 || Shift == 16); | |||
| 1623 | } | |||
| 1624 | ||||
| 1625 | // Fallback unscaled operands are for aliases of LDR/STR that fall back | |||
| 1626 | // to LDUR/STUR when the offset is not legal for the former but is for | |||
| 1627 | // the latter. As such, in addition to checking for being a legal unscaled | |||
| 1628 | // address, also check that it is not a legal scaled address. This avoids | |||
| 1629 | // ambiguity in the matcher. | |||
| 1630 | template<int Width> | |||
| 1631 | bool isSImm9OffsetFB() const { | |||
| 1632 | return isSImm<9>() && !isUImm12Offset<Width / 8>(); | |||
| 1633 | } | |||
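| | // For example (illustrative): "ldr x0, [x1, #1]" cannot use the scaled uimm12 | |||
| | // form (the offset of a 64-bit LDR must be a multiple of 8), but #1 fits simm9, | |||
| | // so the alias falls back to "ldur x0, [x1, #1]". | |||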
| 1634 | ||||
| 1635 | bool isAdrpLabel() const { | |||
| 1636 | // Validation was handled during parsing, so we just verify that | |||
| 1637 | // something didn't go haywire. | |||
| 1638 | if (!isImm()) | |||
| 1639 | return false; | |||
| 1640 | ||||
| 1641 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
| 1642 | int64_t Val = CE->getValue(); | |||
| 1643 | int64_t Min = - (4096 * (1LL << (21 - 1))); | |||
| 1644 | int64_t Max = 4096 * ((1LL << (21 - 1)) - 1); | |||
| 1645 | return (Val % 4096) == 0 && Val >= Min && Val <= Max; | |||
| 1646 | } | |||
| 1647 | ||||
| 1648 | return true; | |||
| 1649 | } | |||
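| | // Worked example (illustrative): the ADRP offset range is | |||
| | //   Min = -4096 * 2^20 = -4294967296 (-4 GiB) and | |||
| | //   Max = 4096 * (2^20 - 1) = 4294963200, with the value 4096-byte aligned. | |||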
| 1650 | ||||
| 1651 | bool isAdrLabel() const { | |||
| 1652 | // Validation was handled during parsing, so we just verify that | |||
| 1653 | // something didn't go haywire. | |||
| 1654 | if (!isImm()) | |||
| 1655 | return false; | |||
| 1656 | ||||
| 1657 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) { | |||
| 1658 | int64_t Val = CE->getValue(); | |||
| 1659 | int64_t Min = - (1LL << (21 - 1)); | |||
| 1660 | int64_t Max = ((1LL << (21 - 1)) - 1); | |||
| 1661 | return Val >= Min && Val <= Max; | |||
| 1662 | } | |||
| 1663 | ||||
| 1664 | return true; | |||
| 1665 | } | |||
| 1666 | ||||
| 1667 | template <MatrixKind Kind, unsigned EltSize, unsigned RegClass> | |||
| 1668 | DiagnosticPredicate isMatrixRegOperand() const { | |||
| 1669 | if (!isMatrix()) | |||
| 1670 | return DiagnosticPredicateTy::NoMatch; | |||
| 1671 | if (getMatrixKind() != Kind || | |||
| 1672 | !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) || | |||
| 1673 | EltSize != getMatrixElementWidth()) | |||
| 1674 | return DiagnosticPredicateTy::NearMatch; | |||
| 1675 | return DiagnosticPredicateTy::Match; | |||
| 1676 | } | |||
| 1677 | ||||
| 1678 | void addExpr(MCInst &Inst, const MCExpr *Expr) const { | |||
| 1679 | // Add as immediates when possible. Null MCExpr = 0. | |||
| 1680 | if (!Expr) | |||
| 1681 | Inst.addOperand(MCOperand::createImm(0)); | |||
| 1682 | else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) | |||
| 1683 | Inst.addOperand(MCOperand::createImm(CE->getValue())); | |||
| 1684 | else | |||
| 1685 | Inst.addOperand(MCOperand::createExpr(Expr)); | |||
| 1686 | } | |||
| 1687 | ||||
| 1688 | void addRegOperands(MCInst &Inst, unsigned N) const { | |||
| 1689 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1690 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
| 1691 | } | |||
| 1692 | ||||
| 1693 | void addMatrixOperands(MCInst &Inst, unsigned N) const { | |||
| 1694 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1695 | Inst.addOperand(MCOperand::createReg(getMatrixReg())); | |||
| 1696 | } | |||
| 1697 | ||||
| 1698 | void addGPR32as64Operands(MCInst &Inst, unsigned N) const { | |||
| 1699 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1700 | assert( | |||
| 1701 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg())); | |||
| 1702 | ||||
| 1703 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); | |||
| 1704 | uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister( | |||
| 1705 | RI->getEncodingValue(getReg())); | |||
| 1706 | ||||
| 1707 | Inst.addOperand(MCOperand::createReg(Reg)); | |||
| 1708 | } | |||
| 1709 | ||||
| 1710 | void addGPR64as32Operands(MCInst &Inst, unsigned N) const { | |||
| 1711 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1712 | assert( | |||
| 1713 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg())); | |||
| 1714 | ||||
| 1715 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); | |||
| 1716 | uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister( | |||
| 1717 | RI->getEncodingValue(getReg())); | |||
| 1718 | ||||
| 1719 | Inst.addOperand(MCOperand::createReg(Reg)); | |||
| 1720 | } | |||
| 1721 | ||||
| 1722 | template <int Width> | |||
| 1723 | void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const { | |||
| 1724 | unsigned Base; | |||
| 1725 | switch (Width) { | |||
| 1726 | case 8: Base = AArch64::B0; break; | |||
| 1727 | case 16: Base = AArch64::H0; break; | |||
| 1728 | case 32: Base = AArch64::S0; break; | |||
| 1729 | case 64: Base = AArch64::D0; break; | |||
| 1730 | case 128: Base = AArch64::Q0; break; | |||
| 1731 | default: | |||
| 1732 | llvm_unreachable("Unsupported width")::llvm::llvm_unreachable_internal("Unsupported width", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 1732); | |||
| 1733 | } | |||
| 1734 | Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base)); | |||
| 1735 | } | |||
| 1736 | ||||
| 1737 | void addVectorReg64Operands(MCInst &Inst, unsigned N) const { | |||
| 1738 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1739 | assert( | |||
| 1740 | AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())); | |||
| 1741 | Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0)); | |||
| 1742 | } | |||
| 1743 | ||||
| 1744 | void addVectorReg128Operands(MCInst &Inst, unsigned N) const { | |||
| 1745 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1746 | assert( | |||
| 1747 | AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg())); | |||
| 1748 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
| 1749 | } | |||
| 1750 | ||||
| 1751 | void addVectorRegLoOperands(MCInst &Inst, unsigned N) const { | |||
| 1752 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1753 | Inst.addOperand(MCOperand::createReg(getReg())); | |||
| 1754 | } | |||
| 1755 | ||||
| 1756 | enum VecListIndexType { | |||
| 1757 | VecListIdx_DReg = 0, | |||
| 1758 | VecListIdx_QReg = 1, | |||
| 1759 | VecListIdx_ZReg = 2, | |||
| 1760 | VecListIdx_PReg = 3, | |||
| 1761 | }; | |||
| 1762 | ||||
| 1763 | template <VecListIndexType RegTy, unsigned NumRegs> | |||
| 1764 | void addVectorListOperands(MCInst &Inst, unsigned N) const { | |||
| 1765 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1766 | static const unsigned FirstRegs[][5] = { | |||
| 1767 | /* DReg */ { AArch64::Q0, | |||
| 1768 | AArch64::D0, AArch64::D0_D1, | |||
| 1769 | AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 }, | |||
| 1770 | /* QReg */ { AArch64::Q0, | |||
| 1771 | AArch64::Q0, AArch64::Q0_Q1, | |||
| 1772 | AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 }, | |||
| 1773 | /* ZReg */ { AArch64::Z0, | |||
| 1774 | AArch64::Z0, AArch64::Z0_Z1, | |||
| 1775 | AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }, | |||
| 1776 | /* PReg */ { AArch64::P0, | |||
| 1777 | AArch64::P0, AArch64::P0_P1 } | |||
| 1778 | }; | |||
| 1779 | ||||
| 1780 | assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) && | |||
| 1781 | " NumRegs must be <= 4 for ZRegs"); | |||
| 1782 | ||||
| 1783 | assert((RegTy != VecListIdx_PReg || NumRegs <= 2) && | |||
| 1784 | " NumRegs must be <= 2 for PRegs"); | |||
| 1785 | ||||
| 1786 | unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs]; | |||
| 1787 | Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() - | |||
| 1788 | FirstRegs[(unsigned)RegTy][0])); | |||
| 1789 | } | |||
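| | // For example (illustrative): the NEON list "{ v2.4s, v3.4s }" uses | |||
| | // RegTy == VecListIdx_QReg and NumRegs == 2, so the operand register is | |||
| | // Q0_Q1 + (Q2 - Q0), i.e. the sequential tuple Q2_Q3. | |||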
| 1790 | ||||
| 1791 | template <unsigned NumRegs> | |||
| 1792 | void addStridedVectorListOperands(MCInst &Inst, unsigned N) const { | |||
| 1793 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1794 | assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4"); | |||
| 1795 | ||||
| 1796 | switch (NumRegs) { | |||
| 1797 | case 2: | |||
| 1798 | if (getVectorListStart() < AArch64::Z16) { | |||
| 1799 | assert((getVectorListStart() < AArch64::Z8) && | |||
| 1800 | (getVectorListStart() >= AArch64::Z0) && "Invalid Register"); | |||
| 1801 | Inst.addOperand(MCOperand::createReg( | |||
| 1802 | AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0)); | |||
| 1803 | } else { | |||
| 1804 | assert((getVectorListStart() < AArch64::Z24) && | |||
| 1805 | (getVectorListStart() >= AArch64::Z16) && "Invalid Register"); | |||
| 1806 | Inst.addOperand(MCOperand::createReg( | |||
| 1807 | AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16)); | |||
| 1808 | } | |||
| 1809 | break; | |||
| 1810 | case 4: | |||
| 1811 | if (getVectorListStart() < AArch64::Z16) { | |||
| 1812 | assert((getVectorListStart() < AArch64::Z4) && | |||
| 1813 | (getVectorListStart() >= AArch64::Z0) && "Invalid Register"); | |||
| 1814 | Inst.addOperand(MCOperand::createReg( | |||
| 1815 | AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0)); | |||
| 1816 | } else { | |||
| 1817 | assert((getVectorListStart() < AArch64::Z20) && | |||
| 1818 | (getVectorListStart() >= AArch64::Z16) && "Invalid Register"); | |||
| 1819 | Inst.addOperand(MCOperand::createReg( | |||
| 1820 | AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16)); | |||
| 1821 | } | |||
| 1822 | break; | |||
| 1823 | default: | |||
| 1824 | llvm_unreachable("Unsupported number of registers for strided vec list")::llvm::llvm_unreachable_internal("Unsupported number of registers for strided vec list" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1824 ); | |||
| 1825 | } | |||
| 1826 | } | |||
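| | // For example (illustrative): a two-register strided list starting at Z3 maps | |||
| | // to Z0_Z8 + (Z3 - Z0) == Z3_Z11, and one starting at Z19 maps to | |||
| | // Z16_Z24 + (Z19 - Z16) == Z19_Z27. | |||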
| 1827 | ||||
| 1828 | void addMatrixTileListOperands(MCInst &Inst, unsigned N) const { | |||
| 1829 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1830 | unsigned RegMask = getMatrixTileListRegMask(); | |||
| 1831 | assert(RegMask <= 0xFF && "Invalid mask!"); | |||
| 1832 | Inst.addOperand(MCOperand::createImm(RegMask)); | |||
| 1833 | } | |||
| 1834 | ||||
| 1835 | void addVectorIndexOperands(MCInst &Inst, unsigned N) const { | |||
| 1836 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1837 | Inst.addOperand(MCOperand::createImm(getVectorIndex())); | |||
| 1838 | } | |||
| 1839 | ||||
| 1840 | template <unsigned ImmIs0, unsigned ImmIs1> | |||
| 1841 | void addExactFPImmOperands(MCInst &Inst, unsigned N) const { | |||
| 1842 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1843 | assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand"); | |||
| 1844 | Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>()))); | |||
| 1845 | } | |||
| 1846 | ||||
| 1847 | void addImmOperands(MCInst &Inst, unsigned N) const { | |||
| 1848 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1849 | // If this is a pageoff symrefexpr with an addend, adjust the addend | |||
| 1850 | // to be only the page-offset portion. Otherwise, just add the expr | |||
| 1851 | // as-is. | |||
| 1852 | addExpr(Inst, getImm()); | |||
| 1853 | } | |||
| 1854 | ||||
| 1855 | template <int Shift> | |||
| 1856 | void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const { | |||
| 1857 | assert(N == 2 && "Invalid number of operands!"); | |||
| 1858 | if (auto ShiftedVal = getShiftedVal<Shift>()) { | |||
| 1859 | Inst.addOperand(MCOperand::createImm(ShiftedVal->first)); | |||
| 1860 | Inst.addOperand(MCOperand::createImm(ShiftedVal->second)); | |||
| 1861 | } else if (isShiftedImm()) { | |||
| 1862 | addExpr(Inst, getShiftedImmVal()); | |||
| 1863 | Inst.addOperand(MCOperand::createImm(getShiftedImmShift())); | |||
| 1864 | } else { | |||
| 1865 | addExpr(Inst, getImm()); | |||
| 1866 | Inst.addOperand(MCOperand::createImm(0)); | |||
| 1867 | } | |||
| 1868 | } | |||
| 1869 | ||||
| 1870 | template <int Shift> | |||
| 1871 | void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const { | |||
| 1872 | assert(N == 2 && "Invalid number of operands!"); | |||
| 1873 | if (auto ShiftedVal = getShiftedVal<Shift>()) { | |||
| 1874 | Inst.addOperand(MCOperand::createImm(-ShiftedVal->first)); | |||
| 1875 | Inst.addOperand(MCOperand::createImm(ShiftedVal->second)); | |||
| 1876 | } else | |||
| 1877 | llvm_unreachable("Not a shifted negative immediate")::llvm::llvm_unreachable_internal("Not a shifted negative immediate" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 1877 ); | |||
| 1878 | } | |||
| 1879 | ||||
| 1880 | void addCondCodeOperands(MCInst &Inst, unsigned N) const { | |||
| 1881 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1882 | Inst.addOperand(MCOperand::createImm(getCondCode())); | |||
| 1883 | } | |||
| 1884 | ||||
| 1885 | void addAdrpLabelOperands(MCInst &Inst, unsigned N) const { | |||
| 1886 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1887 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1888 | if (!MCE) | |||
| 1889 | addExpr(Inst, getImm()); | |||
| 1890 | else | |||
| 1891 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12)); | |||
| 1892 | } | |||
| 1893 | ||||
| 1894 | void addAdrLabelOperands(MCInst &Inst, unsigned N) const { | |||
| 1895 | addImmOperands(Inst, N); | |||
| 1896 | } | |||
| 1897 | ||||
| 1898 | template<int Scale> | |||
| 1899 | void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const { | |||
| 1900 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1901 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1902 | ||||
| 1903 | if (!MCE) { | |||
| 1904 | Inst.addOperand(MCOperand::createExpr(getImm())); | |||
| 1905 | return; | |||
| 1906 | } | |||
| 1907 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); | |||
| 1908 | } | |||
| 1909 | ||||
| 1910 | void addUImm6Operands(MCInst &Inst, unsigned N) const { | |||
| 1911 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1912 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 1913 | Inst.addOperand(MCOperand::createImm(MCE->getValue())); | |||
| 1914 | } | |||
| 1915 | ||||
| 1916 | template <int Scale> | |||
| 1917 | void addImmScaledOperands(MCInst &Inst, unsigned N) const { | |||
| 1918 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1919 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 1920 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale)); | |||
| 1921 | } | |||
| 1922 | ||||
| 1923 | template <int Scale> | |||
| 1924 | void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const { | |||
| 1925 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1926 | Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale)); | |||
| 1927 | } | |||
| 1928 | ||||
| 1929 | template <typename T> | |||
| 1930 | void addLogicalImmOperands(MCInst &Inst, unsigned N) const { | |||
| 1931 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1932 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 1933 | std::make_unsigned_t<T> Val = MCE->getValue(); | |||
| 1934 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); | |||
| 1935 | Inst.addOperand(MCOperand::createImm(encoding)); | |||
| 1936 | } | |||
| 1937 | ||||
| 1938 | template <typename T> | |||
| 1939 | void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const { | |||
| 1940 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1941 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 1942 | std::make_unsigned_t<T> Val = ~MCE->getValue(); | |||
| 1943 | uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8); | |||
| 1944 | Inst.addOperand(MCOperand::createImm(encoding)); | |||
| 1945 | } | |||
| 1946 | ||||
| 1947 | void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const { | |||
| 1948 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1949 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 1950 | uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue()); | |||
| 1951 | Inst.addOperand(MCOperand::createImm(encoding)); | |||
| 1952 | } | |||
| 1953 | ||||
| 1954 | void addBranchTarget26Operands(MCInst &Inst, unsigned N) const { | |||
| 1955 | // Branch operands don't encode the low bits, so shift them off | |||
| 1956 | // here. If it's a label, however, just put it on directly as there's | |||
| 1957 | // not enough information now to do anything. | |||
| 1958 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1959 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1960 | if (!MCE) { | |||
| 1961 | addExpr(Inst, getImm()); | |||
| 1962 | return; | |||
| 1963 | } | |||
| 1964 | assert(MCE && "Invalid constant immediate operand!"); | |||
| 1965 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); | |||
| 1966 | } | |||
| 1967 | ||||
| 1968 | void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const { | |||
| 1969 | // Branch operands don't encode the low bits, so shift them off | |||
| 1970 | // here. If it's a label, however, just put it on directly as there's | |||
| 1971 | // not enough information now to do anything. | |||
| 1972 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1973 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1974 | if (!MCE) { | |||
| 1975 | addExpr(Inst, getImm()); | |||
| 1976 | return; | |||
| 1977 | } | |||
| 1978 | assert(MCE && "Invalid constant immediate operand!"); | |||
| 1979 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); | |||
| 1980 | } | |||
| 1981 | ||||
| 1982 | void addBranchTarget14Operands(MCInst &Inst, unsigned N) const { | |||
| 1983 | // Branch operands don't encode the low bits, so shift them off | |||
| 1984 | // here. If it's a label, however, just put it on directly as there's | |||
| 1985 | // not enough information now to do anything. | |||
| 1986 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1987 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 1988 | if (!MCE) { | |||
| 1989 | addExpr(Inst, getImm()); | |||
| 1990 | return; | |||
| 1991 | } | |||
| 1992 | assert(MCE && "Invalid constant immediate operand!"); | |||
| 1993 | Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2)); | |||
| 1994 | } | |||
| 1995 | ||||
| 1996 | void addFPImmOperands(MCInst &Inst, unsigned N) const { | |||
| 1997 | assert(N == 1 && "Invalid number of operands!"); | |||
| 1998 | Inst.addOperand(MCOperand::createImm( | |||
| 1999 | AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()))); | |||
| 2000 | } | |||
| 2001 | ||||
| 2002 | void addBarrierOperands(MCInst &Inst, unsigned N) const { | |||
| 2003 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2004 | Inst.addOperand(MCOperand::createImm(getBarrier())); | |||
| 2005 | } | |||
| 2006 | ||||
| 2007 | void addBarriernXSOperands(MCInst &Inst, unsigned N) const { | |||
| 2008 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2009 | Inst.addOperand(MCOperand::createImm(getBarrier())); | |||
| 2010 | } | |||
| 2011 | ||||
| 2012 | void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const { | |||
| 2013 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2014 | ||||
| 2015 | Inst.addOperand(MCOperand::createImm(SysReg.MRSReg)); | |||
| 2016 | } | |||
| 2017 | ||||
| 2018 | void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const { | |||
| 2019 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2020 | ||||
| 2021 | Inst.addOperand(MCOperand::createImm(SysReg.MSRReg)); | |||
| 2022 | } | |||
| 2023 | ||||
| 2024 | void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const { | |||
| 2025 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2026 | ||||
| 2027 | Inst.addOperand(MCOperand::createImm(SysReg.PStateField)); | |||
| 2028 | } | |||
| 2029 | ||||
| 2030 | void addSVCROperands(MCInst &Inst, unsigned N) const { | |||
| 2031 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2032 | ||||
| 2033 | Inst.addOperand(MCOperand::createImm(SVCR.PStateField)); | |||
| 2034 | } | |||
| 2035 | ||||
| 2036 | void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const { | |||
| 2037 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2038 | ||||
| 2039 | Inst.addOperand(MCOperand::createImm(SysReg.PStateField)); | |||
| 2040 | } | |||
| 2041 | ||||
| 2042 | void addSysCROperands(MCInst &Inst, unsigned N) const { | |||
| 2043 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2044 | Inst.addOperand(MCOperand::createImm(getSysCR())); | |||
| 2045 | } | |||
| 2046 | ||||
| 2047 | void addPrefetchOperands(MCInst &Inst, unsigned N) const { | |||
| 2048 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2049 | Inst.addOperand(MCOperand::createImm(getPrefetch())); | |||
| 2050 | } | |||
| 2051 | ||||
| 2052 | void addPSBHintOperands(MCInst &Inst, unsigned N) const { | |||
| 2053 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2054 | Inst.addOperand(MCOperand::createImm(getPSBHint())); | |||
| 2055 | } | |||
| 2056 | ||||
| 2057 | void addBTIHintOperands(MCInst &Inst, unsigned N) const { | |||
| 2058 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2059 | Inst.addOperand(MCOperand::createImm(getBTIHint())); | |||
| 2060 | } | |||
| 2061 | ||||
| 2062 | void addShifterOperands(MCInst &Inst, unsigned N) const { | |||
| 2063 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2064 | unsigned Imm = | |||
| 2065 | AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount()); | |||
| 2066 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
| 2067 | } | |||
| 2068 | ||||
| 2069 | void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const { | |||
| 2070 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2071 | ||||
| 2072 | if (!isScalarReg()) | |||
| 2073 | return; | |||
| 2074 | ||||
| 2075 | const MCRegisterInfo *RI = Ctx.getRegisterInfo(); | |||
| 2076 | uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID) | |||
| 2077 | .getRegister(RI->getEncodingValue(getReg())); | |||
| 2078 | if (Reg != AArch64::XZR) | |||
| 2079 | llvm_unreachable("wrong register"); | |||
| 2080 | ||||
| 2081 | Inst.addOperand(MCOperand::createReg(AArch64::XZR)); | |||
| 2082 | } | |||
| 2083 | ||||
| 2084 | void addExtendOperands(MCInst &Inst, unsigned N) const { | |||
| 2085 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2086 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 2087 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW; | |||
| 2088 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); | |||
| 2089 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
| 2090 | } | |||
| 2091 | ||||
| 2092 | void addExtend64Operands(MCInst &Inst, unsigned N) const { | |||
| 2093 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2094 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 2095 | if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX; | |||
| 2096 | unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); | |||
| 2097 | Inst.addOperand(MCOperand::createImm(Imm)); | |||
| 2098 | } | |||
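| | // In both addExtend variants a bare "lsl" is canonicalised first: to uxtw | |||
| | // for the 32-bit form and uxtx for the 64-bit form, before the | |||
| | // arith-extend immediate is built. | |||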
| 2099 | ||||
| 2100 | void addMemExtendOperands(MCInst &Inst, unsigned N) const { | |||
| 2101 | assert(N == 2 && "Invalid number of operands!"); | |||
| 2102 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 2103 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; | |||
| 2104 | Inst.addOperand(MCOperand::createImm(IsSigned)); | |||
| 2105 | Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0)); | |||
| 2106 | } | |||
| 2107 | ||||
| 2108 | // For 8-bit load/store instructions with a register offset, both the | |||
| 2109 | // "DoShift" and "NoShift" variants have a shift of 0. Because of this, | |||
| 2110 | // they're disambiguated by whether the shift was explicit or implicit rather | |||
| 2111 | // than its size. | |||
| 2112 | void addMemExtend8Operands(MCInst &Inst, unsigned N) const { | |||
| 2113 | assert(N == 2 && "Invalid number of operands!"); | |||
| 2114 | AArch64_AM::ShiftExtendType ET = getShiftExtendType(); | |||
| 2115 | bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; | |||
| 2116 | Inst.addOperand(MCOperand::createImm(IsSigned)); | |||
| 2117 | Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount())); | |||
| 2118 | } | |||
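| | // e.g. "ldrb w0, [x1, x2]" and "ldrb w0, [x1, x2, lsl #0]" both carry a | |||
| | // shift amount of 0; only the second spells it out, which is what selects | |||
| | // the "DoShift" instruction variant. | |||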
| 2119 | ||||
| 2120 | template<int Shift> | |||
| 2121 | void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const { | |||
| 2122 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2123 | ||||
| 2124 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); | |||
| 2125 | if (CE) { | |||
| 2126 | uint64_t Value = CE->getValue(); | |||
| 2127 | Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff)); | |||
| 2128 | } else { | |||
| 2129 | addExpr(Inst, getImm()); | |||
| 2130 | } | |||
| 2131 | } | |||
| 2132 | ||||
| 2133 | template<int Shift> | |||
| 2134 | void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const { | |||
| 2135 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2136 | ||||
| 2137 | const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); | |||
| 2138 | uint64_t Value = CE->getValue(); | |||
| 2139 | Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff)); | |||
| 2140 | } | |||
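| | // These back the "mov Rd, #imm" aliases: with Shift == 16, for instance, | |||
| | // the MOVZ form of mov x0, #0x12340000 emits the 16-bit chunk 0x1234, | |||
| | // while the MOVN form emits the bitwise-NOT of the selected chunk. | |||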
| 2141 | ||||
| 2142 | void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const { | |||
| 2143 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2144 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 2145 | Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90)); | |||
| 2146 | } | |||
| 2147 | ||||
| 2148 | void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const { | |||
| 2149 | assert(N == 1 && "Invalid number of operands!"); | |||
| 2150 | const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); | |||
| 2151 | Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180)); | |||
| 2152 | } | |||
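| | // Complex rotation immediates are written in degrees: the "even" form | |||
| | // maps {0, 90, 180, 270} to {0, 1, 2, 3} via /90, and the "odd" form | |||
| | // maps {90, 270} to {0, 1} via (rot - 90) / 180. | |||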
| 2153 | ||||
| 2154 | void print(raw_ostream &OS) const override; | |||
| 2155 | ||||
| 2156 | static std::unique_ptr<AArch64Operand> | |||
| 2157 | CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) { | |||
| 2158 | auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx); | |||
| 2159 | Op->Tok.Data = Str.data(); | |||
| 2160 | Op->Tok.Length = Str.size(); | |||
| 2161 | Op->Tok.IsSuffix = IsSuffix; | |||
| 2162 | Op->StartLoc = S; | |||
| 2163 | Op->EndLoc = S; | |||
| 2164 | return Op; | |||
| 2165 | } | |||
| 2166 | ||||
| 2167 | static std::unique_ptr<AArch64Operand> | |||
| 2168 | CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx, | |||
| 2169 | RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg, | |||
| 2170 | AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, | |||
| 2171 | unsigned ShiftAmount = 0, | |||
| 2172 | unsigned HasExplicitAmount = false) { | |||
| 2173 | auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx); | |||
| 2174 | Op->Reg.RegNum = RegNum; | |||
| 2175 | Op->Reg.Kind = Kind; | |||
| 2176 | Op->Reg.ElementWidth = 0; | |||
| 2177 | Op->Reg.EqualityTy = EqTy; | |||
| 2178 | Op->Reg.ShiftExtend.Type = ExtTy; | |||
| 2179 | Op->Reg.ShiftExtend.Amount = ShiftAmount; | |||
| 2180 | Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount; | |||
| 2181 | Op->StartLoc = S; | |||
| 2182 | Op->EndLoc = E; | |||
| 2183 | return Op; | |||
| 2184 | } | |||
| 2185 | ||||
| 2186 | static std::unique_ptr<AArch64Operand> | |||
| 2187 | CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth, | |||
| 2188 | SMLoc S, SMLoc E, MCContext &Ctx, | |||
| 2189 | AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, | |||
| 2190 | unsigned ShiftAmount = 0, | |||
| 2191 | unsigned HasExplicitAmount = false) { | |||
| 2192 | assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || | |||
| 2193 | Kind == RegKind::SVEPredicateVector || | |||
| 2194 | Kind == RegKind::SVEPredicateAsCounter) && | |||
| 2195 | "Invalid vector kind"); | |||
| 2196 | auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount, | |||
| 2197 | HasExplicitAmount); | |||
| 2198 | Op->Reg.ElementWidth = ElementWidth; | |||
| 2199 | return Op; | |||
| 2200 | } | |||
| 2201 | ||||
| 2202 | static std::unique_ptr<AArch64Operand> | |||
| 2203 | CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride, | |||
| 2204 | unsigned NumElements, unsigned ElementWidth, | |||
| 2205 | RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
| 2206 | auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx); | |||
| 2207 | Op->VectorList.RegNum = RegNum; | |||
| 2208 | Op->VectorList.Count = Count; | |||
| 2209 | Op->VectorList.Stride = Stride; | |||
| 2210 | Op->VectorList.NumElements = NumElements; | |||
| 2211 | Op->VectorList.ElementWidth = ElementWidth; | |||
| 2212 | Op->VectorList.RegisterKind = RegisterKind; | |||
| 2213 | Op->StartLoc = S; | |||
| 2214 | Op->EndLoc = E; | |||
| 2215 | return Op; | |||
| 2216 | } | |||
| 2217 | ||||
| 2218 | static std::unique_ptr<AArch64Operand> | |||
| 2219 | CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
| 2220 | auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx); | |||
| 2221 | Op->VectorIndex.Val = Idx; | |||
| 2222 | Op->StartLoc = S; | |||
| 2223 | Op->EndLoc = E; | |||
| 2224 | return Op; | |||
| 2225 | } | |||
| 2226 | ||||
| 2227 | static std::unique_ptr<AArch64Operand> | |||
| 2228 | CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
| 2229 | auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx); | |||
| 2230 | Op->MatrixTileList.RegMask = RegMask; | |||
| 2231 | Op->StartLoc = S; | |||
| 2232 | Op->EndLoc = E; | |||
| 2233 | return Op; | |||
| 2234 | } | |||
| 2235 | ||||
| 2236 | static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs, | |||
| 2237 | const unsigned ElementWidth) { | |||
| 2238 | static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>> | |||
| 2239 | RegMap = { | |||
| 2240 | {{0, AArch64::ZAB0}, | |||
| 2241 | {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3, | |||
| 2242 | AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}}, | |||
| 2243 | {{8, AArch64::ZAB0}, | |||
| 2244 | {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3, | |||
| 2245 | AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}}, | |||
| 2246 | {{16, AArch64::ZAH0}, | |||
| 2247 | {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}}, | |||
| 2248 | {{16, AArch64::ZAH1}, | |||
| 2249 | {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}}, | |||
| 2250 | {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}}, | |||
| 2251 | {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}}, | |||
| 2252 | {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}}, | |||
| 2253 | {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}}, | |||
| 2254 | }; | |||
| 2255 | ||||
| 2256 | if (ElementWidth == 64) | |||
| 2257 | OutRegs.insert(Reg); | |||
| 2258 | else { | |||
| 2259 | std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)]; | |||
| 2260 | assert(!Regs.empty() && "Invalid tile or element width!"); | |||
| 2261 | for (auto OutReg : Regs) | |||
| 2262 | OutRegs.insert(OutReg); | |||
| 2263 | } | |||
| 2264 | } | |||
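| | // e.g. the 32-bit tile za1.s expands to the 64-bit tiles ZAD1 and ZAD5 in | |||
| | // the map above, so tile lists written with narrower element types are | |||
| | // tracked in terms of the .d tiles they overlap. | |||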
| 2265 | ||||
| 2266 | static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S, | |||
| 2267 | SMLoc E, MCContext &Ctx) { | |||
| 2268 | auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx); | |||
| 2269 | Op->Imm.Val = Val; | |||
| 2270 | Op->StartLoc = S; | |||
| 2271 | Op->EndLoc = E; | |||
| 2272 | return Op; | |||
| 2273 | } | |||
| 2274 | ||||
| 2275 | static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, | |||
| 2276 | unsigned ShiftAmount, | |||
| 2277 | SMLoc S, SMLoc E, | |||
| 2278 | MCContext &Ctx) { | |||
| 2279 | auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx); | |||
| 2280 | Op->ShiftedImm.Val = Val; | |||
| 2281 | Op->ShiftedImm.ShiftAmount = ShiftAmount; | |||
| 2282 | Op->StartLoc = S; | |||
| 2283 | Op->EndLoc = E; | |||
| 2284 | return Op; | |||
| 2285 | } | |||
| 2286 | ||||
| 2287 | static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First, | |||
| 2288 | unsigned Last, SMLoc S, | |||
| 2289 | SMLoc E, | |||
| 2290 | MCContext &Ctx) { | |||
| 2291 | auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx); | |||
| 2292 | Op->ImmRange.First = First; | |||
| 2293 | Op->ImmRange.Last = Last; | |||
| | Op->StartLoc = S; | |||
| 2294 | Op->EndLoc = E; | |||
| 2295 | return Op; | |||
| 2296 | } | |||
| 2297 | ||||
| 2298 | static std::unique_ptr<AArch64Operand> | |||
| 2299 | CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
| 2300 | auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx); | |||
| 2301 | Op->CondCode.Code = Code; | |||
| 2302 | Op->StartLoc = S; | |||
| 2303 | Op->EndLoc = E; | |||
| 2304 | return Op; | |||
| 2305 | } | |||
| 2306 | ||||
| 2307 | static std::unique_ptr<AArch64Operand> | |||
| 2308 | CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) { | |||
| 2309 | auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx); | |||
| 2310 | Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue(); | |||
| 2311 | Op->FPImm.IsExact = IsExact; | |||
| 2312 | Op->StartLoc = S; | |||
| 2313 | Op->EndLoc = S; | |||
| 2314 | return Op; | |||
| 2315 | } | |||
| 2316 | ||||
| 2317 | static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, | |||
| 2318 | StringRef Str, | |||
| 2319 | SMLoc S, | |||
| 2320 | MCContext &Ctx, | |||
| 2321 | bool HasnXSModifier) { | |||
| 2322 | auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx); | |||
| 2323 | Op->Barrier.Val = Val; | |||
| 2324 | Op->Barrier.Data = Str.data(); | |||
| 2325 | Op->Barrier.Length = Str.size(); | |||
| 2326 | Op->Barrier.HasnXSModifier = HasnXSModifier; | |||
| 2327 | Op->StartLoc = S; | |||
| 2328 | Op->EndLoc = S; | |||
| 2329 | return Op; | |||
| 2330 | } | |||
| 2331 | ||||
| 2332 | static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S, | |||
| 2333 | uint32_t MRSReg, | |||
| 2334 | uint32_t MSRReg, | |||
| 2335 | uint32_t PStateField, | |||
| 2336 | MCContext &Ctx) { | |||
| 2337 | auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx); | |||
| 2338 | Op->SysReg.Data = Str.data(); | |||
| 2339 | Op->SysReg.Length = Str.size(); | |||
| 2340 | Op->SysReg.MRSReg = MRSReg; | |||
| 2341 | Op->SysReg.MSRReg = MSRReg; | |||
| 2342 | Op->SysReg.PStateField = PStateField; | |||
| 2343 | Op->StartLoc = S; | |||
| 2344 | Op->EndLoc = S; | |||
| 2345 | return Op; | |||
| 2346 | } | |||
| 2347 | ||||
| 2348 | static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S, | |||
| 2349 | SMLoc E, MCContext &Ctx) { | |||
| 2350 | auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx); | |||
| 2351 | Op->SysCRImm.Val = Val; | |||
| 2352 | Op->StartLoc = S; | |||
| 2353 | Op->EndLoc = E; | |||
| 2354 | return Op; | |||
| 2355 | } | |||
| 2356 | ||||
| 2357 | static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, | |||
| 2358 | StringRef Str, | |||
| 2359 | SMLoc S, | |||
| 2360 | MCContext &Ctx) { | |||
| 2361 | auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx); | |||
| 2362 | Op->Prefetch.Val = Val; | |||
| 2363 | Op->Barrier.Data = Str.data(); | |||
| 2364 | Op->Barrier.Length = Str.size(); | |||
| 2365 | Op->StartLoc = S; | |||
| 2366 | Op->EndLoc = S; | |||
| 2367 | return Op; | |||
| 2368 | } | |||
| 2369 | ||||
| 2370 | static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, | |||
| 2371 | StringRef Str, | |||
| 2372 | SMLoc S, | |||
| 2373 | MCContext &Ctx) { | |||
| 2374 | auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx); | |||
| 2375 | Op->PSBHint.Val = Val; | |||
| 2376 | Op->PSBHint.Data = Str.data(); | |||
| 2377 | Op->PSBHint.Length = Str.size(); | |||
| 2378 | Op->StartLoc = S; | |||
| 2379 | Op->EndLoc = S; | |||
| 2380 | return Op; | |||
| 2381 | } | |||
| 2382 | ||||
| 2383 | static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val, | |||
| 2384 | StringRef Str, | |||
| 2385 | SMLoc S, | |||
| 2386 | MCContext &Ctx) { | |||
| 2387 | auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx); | |||
| 2388 | Op->BTIHint.Val = Val | 32; | |||
| 2389 | Op->BTIHint.Data = Str.data(); | |||
| 2390 | Op->BTIHint.Length = Str.size(); | |||
| 2391 | Op->StartLoc = S; | |||
| 2392 | Op->EndLoc = S; | |||
| 2393 | return Op; | |||
| 2394 | } | |||
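| | // BTI variants live in the HINT #32..#38 space, so the stored value is the | |||
| | // looked-up encoding with 32 OR'd in. | |||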
| 2395 | ||||
| 2396 | static std::unique_ptr<AArch64Operand> | |||
| 2397 | CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind, | |||
| 2398 | SMLoc S, SMLoc E, MCContext &Ctx) { | |||
| 2399 | auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx); | |||
| 2400 | Op->MatrixReg.RegNum = RegNum; | |||
| 2401 | Op->MatrixReg.ElementWidth = ElementWidth; | |||
| 2402 | Op->MatrixReg.Kind = Kind; | |||
| 2403 | Op->StartLoc = S; | |||
| 2404 | Op->EndLoc = E; | |||
| 2405 | return Op; | |||
| 2406 | } | |||
| 2407 | ||||
| 2408 | static std::unique_ptr<AArch64Operand> | |||
| 2409 | CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) { | |||
| 2410 | auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx); | |||
| 2411 | Op->SVCR.PStateField = PStateField; | |||
| 2412 | Op->SVCR.Data = Str.data(); | |||
| 2413 | Op->SVCR.Length = Str.size(); | |||
| 2414 | Op->StartLoc = S; | |||
| 2415 | Op->EndLoc = S; | |||
| 2416 | return Op; | |||
| 2417 | } | |||
| 2418 | ||||
| 2419 | static std::unique_ptr<AArch64Operand> | |||
| 2420 | CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, | |||
| 2421 | bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { | |||
| 2422 | auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx); | |||
| 2423 | Op->ShiftExtend.Type = ShOp; | |||
| 2424 | Op->ShiftExtend.Amount = Val; | |||
| 2425 | Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; | |||
| 2426 | Op->StartLoc = S; | |||
| 2427 | Op->EndLoc = E; | |||
| 2428 | return Op; | |||
| 2429 | } | |||
| 2430 | }; | |||
| 2431 | ||||
| 2432 | } // end anonymous namespace. | |||
| 2433 | ||||
| 2434 | void AArch64Operand::print(raw_ostream &OS) const { | |||
| 2435 | switch (Kind) { | |||
| 2436 | case k_FPImm: | |||
| 2437 | OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue(); | |||
| 2438 | if (!getFPImmIsExact()) | |||
| 2439 | OS << " (inexact)"; | |||
| 2440 | OS << ">"; | |||
| 2441 | break; | |||
| 2442 | case k_Barrier: { | |||
| 2443 | StringRef Name = getBarrierName(); | |||
| 2444 | if (!Name.empty()) | |||
| 2445 | OS << "<barrier " << Name << ">"; | |||
| 2446 | else | |||
| 2447 | OS << "<barrier invalid #" << getBarrier() << ">"; | |||
| 2448 | break; | |||
| 2449 | } | |||
| 2450 | case k_Immediate: | |||
| 2451 | OS << *getImm(); | |||
| 2452 | break; | |||
| 2453 | case k_ShiftedImm: { | |||
| 2454 | unsigned Shift = getShiftedImmShift(); | |||
| 2455 | OS << "<shiftedimm "; | |||
| 2456 | OS << *getShiftedImmVal(); | |||
| 2457 | OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">"; | |||
| 2458 | break; | |||
| 2459 | } | |||
| 2460 | case k_ImmRange: { | |||
| 2461 | OS << "<immrange "; | |||
| 2462 | OS << getFirstImmVal(); | |||
| 2463 | OS << ":" << getLastImmVal() << ">"; | |||
| 2464 | break; | |||
| 2465 | } | |||
| 2466 | case k_CondCode: | |||
| 2467 | OS << "<condcode " << getCondCode() << ">"; | |||
| 2468 | break; | |||
| 2469 | case k_VectorList: { | |||
| 2470 | OS << "<vectorlist "; | |||
| 2471 | unsigned Reg = getVectorListStart(); | |||
| 2472 | for (unsigned i = 0, e = getVectorListCount(); i != e; ++i) | |||
| 2473 | OS << Reg + i * getVectorListStride() << " "; | |||
| 2474 | OS << ">"; | |||
| 2475 | break; | |||
| 2476 | } | |||
| 2477 | case k_VectorIndex: | |||
| 2478 | OS << "<vectorindex " << getVectorIndex() << ">"; | |||
| 2479 | break; | |||
| 2480 | case k_SysReg: | |||
| 2481 | OS << "<sysreg: " << getSysReg() << '>'; | |||
| 2482 | break; | |||
| 2483 | case k_Token: | |||
| 2484 | OS << "'" << getToken() << "'"; | |||
| 2485 | break; | |||
| 2486 | case k_SysCR: | |||
| 2487 | OS << "c" << getSysCR(); | |||
| 2488 | break; | |||
| 2489 | case k_Prefetch: { | |||
| 2490 | StringRef Name = getPrefetchName(); | |||
| 2491 | if (!Name.empty()) | |||
| 2492 | OS << "<prfop " << Name << ">"; | |||
| 2493 | else | |||
| 2494 | OS << "<prfop invalid #" << getPrefetch() << ">"; | |||
| 2495 | break; | |||
| 2496 | } | |||
| 2497 | case k_PSBHint: | |||
| 2498 | OS << getPSBHintName(); | |||
| 2499 | break; | |||
| 2500 | case k_BTIHint: | |||
| 2501 | OS << getBTIHintName(); | |||
| 2502 | break; | |||
| 2503 | case k_MatrixRegister: | |||
| 2504 | OS << "<matrix " << getMatrixReg() << ">"; | |||
| 2505 | break; | |||
| 2506 | case k_MatrixTileList: { | |||
| 2507 | OS << "<matrixlist "; | |||
| 2508 | unsigned RegMask = getMatrixTileListRegMask(); | |||
| 2509 | unsigned MaxBits = 8; | |||
| 2510 | for (unsigned I = MaxBits; I > 0; --I) | |||
| 2511 | OS << ((RegMask & (1 << (I - 1))) >> (I - 1)); | |||
| 2512 | OS << '>'; | |||
| 2513 | break; | |||
| 2514 | } | |||
| 2515 | case k_SVCR: { | |||
| 2516 | OS << getSVCR(); | |||
| 2517 | break; | |||
| 2518 | } | |||
| 2519 | case k_Register: | |||
| 2520 | OS << "<register " << getReg() << ">"; | |||
| 2521 | if (!getShiftExtendAmount() && !hasShiftExtendAmount()) | |||
| 2522 | break; | |||
| 2523 | [[fallthrough]]; | |||
| 2524 | case k_ShiftExtend: | |||
| 2525 | OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #" | |||
| 2526 | << getShiftExtendAmount(); | |||
| 2527 | if (!hasShiftExtendAmount()) | |||
| 2528 | OS << "<imp>"; | |||
| 2529 | OS << '>'; | |||
| 2530 | break; | |||
| 2531 | } | |||
| 2532 | } | |||
| 2533 | ||||
| 2534 | /// @name Auto-generated Match Functions | |||
| 2535 | /// { | |||
| 2536 | ||||
| 2537 | static unsigned MatchRegisterName(StringRef Name); | |||
| 2538 | ||||
| 2539 | /// } | |||
| 2540 | ||||
| 2541 | static unsigned MatchNeonVectorRegName(StringRef Name) { | |||
| 2542 | return StringSwitch<unsigned>(Name.lower()) | |||
| 2543 | .Case("v0", AArch64::Q0) | |||
| 2544 | .Case("v1", AArch64::Q1) | |||
| 2545 | .Case("v2", AArch64::Q2) | |||
| 2546 | .Case("v3", AArch64::Q3) | |||
| 2547 | .Case("v4", AArch64::Q4) | |||
| 2548 | .Case("v5", AArch64::Q5) | |||
| 2549 | .Case("v6", AArch64::Q6) | |||
| 2550 | .Case("v7", AArch64::Q7) | |||
| 2551 | .Case("v8", AArch64::Q8) | |||
| 2552 | .Case("v9", AArch64::Q9) | |||
| 2553 | .Case("v10", AArch64::Q10) | |||
| 2554 | .Case("v11", AArch64::Q11) | |||
| 2555 | .Case("v12", AArch64::Q12) | |||
| 2556 | .Case("v13", AArch64::Q13) | |||
| 2557 | .Case("v14", AArch64::Q14) | |||
| 2558 | .Case("v15", AArch64::Q15) | |||
| 2559 | .Case("v16", AArch64::Q16) | |||
| 2560 | .Case("v17", AArch64::Q17) | |||
| 2561 | .Case("v18", AArch64::Q18) | |||
| 2562 | .Case("v19", AArch64::Q19) | |||
| 2563 | .Case("v20", AArch64::Q20) | |||
| 2564 | .Case("v21", AArch64::Q21) | |||
| 2565 | .Case("v22", AArch64::Q22) | |||
| 2566 | .Case("v23", AArch64::Q23) | |||
| 2567 | .Case("v24", AArch64::Q24) | |||
| 2568 | .Case("v25", AArch64::Q25) | |||
| 2569 | .Case("v26", AArch64::Q26) | |||
| 2570 | .Case("v27", AArch64::Q27) | |||
| 2571 | .Case("v28", AArch64::Q28) | |||
| 2572 | .Case("v29", AArch64::Q29) | |||
| 2573 | .Case("v30", AArch64::Q30) | |||
| 2574 | .Case("v31", AArch64::Q31) | |||
| 2575 | .Default(0); | |||
| 2576 | } | |||
| 2577 | ||||
| 2578 | /// Returns an optional pair of (#elements, element-width) if Suffix | |||
| 2579 | /// is a valid vector kind. Where the number of elements in a vector | |||
| 2580 | /// or the vector width is implicit or explicitly unknown (but still a | |||
| 2581 | /// valid suffix kind), 0 is used. | |||
| 2582 | static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix, | |||
| 2583 | RegKind VectorKind) { | |||
| 2584 | std::pair<int, int> Res = {-1, -1}; | |||
| 2585 | ||||
| 2586 | switch (VectorKind) { | |||
| 2587 | case RegKind::NeonVector: | |||
| 2588 | Res = | |||
| 2589 | StringSwitch<std::pair<int, int>>(Suffix.lower()) | |||
| 2590 | .Case("", {0, 0}) | |||
| 2591 | .Case(".1d", {1, 64}) | |||
| 2592 | .Case(".1q", {1, 128}) | |||
| 2593 | // '.2h' needed for fp16 scalar pairwise reductions | |||
| 2594 | .Case(".2h", {2, 16}) | |||
| 2595 | .Case(".2s", {2, 32}) | |||
| 2596 | .Case(".2d", {2, 64}) | |||
| 2597 | // '.4b' is another special case for the ARMv8.2a dot product | |||
| 2598 | // operand | |||
| 2599 | .Case(".4b", {4, 8}) | |||
| 2600 | .Case(".4h", {4, 16}) | |||
| 2601 | .Case(".4s", {4, 32}) | |||
| 2602 | .Case(".8b", {8, 8}) | |||
| 2603 | .Case(".8h", {8, 16}) | |||
| 2604 | .Case(".16b", {16, 8}) | |||
| 2605 | // Accept the width neutral ones, too, for verbose syntax. If those | |||
| 2606 | // aren't used in the right places, the token operand won't match so | |||
| 2607 | // all will work out. | |||
| 2608 | .Case(".b", {0, 8}) | |||
| 2609 | .Case(".h", {0, 16}) | |||
| 2610 | .Case(".s", {0, 32}) | |||
| 2611 | .Case(".d", {0, 64}) | |||
| 2612 | .Default({-1, -1}); | |||
| 2613 | break; | |||
| 2614 | case RegKind::SVEPredicateAsCounter: | |||
| 2615 | case RegKind::SVEPredicateVector: | |||
| 2616 | case RegKind::SVEDataVector: | |||
| 2617 | case RegKind::Matrix: | |||
| 2618 | Res = StringSwitch<std::pair<int, int>>(Suffix.lower()) | |||
| 2619 | .Case("", {0, 0}) | |||
| 2620 | .Case(".b", {0, 8}) | |||
| 2621 | .Case(".h", {0, 16}) | |||
| 2622 | .Case(".s", {0, 32}) | |||
| 2623 | .Case(".d", {0, 64}) | |||
| 2624 | .Case(".q", {0, 128}) | |||
| 2625 | .Default({-1, -1}); | |||
| 2626 | break; | |||
| 2627 | default: | |||
| 2628 | llvm_unreachable("Unsupported RegKind"); | |||
| 2629 | } | |||
| 2630 | ||||
| 2631 | if (Res == std::make_pair(-1, -1)) | |||
| 2632 | return std::nullopt; | |||
| 2633 | ||||
| 2634 | return std::optional<std::pair<int, int>>(Res); | |||
| 2635 | } | |||
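| | // e.g. ".4s" yields {4, 32} and the width-only ".b" yields {0, 8}; an | |||
| | // unrecognised suffix returns std::nullopt. | |||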
| 2636 | ||||
| 2637 | static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) { | |||
| 2638 | return parseVectorKind(Suffix, VectorKind).has_value(); | |||
| 2639 | } | |||
| 2640 | ||||
| 2641 | static unsigned matchSVEDataVectorRegName(StringRef Name) { | |||
| 2642 | return StringSwitch<unsigned>(Name.lower()) | |||
| 2643 | .Case("z0", AArch64::Z0) | |||
| 2644 | .Case("z1", AArch64::Z1) | |||
| 2645 | .Case("z2", AArch64::Z2) | |||
| 2646 | .Case("z3", AArch64::Z3) | |||
| 2647 | .Case("z4", AArch64::Z4) | |||
| 2648 | .Case("z5", AArch64::Z5) | |||
| 2649 | .Case("z6", AArch64::Z6) | |||
| 2650 | .Case("z7", AArch64::Z7) | |||
| 2651 | .Case("z8", AArch64::Z8) | |||
| 2652 | .Case("z9", AArch64::Z9) | |||
| 2653 | .Case("z10", AArch64::Z10) | |||
| 2654 | .Case("z11", AArch64::Z11) | |||
| 2655 | .Case("z12", AArch64::Z12) | |||
| 2656 | .Case("z13", AArch64::Z13) | |||
| 2657 | .Case("z14", AArch64::Z14) | |||
| 2658 | .Case("z15", AArch64::Z15) | |||
| 2659 | .Case("z16", AArch64::Z16) | |||
| 2660 | .Case("z17", AArch64::Z17) | |||
| 2661 | .Case("z18", AArch64::Z18) | |||
| 2662 | .Case("z19", AArch64::Z19) | |||
| 2663 | .Case("z20", AArch64::Z20) | |||
| 2664 | .Case("z21", AArch64::Z21) | |||
| 2665 | .Case("z22", AArch64::Z22) | |||
| 2666 | .Case("z23", AArch64::Z23) | |||
| 2667 | .Case("z24", AArch64::Z24) | |||
| 2668 | .Case("z25", AArch64::Z25) | |||
| 2669 | .Case("z26", AArch64::Z26) | |||
| 2670 | .Case("z27", AArch64::Z27) | |||
| 2671 | .Case("z28", AArch64::Z28) | |||
| 2672 | .Case("z29", AArch64::Z29) | |||
| 2673 | .Case("z30", AArch64::Z30) | |||
| 2674 | .Case("z31", AArch64::Z31) | |||
| 2675 | .Default(0); | |||
| 2676 | } | |||
| 2677 | ||||
| 2678 | static unsigned matchSVEPredicateVectorRegName(StringRef Name) { | |||
| 2679 | return StringSwitch<unsigned>(Name.lower()) | |||
| 2680 | .Case("p0", AArch64::P0) | |||
| 2681 | .Case("p1", AArch64::P1) | |||
| 2682 | .Case("p2", AArch64::P2) | |||
| 2683 | .Case("p3", AArch64::P3) | |||
| 2684 | .Case("p4", AArch64::P4) | |||
| 2685 | .Case("p5", AArch64::P5) | |||
| 2686 | .Case("p6", AArch64::P6) | |||
| 2687 | .Case("p7", AArch64::P7) | |||
| 2688 | .Case("p8", AArch64::P8) | |||
| 2689 | .Case("p9", AArch64::P9) | |||
| 2690 | .Case("p10", AArch64::P10) | |||
| 2691 | .Case("p11", AArch64::P11) | |||
| 2692 | .Case("p12", AArch64::P12) | |||
| 2693 | .Case("p13", AArch64::P13) | |||
| 2694 | .Case("p14", AArch64::P14) | |||
| 2695 | .Case("p15", AArch64::P15) | |||
| 2696 | .Default(0); | |||
| 2697 | } | |||
| 2698 | ||||
| 2699 | static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) { | |||
| 2700 | return StringSwitch<unsigned>(Name.lower()) | |||
| 2701 | .Case("pn0", AArch64::P0) | |||
| 2702 | .Case("pn1", AArch64::P1) | |||
| 2703 | .Case("pn2", AArch64::P2) | |||
| 2704 | .Case("pn3", AArch64::P3) | |||
| 2705 | .Case("pn4", AArch64::P4) | |||
| 2706 | .Case("pn5", AArch64::P5) | |||
| 2707 | .Case("pn6", AArch64::P6) | |||
| 2708 | .Case("pn7", AArch64::P7) | |||
| 2709 | .Case("pn8", AArch64::P8) | |||
| 2710 | .Case("pn9", AArch64::P9) | |||
| 2711 | .Case("pn10", AArch64::P10) | |||
| 2712 | .Case("pn11", AArch64::P11) | |||
| 2713 | .Case("pn12", AArch64::P12) | |||
| 2714 | .Case("pn13", AArch64::P13) | |||
| 2715 | .Case("pn14", AArch64::P14) | |||
| 2716 | .Case("pn15", AArch64::P15) | |||
| 2717 | .Default(0); | |||
| 2718 | } | |||
| 2719 | ||||
| 2720 | static unsigned matchMatrixTileListRegName(StringRef Name) { | |||
| 2721 | return StringSwitch<unsigned>(Name.lower()) | |||
| 2722 | .Case("za0.d", AArch64::ZAD0) | |||
| 2723 | .Case("za1.d", AArch64::ZAD1) | |||
| 2724 | .Case("za2.d", AArch64::ZAD2) | |||
| 2725 | .Case("za3.d", AArch64::ZAD3) | |||
| 2726 | .Case("za4.d", AArch64::ZAD4) | |||
| 2727 | .Case("za5.d", AArch64::ZAD5) | |||
| 2728 | .Case("za6.d", AArch64::ZAD6) | |||
| 2729 | .Case("za7.d", AArch64::ZAD7) | |||
| 2730 | .Case("za0.s", AArch64::ZAS0) | |||
| 2731 | .Case("za1.s", AArch64::ZAS1) | |||
| 2732 | .Case("za2.s", AArch64::ZAS2) | |||
| 2733 | .Case("za3.s", AArch64::ZAS3) | |||
| 2734 | .Case("za0.h", AArch64::ZAH0) | |||
| 2735 | .Case("za1.h", AArch64::ZAH1) | |||
| 2736 | .Case("za0.b", AArch64::ZAB0) | |||
| 2737 | .Default(0); | |||
| 2738 | } | |||
| 2739 | ||||
| 2740 | static unsigned matchMatrixRegName(StringRef Name) { | |||
| 2741 | return StringSwitch<unsigned>(Name.lower()) | |||
| 2742 | .Case("za", AArch64::ZA) | |||
| 2743 | .Case("za0.q", AArch64::ZAQ0) | |||
| 2744 | .Case("za1.q", AArch64::ZAQ1) | |||
| 2745 | .Case("za2.q", AArch64::ZAQ2) | |||
| 2746 | .Case("za3.q", AArch64::ZAQ3) | |||
| 2747 | .Case("za4.q", AArch64::ZAQ4) | |||
| 2748 | .Case("za5.q", AArch64::ZAQ5) | |||
| 2749 | .Case("za6.q", AArch64::ZAQ6) | |||
| 2750 | .Case("za7.q", AArch64::ZAQ7) | |||
| 2751 | .Case("za8.q", AArch64::ZAQ8) | |||
| 2752 | .Case("za9.q", AArch64::ZAQ9) | |||
| 2753 | .Case("za10.q", AArch64::ZAQ10) | |||
| 2754 | .Case("za11.q", AArch64::ZAQ11) | |||
| 2755 | .Case("za12.q", AArch64::ZAQ12) | |||
| 2756 | .Case("za13.q", AArch64::ZAQ13) | |||
| 2757 | .Case("za14.q", AArch64::ZAQ14) | |||
| 2758 | .Case("za15.q", AArch64::ZAQ15) | |||
| 2759 | .Case("za0.d", AArch64::ZAD0) | |||
| 2760 | .Case("za1.d", AArch64::ZAD1) | |||
| 2761 | .Case("za2.d", AArch64::ZAD2) | |||
| 2762 | .Case("za3.d", AArch64::ZAD3) | |||
| 2763 | .Case("za4.d", AArch64::ZAD4) | |||
| 2764 | .Case("za5.d", AArch64::ZAD5) | |||
| 2765 | .Case("za6.d", AArch64::ZAD6) | |||
| 2766 | .Case("za7.d", AArch64::ZAD7) | |||
| 2767 | .Case("za0.s", AArch64::ZAS0) | |||
| 2768 | .Case("za1.s", AArch64::ZAS1) | |||
| 2769 | .Case("za2.s", AArch64::ZAS2) | |||
| 2770 | .Case("za3.s", AArch64::ZAS3) | |||
| 2771 | .Case("za0.h", AArch64::ZAH0) | |||
| 2772 | .Case("za1.h", AArch64::ZAH1) | |||
| 2773 | .Case("za0.b", AArch64::ZAB0) | |||
| 2774 | .Case("za0h.q", AArch64::ZAQ0) | |||
| 2775 | .Case("za1h.q", AArch64::ZAQ1) | |||
| 2776 | .Case("za2h.q", AArch64::ZAQ2) | |||
| 2777 | .Case("za3h.q", AArch64::ZAQ3) | |||
| 2778 | .Case("za4h.q", AArch64::ZAQ4) | |||
| 2779 | .Case("za5h.q", AArch64::ZAQ5) | |||
| 2780 | .Case("za6h.q", AArch64::ZAQ6) | |||
| 2781 | .Case("za7h.q", AArch64::ZAQ7) | |||
| 2782 | .Case("za8h.q", AArch64::ZAQ8) | |||
| 2783 | .Case("za9h.q", AArch64::ZAQ9) | |||
| 2784 | .Case("za10h.q", AArch64::ZAQ10) | |||
| 2785 | .Case("za11h.q", AArch64::ZAQ11) | |||
| 2786 | .Case("za12h.q", AArch64::ZAQ12) | |||
| 2787 | .Case("za13h.q", AArch64::ZAQ13) | |||
| 2788 | .Case("za14h.q", AArch64::ZAQ14) | |||
| 2789 | .Case("za15h.q", AArch64::ZAQ15) | |||
| 2790 | .Case("za0h.d", AArch64::ZAD0) | |||
| 2791 | .Case("za1h.d", AArch64::ZAD1) | |||
| 2792 | .Case("za2h.d", AArch64::ZAD2) | |||
| 2793 | .Case("za3h.d", AArch64::ZAD3) | |||
| 2794 | .Case("za4h.d", AArch64::ZAD4) | |||
| 2795 | .Case("za5h.d", AArch64::ZAD5) | |||
| 2796 | .Case("za6h.d", AArch64::ZAD6) | |||
| 2797 | .Case("za7h.d", AArch64::ZAD7) | |||
| 2798 | .Case("za0h.s", AArch64::ZAS0) | |||
| 2799 | .Case("za1h.s", AArch64::ZAS1) | |||
| 2800 | .Case("za2h.s", AArch64::ZAS2) | |||
| 2801 | .Case("za3h.s", AArch64::ZAS3) | |||
| 2802 | .Case("za0h.h", AArch64::ZAH0) | |||
| 2803 | .Case("za1h.h", AArch64::ZAH1) | |||
| 2804 | .Case("za0h.b", AArch64::ZAB0) | |||
| 2805 | .Case("za0v.q", AArch64::ZAQ0) | |||
| 2806 | .Case("za1v.q", AArch64::ZAQ1) | |||
| 2807 | .Case("za2v.q", AArch64::ZAQ2) | |||
| 2808 | .Case("za3v.q", AArch64::ZAQ3) | |||
| 2809 | .Case("za4v.q", AArch64::ZAQ4) | |||
| 2810 | .Case("za5v.q", AArch64::ZAQ5) | |||
| 2811 | .Case("za6v.q", AArch64::ZAQ6) | |||
| 2812 | .Case("za7v.q", AArch64::ZAQ7) | |||
| 2813 | .Case("za8v.q", AArch64::ZAQ8) | |||
| 2814 | .Case("za9v.q", AArch64::ZAQ9) | |||
| 2815 | .Case("za10v.q", AArch64::ZAQ10) | |||
| 2816 | .Case("za11v.q", AArch64::ZAQ11) | |||
| 2817 | .Case("za12v.q", AArch64::ZAQ12) | |||
| 2818 | .Case("za13v.q", AArch64::ZAQ13) | |||
| 2819 | .Case("za14v.q", AArch64::ZAQ14) | |||
| 2820 | .Case("za15v.q", AArch64::ZAQ15) | |||
| 2821 | .Case("za0v.d", AArch64::ZAD0) | |||
| 2822 | .Case("za1v.d", AArch64::ZAD1) | |||
| 2823 | .Case("za2v.d", AArch64::ZAD2) | |||
| 2824 | .Case("za3v.d", AArch64::ZAD3) | |||
| 2825 | .Case("za4v.d", AArch64::ZAD4) | |||
| 2826 | .Case("za5v.d", AArch64::ZAD5) | |||
| 2827 | .Case("za6v.d", AArch64::ZAD6) | |||
| 2828 | .Case("za7v.d", AArch64::ZAD7) | |||
| 2829 | .Case("za0v.s", AArch64::ZAS0) | |||
| 2830 | .Case("za1v.s", AArch64::ZAS1) | |||
| 2831 | .Case("za2v.s", AArch64::ZAS2) | |||
| 2832 | .Case("za3v.s", AArch64::ZAS3) | |||
| 2833 | .Case("za0v.h", AArch64::ZAH0) | |||
| 2834 | .Case("za1v.h", AArch64::ZAH1) | |||
| 2835 | .Case("za0v.b", AArch64::ZAB0) | |||
| 2836 | .Default(0); | |||
| 2837 | } | |||
| 2838 | ||||
| 2839 | bool AArch64AsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc, | |||
| 2840 | SMLoc &EndLoc) { | |||
| 2841 | return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success; | |||
| 2842 | } | |||
| 2843 | ||||
| 2844 | OperandMatchResultTy AArch64AsmParser::tryParseRegister(MCRegister &RegNo, | |||
| 2845 | SMLoc &StartLoc, | |||
| 2846 | SMLoc &EndLoc) { | |||
| 2847 | StartLoc = getLoc(); | |||
| 2848 | auto Res = tryParseScalarRegister(RegNo); | |||
| 2849 | EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 2850 | return Res; | |||
| 2851 | } | |||
| 2852 | ||||
| 2853 | // Matches a register name or register alias previously defined by '.req' | |||
| 2854 | unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, | |||
| 2855 | RegKind Kind) { | |||
| 2856 | unsigned RegNum = 0; | |||
| 2857 | if ((RegNum = matchSVEDataVectorRegName(Name))) | |||
| 2858 | return Kind == RegKind::SVEDataVector ? RegNum : 0; | |||
| 2859 | ||||
| 2860 | if ((RegNum = matchSVEPredicateVectorRegName(Name))) | |||
| 2861 | return Kind == RegKind::SVEPredicateVector ? RegNum : 0; | |||
| 2862 | ||||
| 2863 | if ((RegNum = matchSVEPredicateAsCounterRegName(Name))) | |||
| 2864 | return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0; | |||
| 2865 | ||||
| 2866 | if ((RegNum = MatchNeonVectorRegName(Name))) | |||
| 2867 | return Kind == RegKind::NeonVector ? RegNum : 0; | |||
| 2868 | ||||
| 2869 | if ((RegNum = matchMatrixRegName(Name))) | |||
| 2870 | return Kind == RegKind::Matrix ? RegNum : 0; | |||
| 2871 | ||||
| 2872 | if (Name.equals_insensitive("zt0")) | |||
| 2873 | return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0; | |||
| 2874 | ||||
| 2875 | // The parsed register must be of RegKind Scalar | |||
| 2876 | if ((RegNum = MatchRegisterName(Name))) | |||
| 2877 | return (Kind == RegKind::Scalar) ? RegNum : 0; | |||
| 2878 | ||||
| 2879 | if (!RegNum) { | |||
| 2880 | // Handle a few common aliases of registers. | |||
| 2881 | if (auto RegNum = StringSwitch<unsigned>(Name.lower()) | |||
| 2882 | .Case("fp", AArch64::FP) | |||
| 2883 | .Case("lr", AArch64::LR) | |||
| 2884 | .Case("x31", AArch64::XZR) | |||
| 2885 | .Case("w31", AArch64::WZR) | |||
| 2886 | .Default(0)) | |||
| 2887 | return Kind == RegKind::Scalar ? RegNum : 0; | |||
| 2888 | ||||
| 2889 | // Check for aliases registered via .req. Canonicalize to lower case. | |||
| 2890 | // That's more consistent since register names are case insensitive, and | |||
| 2891 | // it's how the original entry was passed in from MC/MCParser/AsmParser. | |||
| 2892 | auto Entry = RegisterReqs.find(Name.lower()); | |||
| 2893 | if (Entry == RegisterReqs.end()) | |||
| 2894 | return 0; | |||
| 2895 | ||||
| 2896 | // set RegNum if the match is the right kind of register | |||
| 2897 | if (Kind == Entry->getValue().first) | |||
| 2898 | RegNum = Entry->getValue().second; | |||
| 2899 | } | |||
| 2900 | return RegNum; | |||
| 2901 | } | |||
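| | // e.g. after "foo .req x3", looking up "foo" with RegKind::Scalar returns | |||
| | // X3, while a lookup with a different register kind returns 0. | |||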
| 2902 | ||||
| 2903 | unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) { | |||
| 2904 | switch (K) { | |||
| 2905 | case RegKind::Scalar: | |||
| 2906 | case RegKind::NeonVector: | |||
| 2907 | case RegKind::SVEDataVector: | |||
| 2908 | return 32; | |||
| 2909 | case RegKind::Matrix: | |||
| 2910 | case RegKind::SVEPredicateVector: | |||
| 2911 | case RegKind::SVEPredicateAsCounter: | |||
| 2912 | return 16; | |||
| 2913 | case RegKind::LookupTable: | |||
| 2914 | return 1; | |||
| 2915 | } | |||
| 2916 | llvm_unreachable("Unsupported RegKind"); | |||
| 2917 | } | |||
| 2918 | ||||
| 2919 | /// tryParseScalarRegister - Try to parse a register name. The token must be an | |||
| 2920 | /// Identifier when called, and if it is a register name the token is eaten and | |||
| 2921 | /// the register is added to the operand list. | |||
| 2922 | OperandMatchResultTy | |||
| 2923 | AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) { | |||
| 2924 | const AsmToken &Tok = getTok(); | |||
| 2925 | if (Tok.isNot(AsmToken::Identifier)) | |||
| 2926 | return MatchOperand_NoMatch; | |||
| 2927 | ||||
| 2928 | std::string lowerCase = Tok.getString().lower(); | |||
| 2929 | unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar); | |||
| 2930 | if (Reg == 0) | |||
| 2931 | return MatchOperand_NoMatch; | |||
| 2932 | ||||
| 2933 | RegNum = Reg; | |||
| 2934 | Lex(); // Eat identifier token. | |||
| 2935 | return MatchOperand_Success; | |||
| 2936 | } | |||
| 2937 | ||||
| 2938 | /// tryParseSysCROperand - Try to parse a system instruction CR operand name. | |||
| 2939 | OperandMatchResultTy | |||
| 2940 | AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) { | |||
| 2941 | SMLoc S = getLoc(); | |||
| 2942 | ||||
| 2943 | if (getTok().isNot(AsmToken::Identifier)) { | |||
| 2944 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
| 2945 | return MatchOperand_ParseFail; | |||
| 2946 | } | |||
| 2947 | ||||
| 2948 | StringRef Tok = getTok().getIdentifier(); | |||
| 2949 | if (Tok[0] != 'c' && Tok[0] != 'C') { | |||
| 2950 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
| 2951 | return MatchOperand_ParseFail; | |||
| 2952 | } | |||
| 2953 | ||||
| 2954 | uint32_t CRNum; | |||
| 2955 | bool BadNum = Tok.drop_front().getAsInteger(10, CRNum); | |||
| 2956 | if (BadNum || CRNum > 15) { | |||
| 2957 | Error(S, "Expected cN operand where 0 <= N <= 15"); | |||
| 2958 | return MatchOperand_ParseFail; | |||
| 2959 | } | |||
| 2960 | ||||
| 2961 | Lex(); // Eat identifier token. | |||
| 2962 | Operands.push_back( | |||
| 2963 | AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext())); | |||
| 2964 | return MatchOperand_Success; | |||
| 2965 | } | |||
| 2966 | ||||
| 2967 | // Either an identifier for named values or a 6-bit immediate. | |||
| 2968 | OperandMatchResultTy | |||
| 2969 | AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) { | |||
| 2970 | SMLoc S = getLoc(); | |||
| 2971 | const AsmToken &Tok = getTok(); | |||
| 2972 | ||||
| 2973 | unsigned MaxVal = 63; | |||
| 2974 | ||||
| 2975 | // Immediate case, with optional leading hash: | |||
| 2976 | if (parseOptionalToken(AsmToken::Hash) || | |||
| 2977 | Tok.is(AsmToken::Integer)) { | |||
| 2978 | const MCExpr *ImmVal; | |||
| 2979 | if (getParser().parseExpression(ImmVal)) | |||
| 2980 | return MatchOperand_ParseFail; | |||
| 2981 | ||||
| 2982 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 2983 | if (!MCE) { | |||
| 2984 | TokError("immediate value expected for prefetch operand"); | |||
| 2985 | return MatchOperand_ParseFail; | |||
| 2986 | } | |||
| 2987 | unsigned prfop = MCE->getValue(); | |||
| 2988 | if (prfop > MaxVal) { | |||
| 2989 | TokError("prefetch operand out of range, [0," + utostr(MaxVal) + | |||
| 2990 | "] expected"); | |||
| 2991 | return MatchOperand_ParseFail; | |||
| 2992 | } | |||
| 2993 | ||||
| 2994 | auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue()); | |||
| 2995 | Operands.push_back(AArch64Operand::CreatePrefetch( | |||
| 2996 | prfop, RPRFM ? RPRFM->Name : "", S, getContext())); | |||
| 2997 | return MatchOperand_Success; | |||
| 2998 | } | |||
| 2999 | ||||
| 3000 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 3001 | TokError("prefetch hint expected"); | |||
| 3002 | return MatchOperand_ParseFail; | |||
| 3003 | } | |||
| 3004 | ||||
| 3005 | auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString()); | |||
| 3006 | if (!RPRFM) { | |||
| 3007 | TokError("prefetch hint expected"); | |||
| 3008 | return MatchOperand_ParseFail; | |||
| 3009 | } | |||
| 3010 | ||||
| 3011 | Operands.push_back(AArch64Operand::CreatePrefetch( | |||
| 3012 | RPRFM->Encoding, Tok.getString(), S, getContext())); | |||
| 3013 | Lex(); // Eat identifier token. | |||
| 3014 | return MatchOperand_Success; | |||
| 3015 | } | |||
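| | // RPRFM operands are either a name from the RPRFM table or a plain | |||
| | // immediate such as "#24", anywhere in the 6-bit range [0, 63]. | |||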
| 3016 | ||||
| 3017 | /// tryParsePrefetch - Try to parse a prefetch operand. | |||
| 3018 | template <bool IsSVEPrefetch> | |||
| 3019 | OperandMatchResultTy | |||
| 3020 | AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) { | |||
| 3021 | SMLoc S = getLoc(); | |||
| 3022 | const AsmToken &Tok = getTok(); | |||
| 3023 | ||||
| 3024 | auto LookupByName = [](StringRef N) { | |||
| 3025 | if (IsSVEPrefetch) { | |||
| 3026 | if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N)) | |||
| 3027 | return std::optional<unsigned>(Res->Encoding); | |||
| 3028 | } else if (auto Res = AArch64PRFM::lookupPRFMByName(N)) | |||
| 3029 | return std::optional<unsigned>(Res->Encoding); | |||
| 3030 | return std::optional<unsigned>(); | |||
| 3031 | }; | |||
| 3032 | ||||
| 3033 | auto LookupByEncoding = [](unsigned E) { | |||
| 3034 | if (IsSVEPrefetch) { | |||
| 3035 | if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E)) | |||
| 3036 | return std::optional<StringRef>(Res->Name); | |||
| 3037 | } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E)) | |||
| 3038 | return std::optional<StringRef>(Res->Name); | |||
| 3039 | return std::optional<StringRef>(); | |||
| 3040 | }; | |||
| 3041 | unsigned MaxVal = IsSVEPrefetch ? 15 : 31; | |||
| 3042 | ||||
| 3043 | // Either an identifier for named values or a 5-bit immediate. | |||
| 3044 | // Eat optional hash. | |||
| 3045 | if (parseOptionalToken(AsmToken::Hash) || | |||
| 3046 | Tok.is(AsmToken::Integer)) { | |||
| 3047 | const MCExpr *ImmVal; | |||
| 3048 | if (getParser().parseExpression(ImmVal)) | |||
| 3049 | return MatchOperand_ParseFail; | |||
| 3050 | ||||
| 3051 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 3052 | if (!MCE) { | |||
| 3053 | TokError("immediate value expected for prefetch operand"); | |||
| 3054 | return MatchOperand_ParseFail; | |||
| 3055 | } | |||
| 3056 | unsigned prfop = MCE->getValue(); | |||
| 3057 | if (prfop > MaxVal) { | |||
| 3058 | TokError("prefetch operand out of range, [0," + utostr(MaxVal) + | |||
| 3059 | "] expected"); | |||
| 3060 | return MatchOperand_ParseFail; | |||
| 3061 | } | |||
| 3062 | ||||
| 3063 | auto PRFM = LookupByEncoding(MCE->getValue()); | |||
| 3064 | Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""), | |||
| 3065 | S, getContext())); | |||
| 3066 | return MatchOperand_Success; | |||
| 3067 | } | |||
| 3068 | ||||
| 3069 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 3070 | TokError("prefetch hint expected"); | |||
| 3071 | return MatchOperand_ParseFail; | |||
| 3072 | } | |||
| 3073 | ||||
| 3074 | auto PRFM = LookupByName(Tok.getString()); | |||
| 3075 | if (!PRFM) { | |||
| 3076 | TokError("prefetch hint expected"); | |||
| 3077 | return MatchOperand_ParseFail; | |||
| 3078 | } | |||
| 3079 | ||||
| 3080 | Operands.push_back(AArch64Operand::CreatePrefetch( | |||
| 3081 | *PRFM, Tok.getString(), S, getContext())); | |||
| 3082 | Lex(); // Eat identifier token. | |||
| 3083 | return MatchOperand_Success; | |||
| 3084 | } | |||
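| | // e.g. "prfm pldl1keep, [x0]" resolves the named hint via the PRFM table, | |||
| | // while "prfm #5, [x0]" takes the raw 5-bit encoding; SVE prefetches use | |||
| | // the separate SVEPRFM table and a 4-bit range. | |||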
| 3085 | ||||
| 3086 | /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command | |||
| 3087 | OperandMatchResultTy | |||
| 3088 | AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { | |||
| 3089 | SMLoc S = getLoc(); | |||
| 3090 | const AsmToken &Tok = getTok(); | |||
| 3091 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 3092 | TokError("invalid operand for instruction"); | |||
| 3093 | return MatchOperand_ParseFail; | |||
| 3094 | } | |||
| 3095 | ||||
| 3096 | auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString()); | |||
| 3097 | if (!PSB) { | |||
| 3098 | TokError("invalid operand for instruction"); | |||
| 3099 | return MatchOperand_ParseFail; | |||
| 3100 | } | |||
| 3101 | ||||
| 3102 | Operands.push_back(AArch64Operand::CreatePSBHint( | |||
| 3103 | PSB->Encoding, Tok.getString(), S, getContext())); | |||
| 3104 | Lex(); // Eat identifier token. | |||
| 3105 | return MatchOperand_Success; | |||
| 3106 | } | |||
| 3107 | ||||
| 3108 | OperandMatchResultTy | |||
| 3109 | AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) { | |||
| 3110 | SMLoc StartLoc = getLoc(); | |||
| 3111 | ||||
| 3112 | MCRegister RegNum; | |||
| 3113 | ||||
| 3114 | // The case where xzr, xzr is not present is handled by an InstAlias. | |||
| 3115 | ||||
| 3116 | auto RegTok = getTok(); // in case we need to backtrack | |||
| 3117 | if (tryParseScalarRegister(RegNum) != MatchOperand_Success) | |||
| 3118 | return MatchOperand_NoMatch; | |||
| 3119 | ||||
| 3120 | if (RegNum != AArch64::XZR) { | |||
| 3121 | getLexer().UnLex(RegTok); | |||
| 3122 | return MatchOperand_NoMatch; | |||
| 3123 | } | |||
| 3124 | ||||
| 3125 | if (parseComma()) | |||
| 3126 | return MatchOperand_ParseFail; | |||
| 3127 | ||||
| 3128 | if (tryParseScalarRegister(RegNum) != MatchOperand_Success) { | |||
| 3129 | TokError("expected register operand"); | |||
| 3130 | return MatchOperand_ParseFail; | |||
| 3131 | } | |||
| 3132 | ||||
| 3133 | if (RegNum != AArch64::XZR) { | |||
| 3134 | TokError("xzr must be followed by xzr"); | |||
| 3135 | return MatchOperand_ParseFail; | |||
| 3136 | } | |||
| 3137 | ||||
| 3138 | // We need to push something, since we claim this is an operand in .td. | |||
| 3139 | // See also AArch64AsmParser::parseKeywordOperand. | |||
| 3140 | Operands.push_back(AArch64Operand::CreateReg( | |||
| 3141 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
| 3142 | ||||
| 3143 | return MatchOperand_Success; | |||
| 3144 | } | |||
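| | // Illustrative sketch (assumption): for SYSP-based aliases written with an | |||
| | // explicit "xzr, xzr" pair, e.g. "tlbip <op>, xzr, xzr", both registers are | |||
| | // consumed here and folded into a single placeholder operand. | |||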
| 3145 | ||||
| 3146 | /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command | |||
| 3147 | OperandMatchResultTy | |||
| 3148 | AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) { | |||
| 3149 | SMLoc S = getLoc(); | |||
| 3150 | const AsmToken &Tok = getTok(); | |||
| 3151 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 3152 | TokError("invalid operand for instruction"); | |||
| 3153 | return MatchOperand_ParseFail; | |||
| 3154 | } | |||
| 3155 | ||||
| 3156 | auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString()); | |||
| 3157 | if (!BTI) { | |||
| 3158 | TokError("invalid operand for instruction"); | |||
| 3159 | return MatchOperand_ParseFail; | |||
| 3160 | } | |||
| 3161 | ||||
| 3162 | Operands.push_back(AArch64Operand::CreateBTIHint( | |||
| 3163 | BTI->Encoding, Tok.getString(), S, getContext())); | |||
| 3164 | Lex(); // Eat identifier token. | |||
| 3165 | return MatchOperand_Success; | |||
| 3166 | } | |||
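| | // Illustrative examples (not from the original source): "bti c", "bti j" | |||
| | // and "bti jc" are expected to resolve through lookupBTIByName here. | |||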
| 3167 | ||||
| 3168 | /// tryParseAdrpLabel - Parse and validate a source label for the ADRP | |||
| 3169 | /// instruction. | |||
| 3170 | OperandMatchResultTy | |||
| 3171 | AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) { | |||
| 3172 | SMLoc S = getLoc(); | |||
| 3173 | const MCExpr *Expr = nullptr; | |||
| 3174 | ||||
| 3175 | if (getTok().is(AsmToken::Hash)) { | |||
| 3176 | Lex(); // Eat hash token. | |||
| 3177 | } | |||
| 3178 | ||||
| 3179 | if (parseSymbolicImmVal(Expr)) | |||
| 3180 | return MatchOperand_ParseFail; | |||
| 3181 | ||||
| 3182 | AArch64MCExpr::VariantKind ELFRefKind; | |||
| 3183 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
| 3184 | int64_t Addend; | |||
| 3185 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
| 3186 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && | |||
| 3187 | ELFRefKind == AArch64MCExpr::VK_INVALID) { | |||
| 3188 | // No modifier was specified at all; this is the syntax for an ELF basic | |||
| 3189 | // ADRP relocation (unfortunately). | |||
| 3190 | Expr = | |||
| 3191 | AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext()); | |||
| 3192 | } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE || | |||
| 3193 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) && | |||
| 3194 | Addend != 0) { | |||
| 3195 | Error(S, "gotpage label reference not allowed an addend"); | |||
| 3196 | return MatchOperand_ParseFail; | |||
| 3197 | } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE && | |||
| 3198 | DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE && | |||
| 3199 | DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE && | |||
| 3200 | ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC && | |||
| 3201 | ELFRefKind != AArch64MCExpr::VK_GOT_PAGE && | |||
| 3202 | ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 && | |||
| 3203 | ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE && | |||
| 3204 | ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) { | |||
| 3205 | // The operand must be an @page or @gotpage qualified symbolref. | |||
| 3206 | Error(S, "page or gotpage label reference expected"); | |||
| 3207 | return MatchOperand_ParseFail; | |||
| 3208 | } | |||
| 3209 | } | |||
| 3210 | ||||
| 3211 | // We have either a label reference possibly with addend or an immediate. The | |||
| 3212 | // addend is a raw value here. The linker will adjust it to only reference the | |||
| 3213 | // page. | |||
| 3214 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 3215 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); | |||
| 3216 | ||||
| 3217 | return MatchOperand_Success; | |||
| 3218 | } | |||
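| | // Illustrative examples (assumption): a bare symbol ("adrp x0, sym") is | |||
| | // wrapped as VK_ABS_PAGE (the basic ELF ADRP relocation), while forms such | |||
| | // as "adrp x0, :got:sym" or Darwin "sym@PAGE" / "sym@GOTPAGE" pass the | |||
| | // checks above unchanged. | |||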
| 3219 | ||||
| 3220 | /// tryParseAdrLabel - Parse and validate a source label for the ADR | |||
| 3221 | /// instruction. | |||
| 3222 | OperandMatchResultTy | |||
| 3223 | AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { | |||
| 3224 | SMLoc S = getLoc(); | |||
| 3225 | const MCExpr *Expr = nullptr; | |||
| 3226 | ||||
| 3227 | // Leave anything with a bracket to the default for SVE | |||
| 3228 | if (getTok().is(AsmToken::LBrac)) | |||
| 3229 | return MatchOperand_NoMatch; | |||
| 3230 | ||||
| 3231 | if (getTok().is(AsmToken::Hash)) | |||
| 3232 | Lex(); // Eat hash token. | |||
| 3233 | ||||
| 3234 | if (parseSymbolicImmVal(Expr)) | |||
| 3235 | return MatchOperand_ParseFail; | |||
| 3236 | ||||
| 3237 | AArch64MCExpr::VariantKind ELFRefKind; | |||
| 3238 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
| 3239 | int64_t Addend; | |||
| 3240 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
| 3241 | if (DarwinRefKind == MCSymbolRefExpr::VK_None && | |||
| 3242 | ELFRefKind == AArch64MCExpr::VK_INVALID) { | |||
| 3243 | // No modifier was specified at all; this is the syntax for an ELF basic | |||
| 3244 | // ADR relocation (unfortunately). | |||
| 3245 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext()); | |||
| 3246 | } else { | |||
| 3247 | Error(S, "unexpected adr label"); | |||
| 3248 | return MatchOperand_ParseFail; | |||
| 3249 | } | |||
| 3250 | } | |||
| 3251 | ||||
| 3252 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 3253 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); | |||
| 3254 | return MatchOperand_Success; | |||
| 3255 | } | |||
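| | // Illustrative example (assumption): "adr x0, label" with no modifier is | |||
| | // wrapped as VK_ABS here; any explicitly modified symbol reference is | |||
| | // rejected with "unexpected adr label" above. | |||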
| 3256 | ||||
| 3257 | /// tryParseFPImm - A floating point immediate expression operand. | |||
| 3258 | template<bool AddFPZeroAsLiteral> | |||
| 3259 | OperandMatchResultTy | |||
| 3260 | AArch64AsmParser::tryParseFPImm(OperandVector &Operands) { | |||
| 3261 | SMLoc S = getLoc(); | |||
| 3262 | ||||
| 3263 | bool Hash = parseOptionalToken(AsmToken::Hash); | |||
| 3264 | ||||
| 3265 | // Handle negation, as that still comes through as a separate token. | |||
| 3266 | bool isNegative = parseOptionalToken(AsmToken::Minus); | |||
| 3267 | ||||
| 3268 | const AsmToken &Tok = getTok(); | |||
| 3269 | if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) { | |||
| 3270 | if (!Hash) | |||
| 3271 | return MatchOperand_NoMatch; | |||
| 3272 | TokError("invalid floating point immediate"); | |||
| 3273 | return MatchOperand_ParseFail; | |||
| 3274 | } | |||
| 3275 | ||||
| 3276 | // Parse hexadecimal representation. | |||
| 3277 | if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) { | |||
| 3278 | if (Tok.getIntVal() > 255 || isNegative) { | |||
| 3279 | TokError("encoded floating point value out of range"); | |||
| 3280 | return MatchOperand_ParseFail; | |||
| 3281 | } | |||
| 3282 | ||||
| 3283 | APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal())); | |||
| 3284 | Operands.push_back( | |||
| 3285 | AArch64Operand::CreateFPImm(F, true, S, getContext())); | |||
| 3286 | } else { | |||
| 3287 | // Parse FP representation. | |||
| 3288 | APFloat RealVal(APFloat::IEEEdouble()); | |||
| 3289 | auto StatusOrErr = | |||
| 3290 | RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero); | |||
| 3291 | if (errorToBool(StatusOrErr.takeError())) { | |||
| 3292 | TokError("invalid floating point representation"); | |||
| 3293 | return MatchOperand_ParseFail; | |||
| 3294 | } | |||
| 3295 | ||||
| 3296 | if (isNegative) | |||
| 3297 | RealVal.changeSign(); | |||
| 3298 | ||||
| 3299 | if (AddFPZeroAsLiteral && RealVal.isPosZero()) { | |||
| 3300 | Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext())); | |||
| 3301 | Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext())); | |||
| 3302 | } else | |||
| 3303 | Operands.push_back(AArch64Operand::CreateFPImm( | |||
| 3304 | RealVal, *StatusOrErr == APFloat::opOK, S, getContext())); | |||
| 3305 | } | |||
| 3306 | ||||
| 3307 | Lex(); // Eat the token. | |||
| 3308 | ||||
| 3309 | return MatchOperand_Success; | |||
| 3310 | } | |||
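| | // Illustrative examples (not from the original source): | |||
| | //   fmov d0, #1.0    // real literal, parsed via convertFromString | |||
| | //   fmov d0, #-2.5   // leading '-' is a separate token, handled above | |||
| | //   fmov d0, #0x70   // 8-bit encoded immediate, decoded via getFPImmFloat | |||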
| 3311 | ||||
| 3312 | /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with | |||
| 3313 | /// a shift suffix, for example '#1, lsl #12'. | |||
| 3314 | OperandMatchResultTy | |||
| 3315 | AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) { | |||
| 3316 | SMLoc S = getLoc(); | |||
| 3317 | ||||
| 3318 | if (getTok().is(AsmToken::Hash)) | |||
| 3319 | Lex(); // Eat '#' | |||
| 3320 | else if (getTok().isNot(AsmToken::Integer)) | |||
| 3321 | // The operand should start with '#' or be an integer; otherwise it is not a match. | |||
| 3322 | return MatchOperand_NoMatch; | |||
| 3323 | ||||
| 3324 | if (getTok().is(AsmToken::Integer) && | |||
| 3325 | getLexer().peekTok().is(AsmToken::Colon)) | |||
| 3326 | return tryParseImmRange(Operands); | |||
| 3327 | ||||
| 3328 | const MCExpr *Imm = nullptr; | |||
| 3329 | if (parseSymbolicImmVal(Imm)) | |||
| 3330 | return MatchOperand_ParseFail; | |||
| 3331 | else if (getTok().isNot(AsmToken::Comma)) { | |||
| 3332 | Operands.push_back( | |||
| 3333 | AArch64Operand::CreateImm(Imm, S, getLoc(), getContext())); | |||
| 3334 | return MatchOperand_Success; | |||
| 3335 | } | |||
| 3336 | ||||
| 3337 | // Eat ',' | |||
| 3338 | Lex(); | |||
| 3339 | StringRef VecGroup; | |||
| 3340 | if (!parseOptionalVGOperand(Operands, VecGroup)) { | |||
| 3341 | Operands.push_back( | |||
| 3342 | AArch64Operand::CreateImm(Imm, S, getLoc(), getContext())); | |||
| 3343 | Operands.push_back( | |||
| 3344 | AArch64Operand::CreateToken(VecGroup, getLoc(), getContext())); | |||
| 3345 | return MatchOperand_Success; | |||
| 3346 | } | |||
| 3347 | ||||
| 3348 | // The optional operand must be "lsl #N" where N is non-negative. | |||
| 3349 | if (!getTok().is(AsmToken::Identifier) || | |||
| 3350 | !getTok().getIdentifier().equals_insensitive("lsl")) { | |||
| 3351 | Error(getLoc(), "only 'lsl #+N' valid after immediate"); | |||
| 3352 | return MatchOperand_ParseFail; | |||
| 3353 | } | |||
| 3354 | ||||
| 3355 | // Eat 'lsl' | |||
| 3356 | Lex(); | |||
| 3357 | ||||
| 3358 | parseOptionalToken(AsmToken::Hash); | |||
| 3359 | ||||
| 3360 | if (getTok().isNot(AsmToken::Integer)) { | |||
| 3361 | Error(getLoc(), "only 'lsl #+N' valid after immediate"); | |||
| 3362 | return MatchOperand_ParseFail; | |||
| 3363 | } | |||
| 3364 | ||||
| 3365 | int64_t ShiftAmount = getTok().getIntVal(); | |||
| 3366 | ||||
| 3367 | if (ShiftAmount < 0) { | |||
| 3368 | Error(getLoc(), "positive shift amount required"); | |||
| 3369 | return MatchOperand_ParseFail; | |||
| 3370 | } | |||
| 3371 | Lex(); // Eat the number | |||
| 3372 | ||||
| 3373 | // Just in case the optional lsl #0 is used for immediates other than zero. | |||
| 3374 | if (ShiftAmount == 0 && Imm != nullptr) { | |||
| 3375 | Operands.push_back( | |||
| 3376 | AArch64Operand::CreateImm(Imm, S, getLoc(), getContext())); | |||
| 3377 | return MatchOperand_Success; | |||
| 3378 | } | |||
| 3379 | ||||
| 3380 | Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, | |||
| 3381 | getLoc(), getContext())); | |||
| 3382 | return MatchOperand_Success; | |||
| 3383 | } | |||
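| | // Illustrative examples (assumption): "add x0, x1, #1" takes the plain | |||
| | // immediate path, "add x0, x1, #1, lsl #12" produces a shifted immediate, | |||
| | // and an integer immediately followed by ':' (an immediate range) is | |||
| | // diverted to tryParseImmRange above. | |||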
| 3384 | ||||
| 3385 | /// parseCondCodeString - Parse a Condition Code string, optionally returning a | |||
| 3386 | /// suggestion to help common typos. | |||
| 3387 | AArch64CC::CondCode | |||
| 3388 | AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) { | |||
| 3389 | AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) | |||
| 3390 | .Case("eq", AArch64CC::EQ) | |||
| 3391 | .Case("ne", AArch64CC::NE) | |||
| 3392 | .Case("cs", AArch64CC::HS) | |||
| 3393 | .Case("hs", AArch64CC::HS) | |||
| 3394 | .Case("cc", AArch64CC::LO) | |||
| 3395 | .Case("lo", AArch64CC::LO) | |||
| 3396 | .Case("mi", AArch64CC::MI) | |||
| 3397 | .Case("pl", AArch64CC::PL) | |||
| 3398 | .Case("vs", AArch64CC::VS) | |||
| 3399 | .Case("vc", AArch64CC::VC) | |||
| 3400 | .Case("hi", AArch64CC::HI) | |||
| 3401 | .Case("ls", AArch64CC::LS) | |||
| 3402 | .Case("ge", AArch64CC::GE) | |||
| 3403 | .Case("lt", AArch64CC::LT) | |||
| 3404 | .Case("gt", AArch64CC::GT) | |||
| 3405 | .Case("le", AArch64CC::LE) | |||
| 3406 | .Case("al", AArch64CC::AL) | |||
| 3407 | .Case("nv", AArch64CC::NV) | |||
| 3408 | .Default(AArch64CC::Invalid); | |||
| 3409 | ||||
| 3410 | if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) { | |||
| 3411 | CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) | |||
| 3412 | .Case("none", AArch64CC::EQ) | |||
| 3413 | .Case("any", AArch64CC::NE) | |||
| 3414 | .Case("nlast", AArch64CC::HS) | |||
| 3415 | .Case("last", AArch64CC::LO) | |||
| 3416 | .Case("first", AArch64CC::MI) | |||
| 3417 | .Case("nfrst", AArch64CC::PL) | |||
| 3418 | .Case("pmore", AArch64CC::HI) | |||
| 3419 | .Case("plast", AArch64CC::LS) | |||
| 3420 | .Case("tcont", AArch64CC::GE) | |||
| 3421 | .Case("tstop", AArch64CC::LT) | |||
| 3422 | .Default(AArch64CC::Invalid); | |||
| 3423 | ||||
| 3424 | if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst") | |||
| 3425 | Suggestion = "nfrst"; | |||
| 3426 | } | |||
| 3427 | return CC; | |||
| 3428 | } | |||
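| | // Illustrative mapping (derived from the tables above): "b.eq" / "b.hs" use | |||
| | // the base names, while with SVE enabled aliases such as "b.none" (EQ), | |||
| | // "b.any" (NE) and "b.last" (LO) are also accepted, and the common typo | |||
| | // "nfirst" is reported with the suggestion "nfrst". | |||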
| 3429 | ||||
| 3430 | /// parseCondCode - Parse a Condition Code operand. | |||
| 3431 | bool AArch64AsmParser::parseCondCode(OperandVector &Operands, | |||
| 3432 | bool invertCondCode) { | |||
| 3433 | SMLoc S = getLoc(); | |||
| 3434 | const AsmToken &Tok = getTok(); | |||
| 3435 | assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); | |||
| 3436 | ||||
| 3437 | StringRef Cond = Tok.getString(); | |||
| 3438 | std::string Suggestion; | |||
| 3439 | AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion); | |||
| 3440 | if (CC == AArch64CC::Invalid) { | |||
| 3441 | std::string Msg = "invalid condition code"; | |||
| 3442 | if (!Suggestion.empty()) | |||
| 3443 | Msg += ", did you mean " + Suggestion + "?"; | |||
| 3444 | return TokError(Msg); | |||
| 3445 | } | |||
| 3446 | Lex(); // Eat identifier token. | |||
| 3447 | ||||
| 3448 | if (invertCondCode) { | |||
| 3449 | if (CC == AArch64CC::AL || CC == AArch64CC::NV) | |||
| 3450 | return TokError("condition codes AL and NV are invalid for this instruction"); | |||
| 3451 | CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC)); | |||
| 3452 | } | |||
| 3453 | ||||
| 3454 | Operands.push_back( | |||
| 3455 | AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext())); | |||
| 3456 | return false; | |||
| 3457 | } | |||
| 3458 | ||||
| 3459 | OperandMatchResultTy | |||
| 3460 | AArch64AsmParser::tryParseSVCR(OperandVector &Operands) { | |||
| 3461 | const AsmToken &Tok = getTok(); | |||
| 3462 | SMLoc S = getLoc(); | |||
| 3463 | ||||
| 3464 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 3465 | TokError("invalid operand for instruction"); | |||
| 3466 | return MatchOperand_ParseFail; | |||
| 3467 | } | |||
| 3468 | ||||
| 3469 | unsigned PStateImm = -1; | |||
| 3470 | const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString()); | |||
| 3471 | if (!SVCR) | |||
| 3472 | return MatchOperand_NoMatch; | |||
| 3473 | if (SVCR->haveFeatures(getSTI().getFeatureBits())) | |||
| 3474 | PStateImm = SVCR->Encoding; | |||
| 3475 | ||||
| 3476 | Operands.push_back( | |||
| 3477 | AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext())); | |||
| 3478 | Lex(); // Eat identifier token. | |||
| 3479 | return MatchOperand_Success; | |||
| 3480 | } | |||
| 3481 | ||||
| 3482 | OperandMatchResultTy | |||
| 3483 | AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) { | |||
| 3484 | const AsmToken &Tok = getTok(); | |||
| 3485 | SMLoc S = getLoc(); | |||
| 3486 | ||||
| 3487 | StringRef Name = Tok.getString(); | |||
| 3488 | ||||
| 3489 | if (Name.equals_insensitive("za") || Name.startswith_insensitive("za.")) { | |||
| 3490 | Lex(); // eat "za[.(b|h|s|d)]" | |||
| 3491 | unsigned ElementWidth = 0; | |||
| 3492 | auto DotPosition = Name.find('.'); | |||
| 3493 | if (DotPosition != StringRef::npos) { | |||
| 3494 | const auto &KindRes = | |||
| 3495 | parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix); | |||
| 3496 | if (!KindRes) { | |||
| 3497 | TokError( | |||
| 3498 | "Expected the register to be followed by element width suffix"); | |||
| 3499 | return MatchOperand_ParseFail; | |||
| 3500 | } | |||
| 3501 | ElementWidth = KindRes->second; | |||
| 3502 | } | |||
| 3503 | Operands.push_back(AArch64Operand::CreateMatrixRegister( | |||
| 3504 | AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(), | |||
| 3505 | getContext())); | |||
| 3506 | if (getLexer().is(AsmToken::LBrac)) { | |||
| 3507 | // There's no comma after matrix operand, so we can parse the next operand | |||
| 3508 | // immediately. | |||
| 3509 | if (parseOperand(Operands, false, false)) | |||
| 3510 | return MatchOperand_NoMatch; | |||
| 3511 | } | |||
| 3512 | return MatchOperand_Success; | |||
| 3513 | } | |||
| 3514 | ||||
| 3515 | // Try to parse matrix register. | |||
| 3516 | unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix); | |||
| 3517 | if (!Reg) | |||
| 3518 | return MatchOperand_NoMatch; | |||
| 3519 | ||||
| 3520 | size_t DotPosition = Name.find('.'); | |||
| 3521 | assert(DotPosition != StringRef::npos && "Unexpected register"); | |||
| 3522 | ||||
| 3523 | StringRef Head = Name.take_front(DotPosition); | |||
| 3524 | StringRef Tail = Name.drop_front(DotPosition); | |||
| 3525 | StringRef RowOrColumn = Head.take_back(); | |||
| 3526 | ||||
| 3527 | MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower()) | |||
| 3528 | .Case("h", MatrixKind::Row) | |||
| 3529 | .Case("v", MatrixKind::Col) | |||
| 3530 | .Default(MatrixKind::Tile); | |||
| 3531 | ||||
| 3532 | // Next up, parsing the suffix | |||
| 3533 | const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix); | |||
| 3534 | if (!KindRes) { | |||
| 3535 | TokError("Expected the register to be followed by element width suffix"); | |||
| 3536 | return MatchOperand_ParseFail; | |||
| 3537 | } | |||
| 3538 | unsigned ElementWidth = KindRes->second; | |||
| 3539 | ||||
| 3540 | Lex(); | |||
| 3541 | ||||
| 3542 | Operands.push_back(AArch64Operand::CreateMatrixRegister( | |||
| 3543 | Reg, ElementWidth, Kind, S, getLoc(), getContext())); | |||
| 3544 | ||||
| 3545 | if (getLexer().is(AsmToken::LBrac)) { | |||
| 3546 | // There's no comma after matrix operand, so we can parse the next operand | |||
| 3547 | // immediately. | |||
| 3548 | if (parseOperand(Operands, false, false)) | |||
| 3549 | return MatchOperand_NoMatch; | |||
| 3550 | } | |||
| 3551 | return MatchOperand_Success; | |||
| 3552 | } | |||
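| | // Illustrative examples (assumption): "za" and "za.s" take the array path | |||
| | // above, while tile names such as "za0h.b" or "za1v.d" are parsed here, with | |||
| | // the trailing 'h' / 'v' of the name selecting a row or column view. | |||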
| 3553 | ||||
| 3554 | /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend | |||
| 3555 | /// argument. Parse them if present. | |||
| 3556 | OperandMatchResultTy | |||
| 3557 | AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) { | |||
| 3558 | const AsmToken &Tok = getTok(); | |||
| 3559 | std::string LowerID = Tok.getString().lower(); | |||
| 3560 | AArch64_AM::ShiftExtendType ShOp = | |||
| 3561 | StringSwitch<AArch64_AM::ShiftExtendType>(LowerID) | |||
| 3562 | .Case("lsl", AArch64_AM::LSL) | |||
| 3563 | .Case("lsr", AArch64_AM::LSR) | |||
| 3564 | .Case("asr", AArch64_AM::ASR) | |||
| 3565 | .Case("ror", AArch64_AM::ROR) | |||
| 3566 | .Case("msl", AArch64_AM::MSL) | |||
| 3567 | .Case("uxtb", AArch64_AM::UXTB) | |||
| 3568 | .Case("uxth", AArch64_AM::UXTH) | |||
| 3569 | .Case("uxtw", AArch64_AM::UXTW) | |||
| 3570 | .Case("uxtx", AArch64_AM::UXTX) | |||
| 3571 | .Case("sxtb", AArch64_AM::SXTB) | |||
| 3572 | .Case("sxth", AArch64_AM::SXTH) | |||
| 3573 | .Case("sxtw", AArch64_AM::SXTW) | |||
| 3574 | .Case("sxtx", AArch64_AM::SXTX) | |||
| 3575 | .Default(AArch64_AM::InvalidShiftExtend); | |||
| 3576 | ||||
| 3577 | if (ShOp == AArch64_AM::InvalidShiftExtend) | |||
| 3578 | return MatchOperand_NoMatch; | |||
| 3579 | ||||
| 3580 | SMLoc S = Tok.getLoc(); | |||
| 3581 | Lex(); | |||
| 3582 | ||||
| 3583 | bool Hash = parseOptionalToken(AsmToken::Hash); | |||
| 3584 | ||||
| 3585 | if (!Hash && getLexer().isNot(AsmToken::Integer)) { | |||
| 3586 | if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR || | |||
| 3587 | ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR || | |||
| 3588 | ShOp == AArch64_AM::MSL) { | |||
| 3589 | // We expect a number here. | |||
| 3590 | TokError("expected #imm after shift specifier"); | |||
| 3591 | return MatchOperand_ParseFail; | |||
| 3592 | } | |||
| 3593 | ||||
| 3594 | // "extend" type operations don't need an immediate, #0 is implicit. | |||
| 3595 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 3596 | Operands.push_back( | |||
| 3597 | AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext())); | |||
| 3598 | return MatchOperand_Success; | |||
| 3599 | } | |||
| 3600 | ||||
| 3601 | // Make sure we do actually have a number, identifier or a parenthesized | |||
| 3602 | // expression. | |||
| 3603 | SMLoc E = getLoc(); | |||
| 3604 | if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) && | |||
| 3605 | !getTok().is(AsmToken::Identifier)) { | |||
| 3606 | Error(E, "expected integer shift amount"); | |||
| 3607 | return MatchOperand_ParseFail; | |||
| 3608 | } | |||
| 3609 | ||||
| 3610 | const MCExpr *ImmVal; | |||
| 3611 | if (getParser().parseExpression(ImmVal)) | |||
| 3612 | return MatchOperand_ParseFail; | |||
| 3613 | ||||
| 3614 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 3615 | if (!MCE) { | |||
| 3616 | Error(E, "expected constant '#imm' after shift specifier"); | |||
| 3617 | return MatchOperand_ParseFail; | |||
| 3618 | } | |||
| 3619 | ||||
| 3620 | E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 3621 | Operands.push_back(AArch64Operand::CreateShiftExtend( | |||
| 3622 | ShOp, MCE->getValue(), true, S, E, getContext())); | |||
| 3623 | return MatchOperand_Success; | |||
| 3624 | } | |||
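| | // Illustrative examples (assumption): "add x0, x1, x2, lsl #3" requires an | |||
| | // explicit amount, whereas extends may omit it, e.g. "add x0, x1, w2, uxtw", | |||
| | // which gets an implicit #0 via the early-return path above. | |||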
| 3625 | ||||
| 3626 | static const struct Extension { | |||
| 3627 | const char *Name; | |||
| 3628 | const FeatureBitset Features; | |||
| 3629 | } ExtensionMap[] = { | |||
| 3630 | {"crc", {AArch64::FeatureCRC}}, | |||
| 3631 | {"sm4", {AArch64::FeatureSM4}}, | |||
| 3632 | {"sha3", {AArch64::FeatureSHA3}}, | |||
| 3633 | {"sha2", {AArch64::FeatureSHA2}}, | |||
| 3634 | {"aes", {AArch64::FeatureAES}}, | |||
| 3635 | {"crypto", {AArch64::FeatureCrypto}}, | |||
| 3636 | {"fp", {AArch64::FeatureFPARMv8}}, | |||
| 3637 | {"simd", {AArch64::FeatureNEON}}, | |||
| 3638 | {"ras", {AArch64::FeatureRAS}}, | |||
| 3639 | {"rasv2", {AArch64::FeatureRASv2}}, | |||
| 3640 | {"lse", {AArch64::FeatureLSE}}, | |||
| 3641 | {"predres", {AArch64::FeaturePredRes}}, | |||
| 3642 | {"predres2", {AArch64::FeatureSPECRES2}}, | |||
| 3643 | {"ccdp", {AArch64::FeatureCacheDeepPersist}}, | |||
| 3644 | {"mte", {AArch64::FeatureMTE}}, | |||
| 3645 | {"memtag", {AArch64::FeatureMTE}}, | |||
| 3646 | {"tlb-rmi", {AArch64::FeatureTLB_RMI}}, | |||
| 3647 | {"pan", {AArch64::FeaturePAN}}, | |||
| 3648 | {"pan-rwv", {AArch64::FeaturePAN_RWV}}, | |||
| 3649 | {"ccpp", {AArch64::FeatureCCPP}}, | |||
| 3650 | {"rcpc", {AArch64::FeatureRCPC}}, | |||
| 3651 | {"rng", {AArch64::FeatureRandGen}}, | |||
| 3652 | {"sve", {AArch64::FeatureSVE}}, | |||
| 3653 | {"sve2", {AArch64::FeatureSVE2}}, | |||
| 3654 | {"sve2-aes", {AArch64::FeatureSVE2AES}}, | |||
| 3655 | {"sve2-sm4", {AArch64::FeatureSVE2SM4}}, | |||
| 3656 | {"sve2-sha3", {AArch64::FeatureSVE2SHA3}}, | |||
| 3657 | {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}}, | |||
| 3658 | {"sve2p1", {AArch64::FeatureSVE2p1}}, | |||
| 3659 | {"b16b16", {AArch64::FeatureB16B16}}, | |||
| 3660 | {"ls64", {AArch64::FeatureLS64}}, | |||
| 3661 | {"xs", {AArch64::FeatureXS}}, | |||
| 3662 | {"pauth", {AArch64::FeaturePAuth}}, | |||
| 3663 | {"flagm", {AArch64::FeatureFlagM}}, | |||
| 3664 | {"rme", {AArch64::FeatureRME}}, | |||
| 3665 | {"sme", {AArch64::FeatureSME}}, | |||
| 3666 | {"sme-f64f64", {AArch64::FeatureSMEF64F64}}, | |||
| 3667 | {"sme-f16f16", {AArch64::FeatureSMEF16F16}}, | |||
| 3668 | {"sme-i16i64", {AArch64::FeatureSMEI16I64}}, | |||
| 3669 | {"sme2", {AArch64::FeatureSME2}}, | |||
| 3670 | {"sme2p1", {AArch64::FeatureSME2p1}}, | |||
| 3671 | {"hbc", {AArch64::FeatureHBC}}, | |||
| 3672 | {"mops", {AArch64::FeatureMOPS}}, | |||
| 3673 | {"mec", {AArch64::FeatureMEC}}, | |||
| 3674 | {"the", {AArch64::FeatureTHE}}, | |||
| 3675 | {"d128", {AArch64::FeatureD128}}, | |||
| 3676 | {"lse128", {AArch64::FeatureLSE128}}, | |||
| 3677 | {"ite", {AArch64::FeatureITE}}, | |||
| 3678 | {"cssc", {AArch64::FeatureCSSC}}, | |||
| 3679 | {"rcpc3", {AArch64::FeatureRCPC3}}, | |||
| 3680 | {"gcs", {AArch64::FeatureGCS}}, | |||
| 3681 | // FIXME: Unsupported extensions | |||
| 3682 | {"lor", {}}, | |||
| 3683 | {"rdma", {}}, | |||
| 3684 | {"profile", {}}, | |||
| 3685 | }; | |||
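| | // Note (assumption, not visible in this excerpt): this table is consulted | |||
| | // when mapping extension names, e.g. in ".arch_extension sve2" style | |||
| | // directives, onto the corresponding subtarget feature bits. | |||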
| 3686 | ||||
| 3687 | static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) { | |||
| 3688 | if (FBS[AArch64::HasV8_0aOps]) | |||
| 3689 | Str += "ARMv8a"; | |||
| 3690 | if (FBS[AArch64::HasV8_1aOps]) | |||
| 3691 | Str += "ARMv8.1a"; | |||
| 3692 | else if (FBS[AArch64::HasV8_2aOps]) | |||
| 3693 | Str += "ARMv8.2a"; | |||
| 3694 | else if (FBS[AArch64::HasV8_3aOps]) | |||
| 3695 | Str += "ARMv8.3a"; | |||
| 3696 | else if (FBS[AArch64::HasV8_4aOps]) | |||
| 3697 | Str += "ARMv8.4a"; | |||
| 3698 | else if (FBS[AArch64::HasV8_5aOps]) | |||
| 3699 | Str += "ARMv8.5a"; | |||
| 3700 | else if (FBS[AArch64::HasV8_6aOps]) | |||
| 3701 | Str += "ARMv8.6a"; | |||
| 3702 | else if (FBS[AArch64::HasV8_7aOps]) | |||
| 3703 | Str += "ARMv8.7a"; | |||
| 3704 | else if (FBS[AArch64::HasV8_8aOps]) | |||
| 3705 | Str += "ARMv8.8a"; | |||
| 3706 | else if (FBS[AArch64::HasV8_9aOps]) | |||
| 3707 | Str += "ARMv8.9a"; | |||
| 3708 | else if (FBS[AArch64::HasV9_0aOps]) | |||
| 3709 | Str += "ARMv9-a"; | |||
| 3710 | else if (FBS[AArch64::HasV9_1aOps]) | |||
| 3711 | Str += "ARMv9.1a"; | |||
| 3712 | else if (FBS[AArch64::HasV9_2aOps]) | |||
| 3713 | Str += "ARMv9.2a"; | |||
| 3714 | else if (FBS[AArch64::HasV9_3aOps]) | |||
| 3715 | Str += "ARMv9.3a"; | |||
| 3716 | else if (FBS[AArch64::HasV9_4aOps]) | |||
| 3717 | Str += "ARMv9.4a"; | |||
| 3718 | else if (FBS[AArch64::HasV8_0rOps]) | |||
| 3719 | Str += "ARMv8r"; | |||
| 3720 | else { | |||
| 3721 | SmallVector<std::string, 2> ExtMatches; | |||
| 3722 | for (const auto& Ext : ExtensionMap) { | |||
| 3723 | // Use & in case multiple features are enabled | |||
| 3724 | if ((FBS & Ext.Features) != FeatureBitset()) | |||
| 3725 | ExtMatches.push_back(Ext.Name); | |||
| 3726 | } | |||
| 3727 | Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)"; | |||
| 3728 | } | |||
| 3729 | } | |||
| 3730 | ||||
| 3731 | void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands, | |||
| 3732 | SMLoc S) { | |||
| 3733 | const uint16_t Op2 = Encoding & 7; | |||
| 3734 | const uint16_t Cm = (Encoding & 0x78) >> 3; | |||
| 3735 | const uint16_t Cn = (Encoding & 0x780) >> 7; | |||
| 3736 | const uint16_t Op1 = (Encoding & 0x3800) >> 11; | |||
| 3737 | ||||
| 3738 | const MCExpr *Expr = MCConstantExpr::create(Op1, getContext()); | |||
| 3739 | ||||
| 3740 | Operands.push_back( | |||
| 3741 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); | |||
| 3742 | Operands.push_back( | |||
| 3743 | AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); | |||
| 3744 | Operands.push_back( | |||
| 3745 | AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); | |||
| 3746 | Expr = MCConstantExpr::create(Op2, getContext()); | |||
| 3747 | Operands.push_back( | |||
| 3748 | AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); | |||
| 3749 | } | |||
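| | // For reference (derived from the masks above): the 14-bit SYS encoding is | |||
| | // unpacked as op1 = bits [13:11], CRn = [10:7], CRm = [6:3], op2 = [2:0], | |||
| | // and emitted as the four explicit operands of the canonical SYS form. | |||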
| 3750 | ||||
| 3751 | /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for | |||
| 3752 | /// the SYS instruction. Parse them specially so that we create a SYS MCInst. | |||
| 3753 | bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc, | |||
| 3754 | OperandVector &Operands) { | |||
| 3755 | if (Name.contains('.')) | |||
| 3756 | return TokError("invalid operand"); | |||
| 3757 | ||||
| 3758 | Mnemonic = Name; | |||
| 3759 | Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext())); | |||
| 3760 | ||||
| 3761 | const AsmToken &Tok = getTok(); | |||
| 3762 | StringRef Op = Tok.getString(); | |||
| 3763 | SMLoc S = Tok.getLoc(); | |||
| 3764 | ||||
| 3765 | if (Mnemonic == "ic") { | |||
| 3766 | const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op); | |||
| 3767 | if (!IC) | |||
| 3768 | return TokError("invalid operand for IC instruction"); | |||
| 3769 | else if (!IC->haveFeatures(getSTI().getFeatureBits())) { | |||
| 3770 | std::string Str("IC " + std::string(IC->Name) + " requires: "); | |||
| 3771 | setRequiredFeatureString(IC->getRequiredFeatures(), Str); | |||
| 3772 | return TokError(Str); | |||
| 3773 | } | |||
| 3774 | createSysAlias(IC->Encoding, Operands, S); | |||
| 3775 | } else if (Mnemonic == "dc") { | |||
| 3776 | const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op); | |||
| 3777 | if (!DC) | |||
| 3778 | return TokError("invalid operand for DC instruction"); | |||
| 3779 | else if (!DC->haveFeatures(getSTI().getFeatureBits())) { | |||
| 3780 | std::string Str("DC " + std::string(DC->Name) + " requires: "); | |||
| 3781 | setRequiredFeatureString(DC->getRequiredFeatures(), Str); | |||
| 3782 | return TokError(Str); | |||
| 3783 | } | |||
| 3784 | createSysAlias(DC->Encoding, Operands, S); | |||
| 3785 | } else if (Mnemonic == "at") { | |||
| 3786 | const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op); | |||
| 3787 | if (!AT) | |||
| 3788 | return TokError("invalid operand for AT instruction"); | |||
| 3789 | else if (!AT->haveFeatures(getSTI().getFeatureBits())) { | |||
| 3790 | std::string Str("AT " + std::string(AT->Name) + " requires: "); | |||
| 3791 | setRequiredFeatureString(AT->getRequiredFeatures(), Str); | |||
| 3792 | return TokError(Str); | |||
| 3793 | } | |||
| 3794 | createSysAlias(AT->Encoding, Operands, S); | |||
| 3795 | } else if (Mnemonic == "tlbi") { | |||
| 3796 | const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op); | |||
| 3797 | if (!TLBI) | |||
| 3798 | return TokError("invalid operand for TLBI instruction"); | |||
| 3799 | else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) { | |||
| 3800 | std::string Str("TLBI " + std::string(TLBI->Name) + " requires: "); | |||
| 3801 | setRequiredFeatureString(TLBI->getRequiredFeatures(), Str); | |||
| 3802 | return TokError(Str); | |||
| 3803 | } | |||
| 3804 | createSysAlias(TLBI->Encoding, Operands, S); | |||
| 3805 | } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") { | |||
| 3806 | ||||
| 3807 | if (Op.lower() != "rctx") | |||
| 3808 | return TokError("invalid operand for prediction restriction instruction"); | |||
| 3809 | ||||
| 3810 | bool hasAll = getSTI().hasFeature(AArch64::FeatureAll); | |||
| 3811 | bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes); | |||
| 3812 | bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2); | |||
| 3813 | ||||
| 3814 | if (Mnemonic == "cosp" && !hasSpecres2) | |||
| 3815 | return TokError("COSP requires: predres2"); | |||
| 3816 | if (!hasPredres) | |||
| 3817 | return TokError(Mnemonic.upper() + "RCTX requires: predres"); | |||
| 3818 | ||||
| 3819 | uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100 | |||
| 3820 | : Mnemonic == "dvp" ? 0b101 | |||
| 3821 | : Mnemonic == "cosp" ? 0b110 | |||
| 3822 | : Mnemonic == "cpp" ? 0b111 | |||
| 3823 | : 0; | |||
| 3824 | assert(PRCTX_Op2 && | |||
| 3825 | "Invalid mnemonic for prediction restriction instruction"); | |||
| 3826 | const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3 | |||
| 3827 | const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2; | |||
| 3828 | ||||
| 3829 | createSysAlias(Encoding, Operands, S); | |||
| 3830 | } | |||
| 3831 | ||||
| 3832 | Lex(); // Eat operand. | |||
| 3833 | ||||
| 3834 | bool ExpectRegister = (Op.lower().find("all") == StringRef::npos); | |||
| 3835 | bool HasRegister = false; | |||
| 3836 | ||||
| 3837 | // Check for the optional register operand. | |||
| 3838 | if (parseOptionalToken(AsmToken::Comma)) { | |||
| 3839 | if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands)) | |||
| 3840 | return TokError("expected register operand"); | |||
| 3841 | HasRegister = true; | |||
| 3842 | } | |||
| 3843 | ||||
| 3844 | if (ExpectRegister && !HasRegister) | |||
| 3845 | return TokError("specified " + Mnemonic + " op requires a register"); | |||
| 3846 | else if (!ExpectRegister && HasRegister) | |||
| 3847 | return TokError("specified " + Mnemonic + " op does not use a register"); | |||
| 3848 | ||||
| 3849 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) | |||
| 3850 | return true; | |||
| 3851 | ||||
| 3852 | return false; | |||
| 3853 | } | |||
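| | // Illustrative examples (not from the original source): "ic ialluis" and | |||
| | // "tlbi vmalle1" take no register (the op name contains "all"), while | |||
| | // "dc civac, x0", "at s1e1r, x0" and "cfp rctx, x0" require one. | |||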
| 3854 | ||||
| 3855 | /// parseSyspAlias - The TLBIP instructions are simple aliases for | |||
| 3856 | /// the SYSP instruction. Parse them specially so that we create a SYSP MCInst. | |||
| 3857 | bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc, | |||
| 3858 | OperandVector &Operands) { | |||
| 3859 | if (Name.contains('.')) | |||
| 3860 | return TokError("invalid operand"); | |||
| 3861 | ||||
| 3862 | Mnemonic = Name; | |||
| 3863 | Operands.push_back( | |||
| 3864 | AArch64Operand::CreateToken("sysp", NameLoc, getContext())); | |||
| 3865 | ||||
| 3866 | const AsmToken &Tok = getTok(); | |||
| 3867 | StringRef Op = Tok.getString(); | |||
| 3868 | SMLoc S = Tok.getLoc(); | |||
| 3869 | ||||
| 3870 | if (Mnemonic == "tlbip") { | |||
| 3871 | bool HasnXSQualifier = Op.endswith_insensitive("nXS"); | |||
| 3872 | if (HasnXSQualifier) { | |||
| 3873 | Op = Op.drop_back(3); | |||
| 3874 | } | |||
| 3875 | const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op); | |||
| 3876 | if (!TLBIorig) | |||
| 3877 | return TokError("invalid operand for TLBIP instruction"); | |||
| 3878 | const AArch64TLBI::TLBI TLBI( | |||
| 3879 | TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0), | |||
| 3880 | TLBIorig->NeedsReg, | |||
| 3881 | HasnXSQualifier | |||
| 3882 | ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS}) | |||
| 3883 | : TLBIorig->FeaturesRequired); | |||
| 3884 | if (!TLBI.haveFeatures(getSTI().getFeatureBits())) { | |||
| 3885 | std::string Name = | |||
| 3886 | std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : ""); | |||
| 3887 | std::string Str("TLBIP " + Name + " requires: "); | |||
| 3888 | setRequiredFeatureString(TLBI.getRequiredFeatures(), Str); | |||
| 3889 | return TokError(Str); | |||
| 3890 | } | |||
| 3891 | createSysAlias(TLBI.Encoding, Operands, S); | |||
| 3892 | } | |||
| 3893 | ||||
| 3894 | Lex(); // Eat operand. | |||
| 3895 | ||||
| 3896 | if (parseComma()) | |||
| 3897 | return true; | |||
| 3898 | ||||
| 3899 | if (Tok.isNot(AsmToken::Identifier)) | |||
| 3900 | return TokError("expected register identifier"); | |||
| 3901 | auto Result = tryParseSyspXzrPair(Operands); | |||
| 3902 | if (Result == MatchOperand_NoMatch) | |||
| 3903 | Result = tryParseGPRSeqPair(Operands); | |||
| 3904 | if (Result != MatchOperand_Success) | |||
| 3905 | return TokError("specified " + Mnemonic + | |||
| 3906 | " op requires a pair of registers"); | |||
| 3907 | ||||
| 3908 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) | |||
| 3909 | return true; | |||
| 3910 | ||||
| 3911 | return false; | |||
| 3912 | } | |||
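| | // Illustrative sketch (assumption): "tlbip <op>, x0, x1" uses a register | |||
| | // pair (or "xzr, xzr"); appending "nXS" to the op name sets bit 7 of the | |||
| | // encoding and additionally requires FeatureXS, as handled above. | |||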
| 3913 | ||||
| 3914 | OperandMatchResultTy | |||
| 3915 | AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) { | |||
| 3916 | MCAsmParser &Parser = getParser(); | |||
| 3917 | const AsmToken &Tok = getTok(); | |||
| 3918 | ||||
| 3919 | if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) { | |||
| 3920 | TokError("'csync' operand expected"); | |||
| 3921 | return MatchOperand_ParseFail; | |||
| 3922 | } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { | |||
| 3923 | // Immediate operand. | |||
| 3924 | const MCExpr *ImmVal; | |||
| 3925 | SMLoc ExprLoc = getLoc(); | |||
| 3926 | AsmToken IntTok = Tok; | |||
| 3927 | if (getParser().parseExpression(ImmVal)) | |||
| 3928 | return MatchOperand_ParseFail; | |||
| 3929 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 3930 | if (!MCE) { | |||
| 3931 | Error(ExprLoc, "immediate value expected for barrier operand"); | |||
| 3932 | return MatchOperand_ParseFail; | |||
| 3933 | } | |||
| 3934 | int64_t Value = MCE->getValue(); | |||
| 3935 | if (Mnemonic == "dsb" && Value > 15) { | |||
| 3936 | // This case is a no match here, but it might be matched by the nXS | |||
| 3937 | // variant. We deliberately do not unlex the optional '#', as it is not | |||
| 3938 | // needed to characterize an integer immediate. | |||
| 3939 | Parser.getLexer().UnLex(IntTok); | |||
| 3940 | return MatchOperand_NoMatch; | |||
| 3941 | } | |||
| 3942 | if (Value < 0 || Value > 15) { | |||
| 3943 | Error(ExprLoc, "barrier operand out of range"); | |||
| 3944 | return MatchOperand_ParseFail; | |||
| 3945 | } | |||
| 3946 | auto DB = AArch64DB::lookupDBByEncoding(Value); | |||
| 3947 | Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "", | |||
| 3948 | ExprLoc, getContext(), | |||
| 3949 | false /*hasnXSModifier*/)); | |||
| 3950 | return MatchOperand_Success; | |||
| 3951 | } | |||
| 3952 | ||||
| 3953 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 3954 | TokError("invalid operand for instruction"); | |||
| 3955 | return MatchOperand_ParseFail; | |||
| 3956 | } | |||
| 3957 | ||||
| 3958 | StringRef Operand = Tok.getString(); | |||
| 3959 | auto TSB = AArch64TSB::lookupTSBByName(Operand); | |||
| 3960 | auto DB = AArch64DB::lookupDBByName(Operand); | |||
| 3961 | // The only valid named option for ISB is 'sy' | |||
| 3962 | if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) { | |||
| 3963 | TokError("'sy' or #imm operand expected"); | |||
| 3964 | return MatchOperand_ParseFail; | |||
| 3965 | // The only valid named option for TSB is 'csync' | |||
| 3966 | } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) { | |||
| 3967 | TokError("'csync' operand expected"); | |||
| 3968 | return MatchOperand_ParseFail; | |||
| 3969 | } else if (!DB && !TSB) { | |||
| 3970 | if (Mnemonic == "dsb") { | |||
| 3971 | // This case is a no match here, but it might be matched by the nXS | |||
| 3972 | // variant. | |||
| 3973 | return MatchOperand_NoMatch; | |||
| 3974 | } | |||
| 3975 | TokError("invalid barrier option name"); | |||
| 3976 | return MatchOperand_ParseFail; | |||
| 3977 | } | |||
| 3978 | ||||
| 3979 | Operands.push_back(AArch64Operand::CreateBarrier( | |||
| 3980 | DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), | |||
| 3981 | getContext(), false /*hasnXSModifier*/)); | |||
| 3982 | Lex(); // Consume the option | |||
| 3983 | ||||
| 3984 | return MatchOperand_Success; | |||
| 3985 | } | |||
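| | // Illustrative examples (assumption): "dmb ish" and "dsb sy" resolve via | |||
| | // lookupDBByName, "isb" only accepts "sy" or an immediate, "tsb csync" is | |||
| | // the sole named TSB form, and "dsb #20" is left for the nXS parser below. | |||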
| 3986 | ||||
| 3987 | OperandMatchResultTy | |||
| 3988 | AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) { | |||
| 3989 | const AsmToken &Tok = getTok(); | |||
| 3990 | ||||
| 3991 | assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands")(static_cast <bool> (Mnemonic == "dsb" && "Instruction does not accept nXS operands" ) ? void (0) : __assert_fail ("Mnemonic == \"dsb\" && \"Instruction does not accept nXS operands\"" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 3991 , __extension__ __PRETTY_FUNCTION__)); | |||
| 3992 | if (Mnemonic != "dsb") | |||
| 3993 | return MatchOperand_ParseFail; | |||
| 3994 | ||||
| 3995 | if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { | |||
| 3996 | // Immediate operand. | |||
| 3997 | const MCExpr *ImmVal; | |||
| 3998 | SMLoc ExprLoc = getLoc(); | |||
| 3999 | if (getParser().parseExpression(ImmVal)) | |||
| 4000 | return MatchOperand_ParseFail; | |||
| 4001 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 4002 | if (!MCE) { | |||
| 4003 | Error(ExprLoc, "immediate value expected for barrier operand"); | |||
| 4004 | return MatchOperand_ParseFail; | |||
| 4005 | } | |||
| 4006 | int64_t Value = MCE->getValue(); | |||
| 4007 | // v8.7-A DSB in the nXS variant accepts only the following immediate | |||
| 4008 | // values: 16, 20, 24, 28. | |||
| 4009 | if (Value != 16 && Value != 20 && Value != 24 && Value != 28) { | |||
| 4010 | Error(ExprLoc, "barrier operand out of range"); | |||
| 4011 | return MatchOperand_ParseFail; | |||
| 4012 | } | |||
| 4013 | auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value); | |||
| 4014 | Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name, | |||
| 4015 | ExprLoc, getContext(), | |||
| 4016 | true /*hasnXSModifier*/)); | |||
| 4017 | return MatchOperand_Success; | |||
| 4018 | } | |||
| 4019 | ||||
| 4020 | if (Tok.isNot(AsmToken::Identifier)) { | |||
| 4021 | TokError("invalid operand for instruction"); | |||
| 4022 | return MatchOperand_ParseFail; | |||
| 4023 | } | |||
| 4024 | ||||
| 4025 | StringRef Operand = Tok.getString(); | |||
| 4026 | auto DB = AArch64DBnXS::lookupDBnXSByName(Operand); | |||
| 4027 | ||||
| 4028 | if (!DB) { | |||
| 4029 | TokError("invalid barrier option name"); | |||
| 4030 | return MatchOperand_ParseFail; | |||
| 4031 | } | |||
| 4032 | ||||
| 4033 | Operands.push_back( | |||
| 4034 | AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(), | |||
| 4035 | getContext(), true /*hasnXSModifier*/)); | |||
| 4036 | Lex(); // Consume the option | |||
| 4037 | ||||
| 4038 | return MatchOperand_Success; | |||
| 4039 | } | |||
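| | // Illustrative examples (assumption): the FEAT_XS forms "dsb synxs" and | |||
| | // "dsb ishnxs" (named) or "dsb #16" .. "dsb #28" (immediate, multiples of 4) | |||
| | // are expected to be accepted here. | |||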
| 4040 | ||||
| 4041 | OperandMatchResultTy | |||
| 4042 | AArch64AsmParser::tryParseSysReg(OperandVector &Operands) { | |||
| 4043 | const AsmToken &Tok = getTok(); | |||
| 4044 | ||||
| 4045 | if (Tok.isNot(AsmToken::Identifier)) | |||
| 4046 | return MatchOperand_NoMatch; | |||
| 4047 | ||||
| 4048 | if (AArch64SVCR::lookupSVCRByName(Tok.getString())) | |||
| 4049 | return MatchOperand_NoMatch; | |||
| 4050 | ||||
| 4051 | int MRSReg, MSRReg; | |||
| 4052 | auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString()); | |||
| 4053 | if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) { | |||
| 4054 | MRSReg = SysReg->Readable ? SysReg->Encoding : -1; | |||
| 4055 | MSRReg = SysReg->Writeable ? SysReg->Encoding : -1; | |||
| 4056 | } else | |||
| 4057 | MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString()); | |||
| 4058 | ||||
| 4059 | unsigned PStateImm = -1; | |||
| 4060 | auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString()); | |||
| 4061 | if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits())) | |||
| 4062 | PStateImm = PState15->Encoding; | |||
| 4063 | if (!PState15) { | |||
| 4064 | auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString()); | |||
| 4065 | if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits())) | |||
| 4066 | PStateImm = PState1->Encoding; | |||
| 4067 | } | |||
| 4068 | ||||
| 4069 | Operands.push_back( | |||
| 4070 | AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg, | |||
| 4071 | PStateImm, getContext())); | |||
| 4072 | Lex(); // Eat identifier | |||
| 4073 | ||||
| 4074 | return MatchOperand_Success; | |||
| 4075 | } | |||
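| | // Illustrative examples (assumption): named registers such as | |||
| | // "mrs x0, currentel" or "msr spsel, #1", and generic encodings of the form | |||
| | // "s3_0_c4_c2_0" via parseGenericRegister, are all handled here. | |||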
| 4076 | ||||
| 4077 | /// tryParseNeonVectorRegister - Parse a vector register operand. | |||
| 4078 | bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) { | |||
| 4079 | if (getTok().isNot(AsmToken::Identifier)) | |||
| 4080 | return true; | |||
| 4081 | ||||
| 4082 | SMLoc S = getLoc(); | |||
| 4083 | // Check for a vector register specifier first. | |||
| 4084 | StringRef Kind; | |||
| 4085 | MCRegister Reg; | |||
| 4086 | OperandMatchResultTy Res = | |||
| 4087 | tryParseVectorRegister(Reg, Kind, RegKind::NeonVector); | |||
| 4088 | if (Res != MatchOperand_Success) | |||
| 4089 | return true; | |||
| 4090 | ||||
| 4091 | const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector); | |||
| 4092 | if (!KindRes) | |||
| 4093 | return true; | |||
| 4094 | ||||
| 4095 | unsigned ElementWidth = KindRes->second; | |||
| 4096 | Operands.push_back( | |||
| 4097 | AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth, | |||
| 4098 | S, getLoc(), getContext())); | |||
| 4099 | ||||
| 4100 | // If there was an explicit qualifier, that goes on as a literal text | |||
| 4101 | // operand. | |||
| 4102 | if (!Kind.empty()) | |||
| 4103 | Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext())); | |||
| 4104 | ||||
| 4105 | return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; | |||
| 4106 | } | |||
| 4107 | ||||
| 4108 | OperandMatchResultTy | |||
| 4109 | AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) { | |||
| 4110 | SMLoc SIdx = getLoc(); | |||
| 4111 | if (parseOptionalToken(AsmToken::LBrac)) { | |||
| 4112 | const MCExpr *ImmVal; | |||
| 4113 | if (getParser().parseExpression(ImmVal)) | |||
| 4114 | return MatchOperand_NoMatch; | |||
| 4115 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 4116 | if (!MCE) { | |||
| 4117 | TokError("immediate value expected for vector index"); | |||
| 4118 | return MatchOperand_ParseFail; | |||
| 4119 | } | |||
| 4120 | ||||
| 4121 | SMLoc E = getLoc(); | |||
| 4122 | ||||
| 4123 | if (parseToken(AsmToken::RBrac, "']' expected")) | |||
| 4124 | return MatchOperand_ParseFail; | |||
| 4125 | ||||
| 4126 | Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx, | |||
| 4127 | E, getContext())); | |||
| 4128 | return MatchOperand_Success; | |||
| 4129 | } | |||
| 4130 | ||||
| 4131 | return MatchOperand_NoMatch; | |||
| 4132 | } | |||
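| | // Illustrative example (assumption): the "[<imm>]" suffix parsed here covers | |||
| | // lane references such as "mov w0, v1.s[2]" or "ins v0.d[1], x2". | |||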
| 4133 | ||||
| 4134 | // tryParseVectorRegister - Try to parse a vector register name with | |||
| 4135 | // optional kind specifier. If it is a register specifier, eat the token | |||
| 4136 | // and return it. | |||
| 4137 | OperandMatchResultTy | |||
| 4138 | AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg, StringRef &Kind, | |||
| 4139 | RegKind MatchKind) { | |||
| 4140 | const AsmToken &Tok = getTok(); | |||
| 4141 | ||||
| 4142 | if (Tok.isNot(AsmToken::Identifier)) | |||
| 4143 | return MatchOperand_NoMatch; | |||
| 4144 | ||||
| 4145 | StringRef Name = Tok.getString(); | |||
| 4146 | // If there is a kind specifier, it's separated from the register name by | |||
| 4147 | // a '.'. | |||
| 4148 | size_t Start = 0, Next = Name.find('.'); | |||
| 4149 | StringRef Head = Name.slice(Start, Next); | |||
| 4150 | unsigned RegNum = matchRegisterNameAlias(Head, MatchKind); | |||
| 4151 | ||||
| 4152 | if (RegNum) { | |||
| 4153 | if (Next != StringRef::npos) { | |||
| 4154 | Kind = Name.slice(Next, StringRef::npos); | |||
| 4155 | if (!isValidVectorKind(Kind, MatchKind)) { | |||
| 4156 | TokError("invalid vector kind qualifier"); | |||
| 4157 | return MatchOperand_ParseFail; | |||
| 4158 | } | |||
| 4159 | } | |||
| 4160 | Lex(); // Eat the register token. | |||
| 4161 | ||||
| 4162 | Reg = RegNum; | |||
| 4163 | return MatchOperand_Success; | |||
| 4164 | } | |||
| 4165 | ||||
| 4166 | return MatchOperand_NoMatch; | |||
| 4167 | } | |||
| 4168 | ||||
| 4169 | /// tryParseSVEPredicateVector - Parse a SVE predicate register operand. | |||
| 4170 | template <RegKind RK> OperandMatchResultTy | |||
| 4171 | AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) { | |||
| 4172 | // Check for a SVE predicate register specifier first. | |||
| 4173 | const SMLoc S = getLoc(); | |||
| 4174 | StringRef Kind; | |||
| 4175 | MCRegister RegNum; | |||
| 4176 | auto Res = tryParseVectorRegister(RegNum, Kind, RK); | |||
| 4177 | if (Res != MatchOperand_Success) | |||
| 4178 | return Res; | |||
| 4179 | ||||
| 4180 | const auto &KindRes = parseVectorKind(Kind, RK); | |||
| 4181 | if (!KindRes) | |||
| 4182 | return MatchOperand_NoMatch; | |||
| 4183 | ||||
| 4184 | unsigned ElementWidth = KindRes->second; | |||
| 4185 | Operands.push_back(AArch64Operand::CreateVectorReg( | |||
| 4186 | RegNum, RK, ElementWidth, S, | |||
| 4187 | getLoc(), getContext())); | |||
| 4188 | ||||
| 4189 | if (getLexer().is(AsmToken::LBrac)) { | |||
| 4190 | if (RK == RegKind::SVEPredicateAsCounter) { | |||
| 4191 | OperandMatchResultTy ResIndex = tryParseVectorIndex(Operands); | |||
| 4192 | if (ResIndex == MatchOperand_Success) | |||
| 4193 | return MatchOperand_Success; | |||
| 4194 | } else { | |||
| 4195 | // Indexed predicate, there's no comma so try parse the next operand | |||
| 4196 | // immediately. | |||
| 4197 | if (parseOperand(Operands, false, false)) | |||
| 4198 | return MatchOperand_NoMatch; | |||
| 4199 | } | |||
| 4200 | } | |||
| 4201 | ||||
| 4202 | // Not all predicates are followed by a '/m' or '/z'. | |||
| 4203 | if (getTok().isNot(AsmToken::Slash)) | |||
| 4204 | return MatchOperand_Success; | |||
| 4205 | ||||
| 4206 | // But when they do they shouldn't have an element type suffix. | |||
| 4207 | if (!Kind.empty()) { | |||
| 4208 | Error(S, "not expecting size suffix"); | |||
| 4209 | return MatchOperand_ParseFail; | |||
| 4210 | } | |||
| 4211 | ||||
| 4212 | // Add a literal slash as operand | |||
| 4213 | Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext())); | |||
| 4214 | ||||
| 4215 | Lex(); // Eat the slash. | |||
| 4216 | ||||
| 4217 | // Zeroing or merging? | |||
| 4218 | auto Pred = getTok().getString().lower(); | |||
| 4219 | if (RK == RegKind::SVEPredicateAsCounter && Pred != "z") { | |||
| 4220 | Error(getLoc(), "expecting 'z' predication"); | |||
| 4221 | return MatchOperand_ParseFail; | |||
| 4222 | } | |||
| 4223 | ||||
| 4224 | if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m") { | |||
| 4225 | Error(getLoc(), "expecting 'm' or 'z' predication"); | |||
| 4226 | return MatchOperand_ParseFail; | |||
| 4227 | } | |||
| 4228 | ||||
| 4229 | // Add zero/merge token. | |||
| 4230 | const char *ZM = Pred == "z" ? "z" : "m"; | |||
| 4231 | Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext())); | |||
| 4232 | ||||
| 4233 | Lex(); // Eat zero/merge token. | |||
| 4234 | return MatchOperand_Success; | |||
| 4235 | } | |||
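| | // Illustrative examples (assumption): "p0.b" (data form), qualified uses | |||
| | // such as "p0/z" or "p0/m", and predicate-as-counter forms like "pn8" with | |||
| | // an optional vector index are parsed through this path. | |||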
| 4236 | ||||
| 4237 | /// parseRegister - Parse a register operand. | |||
| 4238 | bool AArch64AsmParser::parseRegister(OperandVector &Operands) { | |||
| 4239 | // Try for a Neon vector register. | |||
| 4240 | if (!tryParseNeonVectorRegister(Operands)) | |||
| 4241 | return false; | |||
| 4242 | ||||
| 4243 | if (tryParseZTOperand(Operands) == MatchOperand_Success) | |||
| 4244 | return false; | |||
| 4245 | ||||
| 4246 | // Otherwise try for a scalar register. | |||
| 4247 | if (tryParseGPROperand<false>(Operands) == MatchOperand_Success) | |||
| 4248 | return false; | |||
| 4249 | ||||
| 4250 | return true; | |||
| 4251 | } | |||
| 4252 | ||||
| 4253 | bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) { | |||
| 4254 | bool HasELFModifier = false; | |||
| 4255 | AArch64MCExpr::VariantKind RefKind; | |||
| 4256 | ||||
| 4257 | if (parseOptionalToken(AsmToken::Colon)) { | |||
| 4258 | HasELFModifier = true; | |||
| 4259 | ||||
| 4260 | if (getTok().isNot(AsmToken::Identifier)) | |||
| 4261 | return TokError("expect relocation specifier in operand after ':'"); | |||
| 4262 | ||||
| 4263 | std::string LowerCase = getTok().getIdentifier().lower(); | |||
| 4264 | RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase) | |||
| 4265 | .Case("lo12", AArch64MCExpr::VK_LO12) | |||
| 4266 | .Case("abs_g3", AArch64MCExpr::VK_ABS_G3) | |||
| 4267 | .Case("abs_g2", AArch64MCExpr::VK_ABS_G2) | |||
| 4268 | .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S) | |||
| 4269 | .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC) | |||
| 4270 | .Case("abs_g1", AArch64MCExpr::VK_ABS_G1) | |||
| 4271 | .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S) | |||
| 4272 | .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC) | |||
| 4273 | .Case("abs_g0", AArch64MCExpr::VK_ABS_G0) | |||
| 4274 | .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S) | |||
| 4275 | .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC) | |||
| 4276 | .Case("prel_g3", AArch64MCExpr::VK_PREL_G3) | |||
| 4277 | .Case("prel_g2", AArch64MCExpr::VK_PREL_G2) | |||
| 4278 | .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC) | |||
| 4279 | .Case("prel_g1", AArch64MCExpr::VK_PREL_G1) | |||
| 4280 | .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC) | |||
| 4281 | .Case("prel_g0", AArch64MCExpr::VK_PREL_G0) | |||
| 4282 | .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC) | |||
| 4283 | .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2) | |||
| 4284 | .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1) | |||
| 4285 | .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC) | |||
| 4286 | .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0) | |||
| 4287 | .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC) | |||
| 4288 | .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12) | |||
| 4289 | .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12) | |||
| 4290 | .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC) | |||
| 4291 | .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC) | |||
| 4292 | .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2) | |||
| 4293 | .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1) | |||
| 4294 | .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC) | |||
| 4295 | .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0) | |||
| 4296 | .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC) | |||
| 4297 | .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12) | |||
| 4298 | .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12) | |||
| 4299 | .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC) | |||
| 4300 | .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12) | |||
| 4301 | .Case("got", AArch64MCExpr::VK_GOT_PAGE) | |||
| 4302 | .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15) | |||
| 4303 | .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12) | |||
| 4304 | .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE) | |||
| 4305 | .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC) | |||
| 4306 | .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1) | |||
| 4307 | .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC) | |||
| 4308 | .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE) | |||
| 4309 | .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12) | |||
| 4310 | .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12) | |||
| 4311 | .Default(AArch64MCExpr::VK_INVALID); | |||
| 4312 | ||||
| 4313 | if (RefKind == AArch64MCExpr::VK_INVALID) | |||
| 4314 | return TokError("expect relocation specifier in operand after ':'"); | |||
| 4315 | ||||
| 4316 | Lex(); // Eat identifier | |||
| 4317 | ||||
| 4318 | if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier")) | |||
| 4319 | return true; | |||
| 4320 | } | |||
| 4321 | ||||
| 4322 | if (getParser().parseExpression(ImmVal)) | |||
| 4323 | return true; | |||
| 4324 | ||||
| 4325 | if (HasELFModifier) | |||
| 4326 | ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext()); | |||
| 4327 | ||||
| 4328 | return false; | |||
| 4329 | } | |||
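| | // Illustrative examples (assumption): ":lo12:sym" in "add x0, x0, :lo12:sym" | |||
| | // and ":got_lo12:sym" in "ldr x0, [x0, :got_lo12:sym]" are wrapped in an | |||
| | // AArch64MCExpr with the matching VariantKind here. | |||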
| 4330 | ||||
| 4331 | OperandMatchResultTy | |||
| 4332 | AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) { | |||
| 4333 | if (getTok().isNot(AsmToken::LCurly)) | |||
| 4334 | return MatchOperand_NoMatch; | |||
| 4335 | ||||
| 4336 | auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) { | |||
| 4337 | StringRef Name = getTok().getString(); | |||
| 4338 | size_t DotPosition = Name.find('.'); | |||
| 4339 | if (DotPosition == StringRef::npos) | |||
| 4340 | return MatchOperand_NoMatch; | |||
| 4341 | ||||
| 4342 | unsigned RegNum = matchMatrixTileListRegName(Name); | |||
| 4343 | if (!RegNum) | |||
| 4344 | return MatchOperand_NoMatch; | |||
| 4345 | ||||
| 4346 | StringRef Tail = Name.drop_front(DotPosition); | |||
| 4347 | const std::optional<std::pair<int, int>> &KindRes = | |||
| 4348 | parseVectorKind(Tail, RegKind::Matrix); | |||
| 4349 | if (!KindRes) { | |||
| 4350 | TokError("Expected the register to be followed by element width suffix"); | |||
| 4351 | return MatchOperand_ParseFail; | |||
| 4352 | } | |||
| 4353 | ElementWidth = KindRes->second; | |||
| 4354 | Reg = RegNum; | |||
| 4355 | Lex(); // Eat the register. | |||
| 4356 | return MatchOperand_Success; | |||
| 4357 | }; | |||
| 4358 | ||||
| 4359 | SMLoc S = getLoc(); | |||
| 4360 | auto LCurly = getTok(); | |||
| 4361 | Lex(); // Eat left bracket token. | |||
| 4362 | ||||
| 4363 | // Empty matrix list | |||
| 4364 | if (parseOptionalToken(AsmToken::RCurly)) { | |||
| 4365 | Operands.push_back(AArch64Operand::CreateMatrixTileList( | |||
| 4366 | /*RegMask=*/0, S, getLoc(), getContext())); | |||
| 4367 | return MatchOperand_Success; | |||
| 4368 | } | |||
| 4369 | ||||
| 4370 | // Try parse {za} alias early | |||
| 4371 | if (getTok().getString().equals_insensitive("za")) { | |||
| 4372 | Lex(); // Eat 'za' | |||
| 4373 | ||||
| 4374 | if (parseToken(AsmToken::RCurly, "'}' expected")) | |||
| 4375 | return MatchOperand_ParseFail; | |||
| 4376 | ||||
| 4377 | Operands.push_back(AArch64Operand::CreateMatrixTileList( | |||
| 4378 | /*RegMask=*/0xFF, S, getLoc(), getContext())); | |||
| 4379 | return MatchOperand_Success; | |||
| 4380 | } | |||
| 4381 | ||||
| 4382 | SMLoc TileLoc = getLoc(); | |||
| 4383 | ||||
| 4384 | unsigned FirstReg, ElementWidth; | |||
| 4385 | auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth); | |||
| 4386 | if (ParseRes != MatchOperand_Success) { | |||
| 4387 | getLexer().UnLex(LCurly); | |||
| 4388 | return ParseRes; | |||
| 4389 | } | |||
| 4390 | ||||
| 4391 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); | |||
| 4392 | ||||
| 4393 | unsigned PrevReg = FirstReg; | |||
| 4394 | ||||
| 4395 | SmallSet<unsigned, 8> DRegs; | |||
| 4396 | AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth); | |||
| 4397 | ||||
| 4398 | SmallSet<unsigned, 8> SeenRegs; | |||
| 4399 | SeenRegs.insert(FirstReg); | |||
| 4400 | ||||
| 4401 | while (parseOptionalToken(AsmToken::Comma)) { | |||
| 4402 | TileLoc = getLoc(); | |||
| 4403 | unsigned Reg, NextElementWidth; | |||
| 4404 | ParseRes = ParseMatrixTile(Reg, NextElementWidth); | |||
| 4405 | if (ParseRes != MatchOperand_Success) | |||
| 4406 | return ParseRes; | |||
| 4407 | ||||
| 4408 | // Element size must match on all regs in the list. | |||
| 4409 | if (ElementWidth != NextElementWidth) { | |||
| 4410 | Error(TileLoc, "mismatched register size suffix"); | |||
| 4411 | return MatchOperand_ParseFail; | |||
| 4412 | } | |||
| 4413 | ||||
| 4414 | if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg))) | |||
| 4415 | Warning(TileLoc, "tile list not in ascending order"); | |||
| 4416 | ||||
| 4417 | if (SeenRegs.contains(Reg)) | |||
| 4418 | Warning(TileLoc, "duplicate tile in list"); | |||
| 4419 | else { | |||
| 4420 | SeenRegs.insert(Reg); | |||
| 4421 | AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth); | |||
| 4422 | } | |||
| 4423 | ||||
| 4424 | PrevReg = Reg; | |||
| 4425 | } | |||
| 4426 | ||||
| 4427 | if (parseToken(AsmToken::RCurly, "'}' expected")) | |||
| 4428 | return MatchOperand_ParseFail; | |||
| 4429 | ||||
| 4430 | unsigned RegMask = 0; | |||
| 4431 | for (auto Reg : DRegs) | |||
| 4432 | RegMask |= 0x1 << (RI->getEncodingValue(Reg) - | |||
| 4433 | RI->getEncodingValue(AArch64::ZAD0)); | |||
| 4434 | Operands.push_back( | |||
| 4435 | AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext())); | |||
| 4436 | ||||
| 4437 | return MatchOperand_Success; | |||
| 4438 | } | |||
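| | // Illustrative tile lists handled above (examples only): | |||
| | //   {}                  // empty list, RegMask = 0 | |||
| | //   { za }              // whole-array alias, RegMask = 0xFF | |||
| | //   { za0.d, za1.d }    // explicit tiles; element widths must match | |||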
| 4439 | ||||
| 4440 | template <RegKind VectorKind> | |||
| 4441 | OperandMatchResultTy | |||
| 4442 | AArch64AsmParser::tryParseVectorList(OperandVector &Operands, | |||
| 4443 | bool ExpectMatch) { | |||
| 4444 | MCAsmParser &Parser = getParser(); | |||
| 4445 | if (!getTok().is(AsmToken::LCurly)) | |||
| 4446 | return MatchOperand_NoMatch; | |||
| 4447 | ||||
| 4448 | // Wrapper around parse function | |||
| 4449 | auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc, | |||
| 4450 | bool NoMatchIsError) { | |||
| 4451 | auto RegTok = getTok(); | |||
| 4452 | auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind); | |||
| 4453 | if (ParseRes == MatchOperand_Success) { | |||
| 4454 | if (parseVectorKind(Kind, VectorKind)) | |||
| 4455 | return ParseRes; | |||
| 4456 | llvm_unreachable("Expected a valid vector kind"); | |||
| 4457 | } | |||
| 4458 | ||||
| 4459 | if (RegTok.is(AsmToken::Identifier) && ParseRes == MatchOperand_NoMatch && | |||
| 4460 | RegTok.getString().equals_insensitive("zt0")) | |||
| 4461 | return MatchOperand_NoMatch; | |||
| 4462 | ||||
| 4463 | if (RegTok.isNot(AsmToken::Identifier) || | |||
| 4464 | ParseRes == MatchOperand_ParseFail || | |||
| 4465 | (ParseRes == MatchOperand_NoMatch && NoMatchIsError && | |||
| 4466 | !RegTok.getString().startswith_insensitive("za"))) { | |||
| 4467 | Error(Loc, "vector register expected"); | |||
| 4468 | return MatchOperand_ParseFail; | |||
| 4469 | } | |||
| 4470 | ||||
| 4471 | return MatchOperand_NoMatch; | |||
| 4472 | }; | |||
| 4473 | ||||
| 4474 | int NumRegs = getNumRegsForRegKind(VectorKind); | |||
| 4475 | SMLoc S = getLoc(); | |||
| 4476 | auto LCurly = getTok(); | |||
| 4477 | Lex(); // Eat left bracket token. | |||
| 4478 | ||||
| 4479 | StringRef Kind; | |||
| 4480 | MCRegister FirstReg; | |||
| 4481 | auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch); | |||
| 4482 | ||||
| 4483 | // Put back the original left bracket if there was no match, so that | |||
| 4484 | // different types of list-operands can be matched (e.g. SVE, Neon). | |||
| 4485 | if (ParseRes == MatchOperand_NoMatch) | |||
| 4486 | Parser.getLexer().UnLex(LCurly); | |||
| 4487 | ||||
| 4488 | if (ParseRes != MatchOperand_Success) | |||
| 4489 | return ParseRes; | |||
| 4490 | ||||
| 4491 | int64_t PrevReg = FirstReg; | |||
| 4492 | unsigned Count = 1; | |||
| 4493 | ||||
| 4494 | int Stride = 1; | |||
| 4495 | if (parseOptionalToken(AsmToken::Minus)) { | |||
| 4496 | SMLoc Loc = getLoc(); | |||
| 4497 | StringRef NextKind; | |||
| 4498 | ||||
| 4499 | MCRegister Reg; | |||
| 4500 | ParseRes = ParseVector(Reg, NextKind, getLoc(), true); | |||
| 4501 | if (ParseRes != MatchOperand_Success) | |||
| 4502 | return ParseRes; | |||
| 4503 | ||||
| 4504 | // The kind suffix must match on all regs in the list. | |||
| 4505 | if (Kind != NextKind) { | |||
| 4506 | Error(Loc, "mismatched register size suffix"); | |||
| 4507 | return MatchOperand_ParseFail; | |||
| 4508 | } | |||
| 4509 | ||||
| 4510 | unsigned Space = | |||
| 4511 | (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg); | |||
| 4512 | ||||
| 4513 | if (Space == 0 || Space > 3) { | |||
| 4514 | Error(Loc, "invalid number of vectors"); | |||
| 4515 | return MatchOperand_ParseFail; | |||
| 4516 | } | |||
| 4517 | ||||
| 4518 | Count += Space; | |||
| 4519 | } | |||
| 4520 | else { | |||
| 4521 | bool HasCalculatedStride = false; | |||
| 4522 | while (parseOptionalToken(AsmToken::Comma)) { | |||
| 4523 | SMLoc Loc = getLoc(); | |||
| 4524 | StringRef NextKind; | |||
| 4525 | MCRegister Reg; | |||
| 4526 | ParseRes = ParseVector(Reg, NextKind, getLoc(), true); | |||
| 4527 | if (ParseRes != MatchOperand_Success) | |||
| 4528 | return ParseRes; | |||
| 4529 | ||||
| 4530 | // The kind suffix must match on all regs in the list. | |||
| 4531 | if (Kind != NextKind) { | |||
| 4532 | Error(Loc, "mismatched register size suffix"); | |||
| 4533 | return MatchOperand_ParseFail; | |||
| 4534 | } | |||
| 4535 | ||||
| 4536 | unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg); | |||
| 4537 | unsigned PrevRegVal = | |||
| 4538 | getContext().getRegisterInfo()->getEncodingValue(PrevReg); | |||
| 4539 | if (!HasCalculatedStride) { | |||
| 4540 | Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal) | |||
| 4541 | : (RegVal + NumRegs - PrevRegVal); | |||
| 4542 | HasCalculatedStride = true; | |||
| 4543 | } | |||
| 4544 | ||||
| 4545 | // Register must be incremental (with a wraparound at last register). | |||
| 4546 | if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) { | |||
| 4547 | Error(Loc, "registers must have the same sequential stride"); | |||
| 4548 | return MatchOperand_ParseFail; | |||
| 4549 | } | |||
| 4550 | ||||
| 4551 | PrevReg = Reg; | |||
| 4552 | ++Count; | |||
| 4553 | } | |||
| 4554 | } | |||
| 4555 | ||||
| 4556 | if (parseToken(AsmToken::RCurly, "'}' expected")) | |||
| 4557 | return MatchOperand_ParseFail; | |||
| 4558 | ||||
| 4559 | if (Count > 4) { | |||
| 4560 | Error(S, "invalid number of vectors"); | |||
| 4561 | return MatchOperand_ParseFail; | |||
| 4562 | } | |||
| 4563 | ||||
| 4564 | unsigned NumElements = 0; | |||
| 4565 | unsigned ElementWidth = 0; | |||
| 4566 | if (!Kind.empty()) { | |||
| 4567 | if (const auto &VK = parseVectorKind(Kind, VectorKind)) | |||
| 4568 | std::tie(NumElements, ElementWidth) = *VK; | |||
| 4569 | } | |||
| 4570 | ||||
| 4571 | Operands.push_back(AArch64Operand::CreateVectorList( | |||
| 4572 | FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S, | |||
| 4573 | getLoc(), getContext())); | |||
| 4574 | ||||
| 4575 | return MatchOperand_Success; | |||
| 4576 | } | |||
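| | // Illustrative vector lists for the parser above (which lists are legal for a | |||
| | // given instruction is decided later by the matcher): | |||
| | //   { v0.8h, v1.8h, v2.8h, v3.8h }   // Neon list, stride 1 | |||
| | //   { z0.s - z3.s }                  // SVE range form, Count = 4 | |||
| | //   { z0.d, z8.d }                   // SME2 strided list, Stride = 8 | |||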
| 4577 | ||||
| 4578 | /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions. | |||
| 4579 | bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) { | |||
| 4580 | auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true); | |||
| 4581 | if (ParseRes != MatchOperand_Success) | |||
| 4582 | return true; | |||
| 4583 | ||||
| 4584 | return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; | |||
| 4585 | } | |||
| 4586 | ||||
| 4587 | OperandMatchResultTy | |||
| 4588 | AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) { | |||
| 4589 | SMLoc StartLoc = getLoc(); | |||
| 4590 | ||||
| 4591 | MCRegister RegNum; | |||
| 4592 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); | |||
| 4593 | if (Res != MatchOperand_Success) | |||
| 4594 | return Res; | |||
| 4595 | ||||
| 4596 | if (!parseOptionalToken(AsmToken::Comma)) { | |||
| 4597 | Operands.push_back(AArch64Operand::CreateReg( | |||
| 4598 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
| 4599 | return MatchOperand_Success; | |||
| 4600 | } | |||
| 4601 | ||||
| 4602 | parseOptionalToken(AsmToken::Hash); | |||
| 4603 | ||||
| 4604 | if (getTok().isNot(AsmToken::Integer)) { | |||
| 4605 | Error(getLoc(), "index must be absent or #0"); | |||
| 4606 | return MatchOperand_ParseFail; | |||
| 4607 | } | |||
| 4608 | ||||
| 4609 | const MCExpr *ImmVal; | |||
| 4610 | if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) || | |||
| 4611 | cast<MCConstantExpr>(ImmVal)->getValue() != 0) { | |||
| 4612 | Error(getLoc(), "index must be absent or #0"); | |||
| 4613 | return MatchOperand_ParseFail; | |||
| 4614 | } | |||
| 4615 | ||||
| 4616 | Operands.push_back(AArch64Operand::CreateReg( | |||
| 4617 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext())); | |||
| 4618 | return MatchOperand_Success; | |||
| 4619 | } | |||
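| | // The operand above is written either as a bare register ("x3") or with an | |||
| | // explicit zero index ("x3, #0"); any other index value is rejected. | |||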
| 4620 | ||||
| 4621 | OperandMatchResultTy | |||
| 4622 | AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) { | |||
| 4623 | SMLoc StartLoc = getLoc(); | |||
| 4624 | const AsmToken &Tok = getTok(); | |||
| 4625 | std::string Name = Tok.getString().lower(); | |||
| 4626 | ||||
| 4627 | unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable); | |||
| 4628 | ||||
| 4629 | if (RegNum == 0) | |||
| 4630 | return MatchOperand_NoMatch; | |||
| 4631 | ||||
| 4632 | Operands.push_back(AArch64Operand::CreateReg( | |||
| 4633 | RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext())); | |||
| 4634 | Lex(); // Eat identifier token. | |||
| 4635 | ||||
| 4636 | // Check if register is followed by an index | |||
| 4637 | if (parseOptionalToken(AsmToken::LBrac)) { | |||
| 4638 | const MCExpr *ImmVal; | |||
| 4639 | if (getParser().parseExpression(ImmVal)) | |||
| 4640 | return MatchOperand_NoMatch; | |||
| 4641 | const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 4642 | if (!MCE) { | |||
| 4643 | TokError("immediate value expected for vector index"); | |||
| 4644 | return MatchOperand_ParseFail; | |||
| 4645 | } | |||
| 4646 | if (parseToken(AsmToken::RBrac, "']' expected")) | |||
| 4647 | return MatchOperand_ParseFail; | |||
| 4648 | ||||
| 4649 | Operands.push_back(AArch64Operand::CreateImm( | |||
| 4650 | MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc, | |||
| 4651 | getLoc(), getContext())); | |||
| 4652 | } | |||
| 4653 | ||||
| 4654 | return MatchOperand_Success; | |||
| 4655 | } | |||
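| | // Illustrative forms (assuming SME2 lookup-table syntax): "zt0" on its own, | |||
| | // or with an immediate index, e.g. "zt0[0]". | |||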
| 4656 | ||||
| 4657 | template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy> | |||
| 4658 | OperandMatchResultTy | |||
| 4659 | AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) { | |||
| 4660 | SMLoc StartLoc = getLoc(); | |||
| 4661 | ||||
| 4662 | MCRegister RegNum; | |||
| 4663 | OperandMatchResultTy Res = tryParseScalarRegister(RegNum); | |||
| 4664 | if (Res != MatchOperand_Success) | |||
| 4665 | return Res; | |||
| 4666 | ||||
| 4667 | // No shift/extend is the default. | |||
| 4668 | if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) { | |||
| 4669 | Operands.push_back(AArch64Operand::CreateReg( | |||
| 4670 | RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy)); | |||
| 4671 | return MatchOperand_Success; | |||
| 4672 | } | |||
| 4673 | ||||
| 4674 | // Eat the comma | |||
| 4675 | Lex(); | |||
| 4676 | ||||
| 4677 | // Match the shift | |||
| 4678 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; | |||
| 4679 | Res = tryParseOptionalShiftExtend(ExtOpnd); | |||
| 4680 | if (Res != MatchOperand_Success) | |||
| 4681 | return Res; | |||
| 4682 | ||||
| 4683 | auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get()); | |||
| 4684 | Operands.push_back(AArch64Operand::CreateReg( | |||
| 4685 | RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy, | |||
| 4686 | Ext->getShiftExtendType(), Ext->getShiftExtendAmount(), | |||
| 4687 | Ext->hasShiftExtendAmount())); | |||
| 4688 | ||||
| 4689 | return MatchOperand_Success; | |||
| 4690 | } | |||
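| | // Illustrative forms: a bare scalar register ("x1"), or, when ParseShiftExtend | |||
| | // is enabled, a register with a shift/extend such as "x1, lsl #2" or | |||
| | // "w2, uxtw #3". | |||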
| 4691 | ||||
| 4692 | bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) { | |||
| 4693 | MCAsmParser &Parser = getParser(); | |||
| 4694 | ||||
| 4695 | // Some SVE instructions have a decoration after the immediate, i.e. | |||
| 4696 | // "mul vl". We parse them here and add tokens, which must be present in the | |||
| 4697 | // asm string in the tablegen instruction. | |||
| 4698 | bool NextIsVL = | |||
| 4699 | Parser.getLexer().peekTok().getString().equals_insensitive("vl"); | |||
| 4700 | bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash); | |||
| 4701 | if (!getTok().getString().equals_insensitive("mul") || | |||
| 4702 | !(NextIsVL || NextIsHash)) | |||
| 4703 | return true; | |||
| 4704 | ||||
| 4705 | Operands.push_back( | |||
| 4706 | AArch64Operand::CreateToken("mul", getLoc(), getContext())); | |||
| 4707 | Lex(); // Eat the "mul" | |||
| 4708 | ||||
| 4709 | if (NextIsVL) { | |||
| 4710 | Operands.push_back( | |||
| 4711 | AArch64Operand::CreateToken("vl", getLoc(), getContext())); | |||
| 4712 | Lex(); // Eat the "vl" | |||
| 4713 | return false; | |||
| 4714 | } | |||
| 4715 | ||||
| 4716 | if (NextIsHash) { | |||
| 4717 | Lex(); // Eat the # | |||
| 4718 | SMLoc S = getLoc(); | |||
| 4719 | ||||
| 4720 | // Parse immediate operand. | |||
| 4721 | const MCExpr *ImmVal; | |||
| 4722 | if (!Parser.parseExpression(ImmVal)) | |||
| 4723 | if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) { | |||
| 4724 | Operands.push_back(AArch64Operand::CreateImm( | |||
| 4725 | MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(), | |||
| 4726 | getContext())); | |||
| 4727 | return MatchOperand_Success; | |||
| 4728 | } | |||
| 4729 | } | |||
| 4730 | ||||
| 4731 | return Error(getLoc(), "expected 'vl' or '#<imm>'"); | |||
| 4732 | } | |||
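| | // Illustrative decorations handled above (typical SVE forms): | |||
| | //   ldr z0, [x0, #1, mul vl]   // "mul vl" after an immediate offset | |||
| | //   cntb x0, all, mul #4       // "mul #<imm>" multiplier | |||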
| 4733 | ||||
| 4734 | bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands, | |||
| 4735 | StringRef &VecGroup) { | |||
| 4736 | MCAsmParser &Parser = getParser(); | |||
| 4737 | auto Tok = Parser.getTok(); | |||
| 4738 | if (Tok.isNot(AsmToken::Identifier)) | |||
| 4739 | return true; | |||
| 4740 | ||||
| 4741 | StringRef VG = StringSwitch<StringRef>(Tok.getString().lower()) | |||
| 4742 | .Case("vgx2", "vgx2") | |||
| 4743 | .Case("vgx4", "vgx4") | |||
| 4744 | .Default(""); | |||
| 4745 | ||||
| 4746 | if (VG.empty()) | |||
| 4747 | return true; | |||
| 4748 | ||||
| 4749 | VecGroup = VG; | |||
| 4750 | Parser.Lex(); // Eat vgx[2|4] | |||
| 4751 | return false; | |||
| 4752 | } | |||
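| | // Illustrative use (assumed SME2 syntax): the vector-group marker appears in | |||
| | // forms such as "za.s[w8, 0, vgx2]" on multi-vector instructions. | |||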
| 4753 | ||||
| 4754 | bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) { | |||
| 4755 | auto Tok = getTok(); | |||
| 4756 | if (Tok.isNot(AsmToken::Identifier)) | |||
| 4757 | return true; | |||
| 4758 | ||||
| 4759 | auto Keyword = Tok.getString(); | |||
| 4760 | Keyword = StringSwitch<StringRef>(Keyword.lower()) | |||
| 4761 | .Case("sm", "sm") | |||
| 4762 | .Case("za", "za") | |||
| 4763 | .Default(Keyword); | |||
| 4764 | Operands.push_back( | |||
| 4765 | AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext())); | |||
| 4766 | ||||
| 4767 | Lex(); | |||
| 4768 | return false; | |||
| 4769 | } | |||
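| | // Illustrative keyword operands: "smstart sm", "smstart za", or "brb iall"; | |||
| | // identifiers other than "sm"/"za" are passed through unchanged. | |||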
| 4770 | ||||
| 4771 | /// parseOperand - Parse an AArch64 instruction operand. For now this parses the | |||
| 4772 | /// operand regardless of the mnemonic. | |||
| 4773 | bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode, | |||
| 4774 | bool invertCondCode) { | |||
| 4775 | MCAsmParser &Parser = getParser(); | |||
| 4776 | ||||
| 4777 | OperandMatchResultTy ResTy = | |||
| 4778 | MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true); | |||
| 4779 | ||||
| 4780 | // Check if the current operand has a custom associated parser; if so, try to | |||
| 4781 | // custom parse the operand, otherwise fall back to the general approach. | |||
| 4782 | if (ResTy == MatchOperand_Success) | |||
| 4783 | return false; | |||
| 4784 | // If there wasn't a custom match, try the generic matcher below. Otherwise, | |||
| 4785 | // there was a match, but an error occurred, in which case, just return that | |||
| 4786 | // the operand parsing failed. | |||
| 4787 | if (ResTy == MatchOperand_ParseFail) | |||
| 4788 | return true; | |||
| 4789 | ||||
| 4790 | // Nothing custom, so do general case parsing. | |||
| 4791 | SMLoc S, E; | |||
| 4792 | switch (getLexer().getKind()) { | |||
| 4793 | default: { | |||
| 4794 | SMLoc S = getLoc(); | |||
| 4795 | const MCExpr *Expr; | |||
| 4796 | if (parseSymbolicImmVal(Expr)) | |||
| 4797 | return Error(S, "invalid operand"); | |||
| 4798 | ||||
| 4799 | SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 4800 | Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); | |||
| 4801 | return false; | |||
| 4802 | } | |||
| 4803 | case AsmToken::LBrac: { | |||
| 4804 | Operands.push_back( | |||
| 4805 | AArch64Operand::CreateToken("[", getLoc(), getContext())); | |||
| 4806 | Lex(); // Eat '[' | |||
| 4807 | ||||
| 4808 | // There's no comma after a '[', so we can parse the next operand | |||
| 4809 | // immediately. | |||
| 4810 | return parseOperand(Operands, false, false); | |||
| 4811 | } | |||
| 4812 | case AsmToken::LCurly: { | |||
| 4813 | if (!parseNeonVectorList(Operands)) | |||
| 4814 | return false; | |||
| 4815 | ||||
| 4816 | Operands.push_back( | |||
| 4817 | AArch64Operand::CreateToken("{", getLoc(), getContext())); | |||
| 4818 | Lex(); // Eat '{' | |||
| 4819 | ||||
| 4820 | // There's no comma after a '{', so we can parse the next operand | |||
| 4821 | // immediately. | |||
| 4822 | return parseOperand(Operands, false, false); | |||
| 4823 | } | |||
| 4824 | case AsmToken::Identifier: { | |||
| 4825 | // See if this is a "VG" decoration used by SME instructions. | |||
| 4826 | StringRef VecGroup; | |||
| 4827 | if (!parseOptionalVGOperand(Operands, VecGroup)) { | |||
| 4828 | Operands.push_back( | |||
| 4829 | AArch64Operand::CreateToken(VecGroup, getLoc(), getContext())); | |||
| 4830 | return false; | |||
| 4831 | } | |||
| 4832 | // If we're expecting a Condition Code operand, then just parse that. | |||
| 4833 | if (isCondCode) | |||
| 4834 | return parseCondCode(Operands, invertCondCode); | |||
| 4835 | ||||
| 4836 | // If it's a register name, parse it. | |||
| 4837 | if (!parseRegister(Operands)) | |||
| 4838 | return false; | |||
| 4839 | ||||
| 4840 | // See if this is a "mul vl" decoration or "mul #<int>" operand used | |||
| 4841 | // by SVE instructions. | |||
| 4842 | if (!parseOptionalMulOperand(Operands)) | |||
| 4843 | return false; | |||
| 4844 | ||||
| 4845 | // This could be an optional "shift" or "extend" operand. | |||
| 4846 | OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands); | |||
| 4847 | // We can only continue if no tokens were eaten. | |||
| 4848 | if (GotShift != MatchOperand_NoMatch) | |||
| 4849 | return GotShift; | |||
| 4850 | ||||
| 4851 | // If this is a two-word mnemonic, parse its special keyword | |||
| 4852 | // operand as an identifier. | |||
| 4853 | if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" || | |||
| 4854 | Mnemonic == "gcsb") | |||
| 4855 | return parseKeywordOperand(Operands); | |||
| 4856 | ||||
| 4857 | // This was not a register so parse other operands that start with an | |||
| 4858 | // identifier (like labels) as expressions and create them as immediates. | |||
| 4859 | const MCExpr *IdVal; | |||
| 4860 | S = getLoc(); | |||
| 4861 | if (getParser().parseExpression(IdVal)) | |||
| 4862 | return true; | |||
| 4863 | E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 4864 | Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext())); | |||
| 4865 | return false; | |||
| 4866 | } | |||
| 4867 | case AsmToken::Integer: | |||
| 4868 | case AsmToken::Real: | |||
| 4869 | case AsmToken::Hash: { | |||
| 4870 | // #42 -> immediate. | |||
| 4871 | S = getLoc(); | |||
| 4872 | ||||
| 4873 | parseOptionalToken(AsmToken::Hash); | |||
| 4874 | ||||
| 4875 | // Parse a negative sign | |||
| 4876 | bool isNegative = false; | |||
| 4877 | if (getTok().is(AsmToken::Minus)) { | |||
| 4878 | isNegative = true; | |||
| 4879 | // We need to consume this token only when we have a Real, otherwise | |||
| 4880 | // we let parseSymbolicImmVal take care of it | |||
| 4881 | if (Parser.getLexer().peekTok().is(AsmToken::Real)) | |||
| 4882 | Lex(); | |||
| 4883 | } | |||
| 4884 | ||||
| 4885 | // The only Real that should come through here is a literal #0.0 for | |||
| 4886 | // the fcmp[e] r, #0.0 instructions. They expect raw token operands, | |||
| 4887 | // so convert the value. | |||
| 4888 | const AsmToken &Tok = getTok(); | |||
| 4889 | if (Tok.is(AsmToken::Real)) { | |||
| 4890 | APFloat RealVal(APFloat::IEEEdouble(), Tok.getString()); | |||
| 4891 | uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); | |||
| 4892 | if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" && | |||
| 4893 | Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" && | |||
| 4894 | Mnemonic != "fcmlt" && Mnemonic != "fcmne") | |||
| 4895 | return TokError("unexpected floating point literal"); | |||
| 4896 | else if (IntVal != 0 || isNegative) | |||
| 4897 | return TokError("expected floating-point constant #0.0"); | |||
| 4898 | Lex(); // Eat the token. | |||
| 4899 | ||||
| 4900 | Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext())); | |||
| 4901 | Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext())); | |||
| 4902 | return false; | |||
| 4903 | } | |||
| 4904 | ||||
| 4905 | const MCExpr *ImmVal; | |||
| 4906 | if (parseSymbolicImmVal(ImmVal)) | |||
| 4907 | return true; | |||
| 4908 | ||||
| 4909 | E = SMLoc::getFromPointer(getLoc().getPointer() - 1); | |||
| 4910 | Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext())); | |||
| 4911 | return false; | |||
| 4912 | } | |||
| 4913 | case AsmToken::Equal: { | |||
| 4914 | SMLoc Loc = getLoc(); | |||
| 4915 | if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val) | |||
| 4916 | return TokError("unexpected token in operand"); | |||
| 4917 | Lex(); // Eat '=' | |||
| 4918 | const MCExpr *SubExprVal; | |||
| 4919 | if (getParser().parseExpression(SubExprVal)) | |||
| 4920 | return true; | |||
| 4921 | ||||
| 4922 | if (Operands.size() < 2 || | |||
| 4923 | !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg()) | |||
| 4924 | return Error(Loc, "Only valid when first operand is register"); | |||
| 4925 | ||||
| 4926 | bool IsXReg = | |||
| 4927 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
| 4928 | Operands[1]->getReg()); | |||
| 4929 | ||||
| 4930 | MCContext& Ctx = getContext(); | |||
| 4931 | E = SMLoc::getFromPointer(Loc.getPointer() - 1); | |||
| 4932 | // If the op is an imm and can fit into a mov, then replace ldr with mov. | |||
| 4933 | if (isa<MCConstantExpr>(SubExprVal)) { | |||
| 4934 | uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue(); | |||
| 4935 | uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16; | |||
| 4936 | while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) { | |||
| 4937 | ShiftAmt += 16; | |||
| 4938 | Imm >>= 16; | |||
| 4939 | } | |||
| 4940 | if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) { | |||
| 4941 | Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx); | |||
| 4942 | Operands.push_back(AArch64Operand::CreateImm( | |||
| 4943 | MCConstantExpr::create(Imm, Ctx), S, E, Ctx)); | |||
| 4944 | if (ShiftAmt) | |||
| 4945 | Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL, | |||
| 4946 | ShiftAmt, true, S, E, Ctx)); | |||
| 4947 | return false; | |||
| 4948 | } | |||
| 4949 | APInt Simm = APInt(64, Imm << ShiftAmt); | |||
| 4950 | // check if the immediate is an unsigned or signed 32-bit int for W regs | |||
| 4951 | if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32))) | |||
| 4952 | return Error(Loc, "Immediate too large for register"); | |||
| 4953 | } | |||
| 4954 | // If it is a label or an imm that cannot fit in a movz, put it into CP. | |||
| 4955 | const MCExpr *CPLoc = | |||
| 4956 | getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc); | |||
| 4957 | Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx)); | |||
| 4958 | return false; | |||
| 4959 | } | |||
| 4960 | } | |||
| 4961 | } | |||
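| | // Worked example for the "ldr <reg>, =<value>" pseudo handled above: | |||
| | //   ldr x0, =0x20000   ->  movz x0, #2, lsl #16      (value fits a movz) | |||
| | //   ldr x0, =sym       ->  ldr  x0, <constant-pool entry for sym> | |||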
| 4962 | ||||
| 4963 | bool AArch64AsmParser::parseImmExpr(int64_t &Out) { | |||
| 4964 | const MCExpr *Expr = nullptr; | |||
| 4965 | SMLoc L = getLoc(); | |||
| 4966 | if (check(getParser().parseExpression(Expr), L, "expected expression")) | |||
| 4967 | return true; | |||
| 4968 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); | |||
| 4969 | if (check(!Value, L, "expected constant expression")) | |||
| 4970 | return true; | |||
| 4971 | Out = Value->getValue(); | |||
| 4972 | return false; | |||
| 4973 | } | |||
| 4974 | ||||
| 4975 | bool AArch64AsmParser::parseComma() { | |||
| 4976 | if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma")) | |||
| 4977 | return true; | |||
| 4978 | // Eat the comma | |||
| 4979 | Lex(); | |||
| 4980 | return false; | |||
| 4981 | } | |||
| 4982 | ||||
| 4983 | bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base, | |||
| 4984 | unsigned First, unsigned Last) { | |||
| 4985 | MCRegister Reg; | |||
| 4986 | SMLoc Start, End; | |||
| 4987 | if (check(parseRegister(Reg, Start, End), getLoc(), "expected register")) | |||
| 4988 | return true; | |||
| 4989 | ||||
| 4990 | // Special handling for FP and LR; they aren't linearly after x28 in | |||
| 4991 | // the registers enum. | |||
| 4992 | unsigned RangeEnd = Last; | |||
| 4993 | if (Base == AArch64::X0) { | |||
| 4994 | if (Last == AArch64::FP) { | |||
| 4995 | RangeEnd = AArch64::X28; | |||
| 4996 | if (Reg == AArch64::FP) { | |||
| 4997 | Out = 29; | |||
| 4998 | return false; | |||
| 4999 | } | |||
| 5000 | } | |||
| 5001 | if (Last == AArch64::LR) { | |||
| 5002 | RangeEnd = AArch64::X28; | |||
| 5003 | if (Reg == AArch64::FP) { | |||
| 5004 | Out = 29; | |||
| 5005 | return false; | |||
| 5006 | } else if (Reg == AArch64::LR) { | |||
| 5007 | Out = 30; | |||
| 5008 | return false; | |||
| 5009 | } | |||
| 5010 | } | |||
| 5011 | } | |||
| 5012 | ||||
| 5013 | if (check(Reg < First || Reg > RangeEnd, Start, | |||
| 5014 | Twine("expected register in range ") + | |||
| 5015 | AArch64InstPrinter::getRegisterName(First) + " to " + | |||
| 5016 | AArch64InstPrinter::getRegisterName(Last))) | |||
| 5017 | return true; | |||
| 5018 | Out = Reg - Base; | |||
| 5019 | return false; | |||
| 5020 | } | |||
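| | // Illustrative mapping performed above with Base == X0: "x7" -> 7, while the | |||
| | // aliases "fp" and "lr" map to 29 and 30 even though they are not contiguous | |||
| | // with x28 in the register enum. | |||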
| 5021 | ||||
| 5022 | bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1, | |||
| 5023 | const MCParsedAsmOperand &Op2) const { | |||
| 5024 | auto &AOp1 = static_cast<const AArch64Operand&>(Op1); | |||
| 5025 | auto &AOp2 = static_cast<const AArch64Operand&>(Op2); | |||
| 5026 | ||||
| 5027 | if (AOp1.isVectorList() && AOp2.isVectorList()) | |||
| 5028 | return AOp1.getVectorListCount() == AOp2.getVectorListCount() && | |||
| 5029 | AOp1.getVectorListStart() == AOp2.getVectorListStart() && | |||
| 5030 | AOp1.getVectorListStride() == AOp2.getVectorListStride(); | |||
| 5031 | ||||
| 5032 | if (!AOp1.isReg() || !AOp2.isReg()) | |||
| 5033 | return false; | |||
| 5034 | ||||
| 5035 | if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg && | |||
| 5036 | AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg) | |||
| 5037 | return MCTargetAsmParser::areEqualRegs(Op1, Op2); | |||
| 5038 | ||||
| 5039 | assert(AOp1.isScalarReg() && AOp2.isScalarReg() && | |||
| 5040 | "Testing equality of non-scalar registers not supported"); | |||
| 5041 | ||||
| 5042 | // Check whether the registers match via their sub/super register classes. | |||
| 5043 | if (AOp1.getRegEqualityTy() == EqualsSuperReg) | |||
| 5044 | return getXRegFromWReg(Op1.getReg()) == Op2.getReg(); | |||
| 5045 | if (AOp1.getRegEqualityTy() == EqualsSubReg) | |||
| 5046 | return getWRegFromXReg(Op1.getReg()) == Op2.getReg(); | |||
| 5047 | if (AOp2.getRegEqualityTy() == EqualsSuperReg) | |||
| 5048 | return getXRegFromWReg(Op2.getReg()) == Op1.getReg(); | |||
| 5049 | if (AOp2.getRegEqualityTy() == EqualsSubReg) | |||
| 5050 | return getWRegFromXReg(Op2.getReg()) == Op1.getReg(); | |||
| 5051 | ||||
| 5052 | return false; | |||
| 5053 | } | |||
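| | // Illustrative tied-operand check: with EqualsSuperReg, "w3" is accepted as | |||
| | // matching "x3"; EqualsSubReg covers the reverse direction. | |||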
| 5054 | ||||
| 5055 | /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its | |||
| 5056 | /// operands. | |||
| 5057 | bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info, | |||
| 5058 | StringRef Name, SMLoc NameLoc, | |||
| 5059 | OperandVector &Operands) { | |||
| 5060 | Name = StringSwitch<StringRef>(Name.lower()) | |||
| 5061 | .Case("beq", "b.eq") | |||
| 5062 | .Case("bne", "b.ne") | |||
| 5063 | .Case("bhs", "b.hs") | |||
| 5064 | .Case("bcs", "b.cs") | |||
| 5065 | .Case("blo", "b.lo") | |||
| 5066 | .Case("bcc", "b.cc") | |||
| 5067 | .Case("bmi", "b.mi") | |||
| 5068 | .Case("bpl", "b.pl") | |||
| 5069 | .Case("bvs", "b.vs") | |||
| 5070 | .Case("bvc", "b.vc") | |||
| 5071 | .Case("bhi", "b.hi") | |||
| 5072 | .Case("bls", "b.ls") | |||
| 5073 | .Case("bge", "b.ge") | |||
| 5074 | .Case("blt", "b.lt") | |||
| 5075 | .Case("bgt", "b.gt") | |||
| 5076 | .Case("ble", "b.le") | |||
| 5077 | .Case("bal", "b.al") | |||
| 5078 | .Case("bnv", "b.nv") | |||
| 5079 | .Default(Name); | |||
| 5080 | ||||
| 5081 | // First check for the AArch64-specific .req directive. | |||
| 5082 | if (getTok().is(AsmToken::Identifier) && | |||
| 5083 | getTok().getIdentifier().lower() == ".req") { | |||
| 5084 | parseDirectiveReq(Name, NameLoc); | |||
| 5085 | // We always return 'error' for this, as we're done with this | |||
| 5086 | // statement and don't need to match the instruction. | |||
| 5087 | return true; | |||
| 5088 | } | |||
| 5089 | ||||
| 5090 | // Create the leading tokens for the mnemonic, split by '.' characters. | |||
| 5091 | size_t Start = 0, Next = Name.find('.'); | |||
| 5092 | StringRef Head = Name.slice(Start, Next); | |||
| 5093 | ||||
| 5094 | // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for | |||
| 5095 | // the SYS instruction. | |||
| 5096 | if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" || | |||
| 5097 | Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp") | |||
| 5098 | return parseSysAlias(Head, NameLoc, Operands); | |||
| 5099 | ||||
| 5100 | // TLBIP instructions are aliases for the SYSP instruction. | |||
| 5101 | if (Head == "tlbip") | |||
| 5102 | return parseSyspAlias(Head, NameLoc, Operands); | |||
| 5103 | ||||
| 5104 | Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext())); | |||
| 5105 | Mnemonic = Head; | |||
| 5106 | ||||
| 5107 | // Handle condition codes for a branch mnemonic | |||
| 5108 | if ((Head == "b" || Head == "bc") && Next != StringRef::npos) { | |||
| 5109 | Start = Next; | |||
| 5110 | Next = Name.find('.', Start + 1); | |||
| 5111 | Head = Name.slice(Start + 1, Next); | |||
| 5112 | ||||
| 5113 | SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() + | |||
| 5114 | (Head.data() - Name.data())); | |||
| 5115 | std::string Suggestion; | |||
| 5116 | AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion); | |||
| 5117 | if (CC == AArch64CC::Invalid) { | |||
| 5118 | std::string Msg = "invalid condition code"; | |||
| 5119 | if (!Suggestion.empty()) | |||
| 5120 | Msg += ", did you mean " + Suggestion + "?"; | |||
| 5121 | return Error(SuffixLoc, Msg); | |||
| 5122 | } | |||
| 5123 | Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(), | |||
| 5124 | /*IsSuffix=*/true)); | |||
| 5125 | Operands.push_back( | |||
| 5126 | AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext())); | |||
| 5127 | } | |||
| 5128 | ||||
| 5129 | // Add the remaining tokens in the mnemonic. | |||
| 5130 | while (Next != StringRef::npos) { | |||
| 5131 | Start = Next; | |||
| 5132 | Next = Name.find('.', Start + 1); | |||
| 5133 | Head = Name.slice(Start, Next); | |||
| 5134 | SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() + | |||
| 5135 | (Head.data() - Name.data()) + 1); | |||
| 5136 | Operands.push_back(AArch64Operand::CreateToken( | |||
| 5137 | Head, SuffixLoc, getContext(), /*IsSuffix=*/true)); | |||
| 5138 | } | |||
| 5139 | ||||
| 5140 | // Conditional compare instructions have a Condition Code operand, which needs | |||
| 5141 | // to be parsed and an immediate operand created. | |||
| 5142 | bool condCodeFourthOperand = | |||
| 5143 | (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" || | |||
| 5144 | Head == "fccmpe" || Head == "fcsel" || Head == "csel" || | |||
| 5145 | Head == "csinc" || Head == "csinv" || Head == "csneg"); | |||
| 5146 | ||||
| 5147 | // These instructions are aliases to some of the conditional select | |||
| 5148 | // instructions. However, the condition code is inverted in the aliased | |||
| 5149 | // instruction. | |||
| 5150 | // | |||
| 5151 | // FIXME: Is this the correct way to handle these? Or should the parser | |||
| 5152 | // generate the aliased instructions directly? | |||
| 5153 | bool condCodeSecondOperand = (Head == "cset" || Head == "csetm"); | |||
| 5154 | bool condCodeThirdOperand = | |||
| 5155 | (Head == "cinc" || Head == "cinv" || Head == "cneg"); | |||
| 5156 | ||||
| 5157 | // Read the remaining operands. | |||
| 5158 | if (getLexer().isNot(AsmToken::EndOfStatement)) { | |||
| 5159 | ||||
| 5160 | unsigned N = 1; | |||
| 5161 | do { | |||
| 5162 | // Parse and remember the operand. | |||
| 5163 | if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) || | |||
| 5164 | (N == 3 && condCodeThirdOperand) || | |||
| 5165 | (N == 2 && condCodeSecondOperand), | |||
| 5166 | condCodeSecondOperand || condCodeThirdOperand)) { | |||
| 5167 | return true; | |||
| 5168 | } | |||
| 5169 | ||||
| 5170 | // After successfully parsing some operands there are three special cases | |||
| 5171 | // to consider (i.e. notional operands not separated by commas). Two are | |||
| 5172 | // due to memory specifiers: | |||
| 5173 | // + An RBrac will end an address for load/store/prefetch | |||
| 5174 | // + An '!' will indicate a pre-indexed operation. | |||
| 5175 | // | |||
| 5176 | // And a further case is '}', which ends a group of tokens specifying the | |||
| 5177 | // SME accumulator array 'ZA' or tile vector, i.e. | |||
| 5178 | // | |||
| 5179 | // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }' | |||
| 5180 | // | |||
| 5181 | // It's someone else's responsibility to make sure these tokens are sane | |||
| 5182 | // in the given context! | |||
| 5183 | ||||
| 5184 | if (parseOptionalToken(AsmToken::RBrac)) | |||
| 5185 | Operands.push_back( | |||
| 5186 | AArch64Operand::CreateToken("]", getLoc(), getContext())); | |||
| 5187 | if (parseOptionalToken(AsmToken::Exclaim)) | |||
| 5188 | Operands.push_back( | |||
| 5189 | AArch64Operand::CreateToken("!", getLoc(), getContext())); | |||
| 5190 | if (parseOptionalToken(AsmToken::RCurly)) | |||
| 5191 | Operands.push_back( | |||
| 5192 | AArch64Operand::CreateToken("}", getLoc(), getContext())); | |||
| 5193 | ||||
| 5194 | ++N; | |||
| 5195 | } while (parseOptionalToken(AsmToken::Comma)); | |||
| 5196 | } | |||
| 5197 | ||||
| 5198 | if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) | |||
| 5199 | return true; | |||
| 5200 | ||||
| 5201 | return false; | |||
| 5202 | } | |||
| 5203 | ||||
| 5204 | static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) { | |||
| 5205 | assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31)); | |||
| 5206 | return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) || | |||
| 5207 | (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) || | |||
| 5208 | (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) || | |||
| 5209 | (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) || | |||
| 5210 | (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) || | |||
| 5211 | (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0)); | |||
| 5212 | } | |||
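| | // Illustration: Z5 is reported as matching B5, H5, S5, D5, Q5 or Z5 itself, | |||
| | // since these name views of the same underlying SVE register. | |||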
| 5213 | ||||
| 5214 | // FIXME: This entire function is a giant hack to provide us with decent | |||
| 5215 | // operand range validation/diagnostics until TableGen/MC can be extended | |||
| 5216 | // to support autogeneration of this kind of validation. | |||
| 5217 | bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc, | |||
| 5218 | SmallVectorImpl<SMLoc> &Loc) { | |||
| 5219 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); | |||
| 5220 | const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); | |||
| 5221 | ||||
| 5222 | // A prefix only applies to the instruction following it. Here we extract | |||
| 5223 | // prefix information for the next instruction before validating the current | |||
| 5224 | // one so that in the case of failure we don't erroneously continue using the | |||
| 5225 | // current prefix. | |||
| 5226 | PrefixInfo Prefix = NextPrefix; | |||
| 5227 | NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags); | |||
| 5228 | ||||
| 5229 | // Before validating the instruction in isolation we run through the rules | |||
| 5230 | // applicable when it follows a prefix instruction. | |||
| 5231 | // NOTE: brk & hlt can be prefixed but require no additional validation. | |||
| 5232 | if (Prefix.isActive() && | |||
| 5233 | (Inst.getOpcode() != AArch64::BRK) && | |||
| 5234 | (Inst.getOpcode() != AArch64::HLT)) { | |||
| 5235 | ||||
| 5236 | // Prefixed instructions must have a destructive operand. | |||
| 5237 | if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) == | |||
| 5238 | AArch64::NotDestructive) | |||
| 5239 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
| 5240 | " movprfx, suggest replacing movprfx with mov"); | |||
| 5241 | ||||
| 5242 | // Destination operands must match. | |||
| 5243 | if (Inst.getOperand(0).getReg() != Prefix.getDstReg()) | |||
| 5244 | return Error(Loc[0], "instruction is unpredictable when following a" | |||
| 5245 | " movprfx writing to a different destination"); | |||
| 5246 | ||||
| 5247 | // Destination operand must not be used in any other location. | |||
| 5248 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) { | |||
| 5249 | if (Inst.getOperand(i).isReg() && | |||
| 5250 | (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) && | |||
| 5251 | isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg())) | |||
| 5252 | return Error(Loc[0], "instruction is unpredictable when following a" | |||
| 5253 | " movprfx and destination also used as non-destructive" | |||
| 5254 | " source"); | |||
| 5255 | } | |||
| 5256 | ||||
| 5257 | auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID]; | |||
| 5258 | if (Prefix.isPredicated()) { | |||
| 5259 | int PgIdx = -1; | |||
| 5260 | ||||
| 5261 | // Find the instruction's general predicate. | |||
| 5262 | for (unsigned i = 1; i < Inst.getNumOperands(); ++i) | |||
| 5263 | if (Inst.getOperand(i).isReg() && | |||
| 5264 | PPRRegClass.contains(Inst.getOperand(i).getReg())) { | |||
| 5265 | PgIdx = i; | |||
| 5266 | break; | |||
| 5267 | } | |||
| 5268 | ||||
| 5269 | // Instruction must be predicated if the movprfx is predicated. | |||
| 5270 | if (PgIdx == -1 || | |||
| 5271 | (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone) | |||
| 5272 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
| 5273 | " predicated movprfx, suggest using unpredicated movprfx"); | |||
| 5274 | ||||
| 5275 | // Instruction must use same general predicate as the movprfx. | |||
| 5276 | if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg()) | |||
| 5277 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
| 5278 | " predicated movprfx using a different general predicate"); | |||
| 5279 | ||||
| 5280 | // Instruction element type must match the movprfx. | |||
| 5281 | if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize()) | |||
| 5282 | return Error(IDLoc, "instruction is unpredictable when following a" | |||
| 5283 | " predicated movprfx with a different element size"); | |||
| 5284 | } | |||
| 5285 | } | |||
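| | // Illustrative movprfx pairings for the checks above (assumed examples): | |||
| | //   movprfx z0, z1                     // accepted when followed by a | |||
| | //   add     z0.s, p0/m, z0.s, z2.s     //   destructive op writing z0 | |||
| | //   movprfx z0, z1                     // rejected: the next instruction | |||
| | //   add     z3.s, p0/m, z3.s, z2.s     //   writes a different destination | |||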
| 5286 | ||||
| 5287 | // Check for indexed addressing modes w/ the base register being the | |||
| 5288 | // same as a destination/source register or pair load where | |||
| 5289 | // the Rt == Rt2. All of those are undefined behaviour. | |||
| 5290 | switch (Inst.getOpcode()) { | |||
| 5291 | case AArch64::LDPSWpre: | |||
| 5292 | case AArch64::LDPWpost: | |||
| 5293 | case AArch64::LDPWpre: | |||
| 5294 | case AArch64::LDPXpost: | |||
| 5295 | case AArch64::LDPXpre: { | |||
| 5296 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
| 5297 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
| 5298 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
| 5299 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
| 5300 | return Error(Loc[0], "unpredictable LDP instruction, writeback base " | |||
| 5301 | "is also a destination"); | |||
| 5302 | if (RI->isSubRegisterEq(Rn, Rt2)) | |||
| 5303 | return Error(Loc[1], "unpredictable LDP instruction, writeback base " | |||
| 5304 | "is also a destination"); | |||
| 5305 | [[fallthrough]]; | |||
| 5306 | } | |||
| 5307 | case AArch64::LDR_ZA: | |||
| 5308 | case AArch64::STR_ZA: { | |||
| 5309 | if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() && | |||
| 5310 | Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm()) | |||
| 5311 | return Error(Loc[1], | |||
| 5312 | "unpredictable instruction, immediate and offset mismatch."); | |||
| 5313 | break; | |||
| 5314 | } | |||
| 5315 | case AArch64::LDPDi: | |||
| 5316 | case AArch64::LDPQi: | |||
| 5317 | case AArch64::LDPSi: | |||
| 5318 | case AArch64::LDPSWi: | |||
| 5319 | case AArch64::LDPWi: | |||
| 5320 | case AArch64::LDPXi: { | |||
| 5321 | unsigned Rt = Inst.getOperand(0).getReg(); | |||
| 5322 | unsigned Rt2 = Inst.getOperand(1).getReg(); | |||
| 5323 | if (Rt == Rt2) | |||
| 5324 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); | |||
| 5325 | break; | |||
| 5326 | } | |||
| 5327 | case AArch64::LDPDpost: | |||
| 5328 | case AArch64::LDPDpre: | |||
| 5329 | case AArch64::LDPQpost: | |||
| 5330 | case AArch64::LDPQpre: | |||
| 5331 | case AArch64::LDPSpost: | |||
| 5332 | case AArch64::LDPSpre: | |||
| 5333 | case AArch64::LDPSWpost: { | |||
| 5334 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
| 5335 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
| 5336 | if (Rt == Rt2) | |||
| 5337 | return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt"); | |||
| 5338 | break; | |||
| 5339 | } | |||
| 5340 | case AArch64::STPDpost: | |||
| 5341 | case AArch64::STPDpre: | |||
| 5342 | case AArch64::STPQpost: | |||
| 5343 | case AArch64::STPQpre: | |||
| 5344 | case AArch64::STPSpost: | |||
| 5345 | case AArch64::STPSpre: | |||
| 5346 | case AArch64::STPWpost: | |||
| 5347 | case AArch64::STPWpre: | |||
| 5348 | case AArch64::STPXpost: | |||
| 5349 | case AArch64::STPXpre: { | |||
| 5350 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
| 5351 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
| 5352 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
| 5353 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
| 5354 | return Error(Loc[0], "unpredictable STP instruction, writeback base " | |||
| 5355 | "is also a source"); | |||
| 5356 | if (RI->isSubRegisterEq(Rn, Rt2)) | |||
| 5357 | return Error(Loc[1], "unpredictable STP instruction, writeback base " | |||
| 5358 | "is also a source"); | |||
| 5359 | break; | |||
| 5360 | } | |||
| 5361 | case AArch64::LDRBBpre: | |||
| 5362 | case AArch64::LDRBpre: | |||
| 5363 | case AArch64::LDRHHpre: | |||
| 5364 | case AArch64::LDRHpre: | |||
| 5365 | case AArch64::LDRSBWpre: | |||
| 5366 | case AArch64::LDRSBXpre: | |||
| 5367 | case AArch64::LDRSHWpre: | |||
| 5368 | case AArch64::LDRSHXpre: | |||
| 5369 | case AArch64::LDRSWpre: | |||
| 5370 | case AArch64::LDRWpre: | |||
| 5371 | case AArch64::LDRXpre: | |||
| 5372 | case AArch64::LDRBBpost: | |||
| 5373 | case AArch64::LDRBpost: | |||
| 5374 | case AArch64::LDRHHpost: | |||
| 5375 | case AArch64::LDRHpost: | |||
| 5376 | case AArch64::LDRSBWpost: | |||
| 5377 | case AArch64::LDRSBXpost: | |||
| 5378 | case AArch64::LDRSHWpost: | |||
| 5379 | case AArch64::LDRSHXpost: | |||
| 5380 | case AArch64::LDRSWpost: | |||
| 5381 | case AArch64::LDRWpost: | |||
| 5382 | case AArch64::LDRXpost: { | |||
| 5383 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
| 5384 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
| 5385 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
| 5386 | return Error(Loc[0], "unpredictable LDR instruction, writeback base " | |||
| 5387 | "is also a source"); | |||
| 5388 | break; | |||
| 5389 | } | |||
| 5390 | case AArch64::STRBBpost: | |||
| 5391 | case AArch64::STRBpost: | |||
| 5392 | case AArch64::STRHHpost: | |||
| 5393 | case AArch64::STRHpost: | |||
| 5394 | case AArch64::STRWpost: | |||
| 5395 | case AArch64::STRXpost: | |||
| 5396 | case AArch64::STRBBpre: | |||
| 5397 | case AArch64::STRBpre: | |||
| 5398 | case AArch64::STRHHpre: | |||
| 5399 | case AArch64::STRHpre: | |||
| 5400 | case AArch64::STRWpre: | |||
| 5401 | case AArch64::STRXpre: { | |||
| 5402 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
| 5403 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
| 5404 | if (RI->isSubRegisterEq(Rn, Rt)) | |||
| 5405 | return Error(Loc[0], "unpredictable STR instruction, writeback base " | |||
| 5406 | "is also a source"); | |||
| 5407 | break; | |||
| 5408 | } | |||
| 5409 | case AArch64::STXRB: | |||
| 5410 | case AArch64::STXRH: | |||
| 5411 | case AArch64::STXRW: | |||
| 5412 | case AArch64::STXRX: | |||
| 5413 | case AArch64::STLXRB: | |||
| 5414 | case AArch64::STLXRH: | |||
| 5415 | case AArch64::STLXRW: | |||
| 5416 | case AArch64::STLXRX: { | |||
| 5417 | unsigned Rs = Inst.getOperand(0).getReg(); | |||
| 5418 | unsigned Rt = Inst.getOperand(1).getReg(); | |||
| 5419 | unsigned Rn = Inst.getOperand(2).getReg(); | |||
| 5420 | if (RI->isSubRegisterEq(Rt, Rs) || | |||
| 5421 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) | |||
| 5422 | return Error(Loc[0], | |||
| 5423 | "unpredictable STXR instruction, status is also a source"); | |||
| 5424 | break; | |||
| 5425 | } | |||
| 5426 | case AArch64::STXPW: | |||
| 5427 | case AArch64::STXPX: | |||
| 5428 | case AArch64::STLXPW: | |||
| 5429 | case AArch64::STLXPX: { | |||
| 5430 | unsigned Rs = Inst.getOperand(0).getReg(); | |||
| 5431 | unsigned Rt1 = Inst.getOperand(1).getReg(); | |||
| 5432 | unsigned Rt2 = Inst.getOperand(2).getReg(); | |||
| 5433 | unsigned Rn = Inst.getOperand(3).getReg(); | |||
| 5434 | if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) || | |||
| 5435 | (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP)) | |||
| 5436 | return Error(Loc[0], | |||
| 5437 | "unpredictable STXP instruction, status is also a source"); | |||
| 5438 | break; | |||
| 5439 | } | |||
| 5440 | case AArch64::LDRABwriteback: | |||
| 5441 | case AArch64::LDRAAwriteback: { | |||
| 5442 | unsigned Xt = Inst.getOperand(0).getReg(); | |||
| 5443 | unsigned Xn = Inst.getOperand(1).getReg(); | |||
| 5444 | if (Xt == Xn) | |||
| 5445 | return Error(Loc[0], | |||
| 5446 | "unpredictable LDRA instruction, writeback base" | |||
| 5447 | " is also a destination"); | |||
| 5448 | break; | |||
| 5449 | } | |||
| 5450 | } | |||
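| | // Illustrative rejections from the checks above: | |||
| | //   ldr x0, [x0, #8]!    // writeback base is also the destination | |||
| | //   ldp x1, x1, [x2]     // Rt2 == Rt | |||
| | //   stxr w0, w0, [x1]    // status register is also a source | |||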
| 5451 | ||||
| 5452 | // Check v8.8-A memops instructions. | |||
| 5453 | switch (Inst.getOpcode()) { | |||
| 5454 | case AArch64::CPYFP: | |||
| 5455 | case AArch64::CPYFPWN: | |||
| 5456 | case AArch64::CPYFPRN: | |||
| 5457 | case AArch64::CPYFPN: | |||
| 5458 | case AArch64::CPYFPWT: | |||
| 5459 | case AArch64::CPYFPWTWN: | |||
| 5460 | case AArch64::CPYFPWTRN: | |||
| 5461 | case AArch64::CPYFPWTN: | |||
| 5462 | case AArch64::CPYFPRT: | |||
| 5463 | case AArch64::CPYFPRTWN: | |||
| 5464 | case AArch64::CPYFPRTRN: | |||
| 5465 | case AArch64::CPYFPRTN: | |||
| 5466 | case AArch64::CPYFPT: | |||
| 5467 | case AArch64::CPYFPTWN: | |||
| 5468 | case AArch64::CPYFPTRN: | |||
| 5469 | case AArch64::CPYFPTN: | |||
| 5470 | case AArch64::CPYFM: | |||
| 5471 | case AArch64::CPYFMWN: | |||
| 5472 | case AArch64::CPYFMRN: | |||
| 5473 | case AArch64::CPYFMN: | |||
| 5474 | case AArch64::CPYFMWT: | |||
| 5475 | case AArch64::CPYFMWTWN: | |||
| 5476 | case AArch64::CPYFMWTRN: | |||
| 5477 | case AArch64::CPYFMWTN: | |||
| 5478 | case AArch64::CPYFMRT: | |||
| 5479 | case AArch64::CPYFMRTWN: | |||
| 5480 | case AArch64::CPYFMRTRN: | |||
| 5481 | case AArch64::CPYFMRTN: | |||
| 5482 | case AArch64::CPYFMT: | |||
| 5483 | case AArch64::CPYFMTWN: | |||
| 5484 | case AArch64::CPYFMTRN: | |||
| 5485 | case AArch64::CPYFMTN: | |||
| 5486 | case AArch64::CPYFE: | |||
| 5487 | case AArch64::CPYFEWN: | |||
| 5488 | case AArch64::CPYFERN: | |||
| 5489 | case AArch64::CPYFEN: | |||
| 5490 | case AArch64::CPYFEWT: | |||
| 5491 | case AArch64::CPYFEWTWN: | |||
| 5492 | case AArch64::CPYFEWTRN: | |||
| 5493 | case AArch64::CPYFEWTN: | |||
| 5494 | case AArch64::CPYFERT: | |||
| 5495 | case AArch64::CPYFERTWN: | |||
| 5496 | case AArch64::CPYFERTRN: | |||
| 5497 | case AArch64::CPYFERTN: | |||
| 5498 | case AArch64::CPYFET: | |||
| 5499 | case AArch64::CPYFETWN: | |||
| 5500 | case AArch64::CPYFETRN: | |||
| 5501 | case AArch64::CPYFETN: | |||
| 5502 | case AArch64::CPYP: | |||
| 5503 | case AArch64::CPYPWN: | |||
| 5504 | case AArch64::CPYPRN: | |||
| 5505 | case AArch64::CPYPN: | |||
| 5506 | case AArch64::CPYPWT: | |||
| 5507 | case AArch64::CPYPWTWN: | |||
| 5508 | case AArch64::CPYPWTRN: | |||
| 5509 | case AArch64::CPYPWTN: | |||
| 5510 | case AArch64::CPYPRT: | |||
| 5511 | case AArch64::CPYPRTWN: | |||
| 5512 | case AArch64::CPYPRTRN: | |||
| 5513 | case AArch64::CPYPRTN: | |||
| 5514 | case AArch64::CPYPT: | |||
| 5515 | case AArch64::CPYPTWN: | |||
| 5516 | case AArch64::CPYPTRN: | |||
| 5517 | case AArch64::CPYPTN: | |||
| 5518 | case AArch64::CPYM: | |||
| 5519 | case AArch64::CPYMWN: | |||
| 5520 | case AArch64::CPYMRN: | |||
| 5521 | case AArch64::CPYMN: | |||
| 5522 | case AArch64::CPYMWT: | |||
| 5523 | case AArch64::CPYMWTWN: | |||
| 5524 | case AArch64::CPYMWTRN: | |||
| 5525 | case AArch64::CPYMWTN: | |||
| 5526 | case AArch64::CPYMRT: | |||
| 5527 | case AArch64::CPYMRTWN: | |||
| 5528 | case AArch64::CPYMRTRN: | |||
| 5529 | case AArch64::CPYMRTN: | |||
| 5530 | case AArch64::CPYMT: | |||
| 5531 | case AArch64::CPYMTWN: | |||
| 5532 | case AArch64::CPYMTRN: | |||
| 5533 | case AArch64::CPYMTN: | |||
| 5534 | case AArch64::CPYE: | |||
| 5535 | case AArch64::CPYEWN: | |||
| 5536 | case AArch64::CPYERN: | |||
| 5537 | case AArch64::CPYEN: | |||
| 5538 | case AArch64::CPYEWT: | |||
| 5539 | case AArch64::CPYEWTWN: | |||
| 5540 | case AArch64::CPYEWTRN: | |||
| 5541 | case AArch64::CPYEWTN: | |||
| 5542 | case AArch64::CPYERT: | |||
| 5543 | case AArch64::CPYERTWN: | |||
| 5544 | case AArch64::CPYERTRN: | |||
| 5545 | case AArch64::CPYERTN: | |||
| 5546 | case AArch64::CPYET: | |||
| 5547 | case AArch64::CPYETWN: | |||
| 5548 | case AArch64::CPYETRN: | |||
| 5549 | case AArch64::CPYETN: { | |||
| 5550 | unsigned Xd_wb = Inst.getOperand(0).getReg(); | |||
| 5551 | unsigned Xs_wb = Inst.getOperand(1).getReg(); | |||
| 5552 | unsigned Xn_wb = Inst.getOperand(2).getReg(); | |||
| 5553 | unsigned Xd = Inst.getOperand(3).getReg(); | |||
| 5554 | unsigned Xs = Inst.getOperand(4).getReg(); | |||
| 5555 | unsigned Xn = Inst.getOperand(5).getReg(); | |||
| 5556 | if (Xd_wb != Xd) | |||
| 5557 | return Error(Loc[0], | |||
| 5558 | "invalid CPY instruction, Xd_wb and Xd do not match"); | |||
| 5559 | if (Xs_wb != Xs) | |||
| 5560 | return Error(Loc[0], | |||
| 5561 | "invalid CPY instruction, Xs_wb and Xs do not match"); | |||
| 5562 | if (Xn_wb != Xn) | |||
| 5563 | return Error(Loc[0], | |||
| 5564 | "invalid CPY instruction, Xn_wb and Xn do not match"); | |||
| 5565 | if (Xd == Xs) | |||
| 5566 | return Error(Loc[0], "invalid CPY instruction, destination and source" | |||
| 5567 | " registers are the same"); | |||
| 5568 | if (Xd == Xn) | |||
| 5569 | return Error(Loc[0], "invalid CPY instruction, destination and size" | |||
| 5570 | " registers are the same"); | |||
| 5571 | if (Xs == Xn) | |||
| 5572 | return Error(Loc[0], "invalid CPY instruction, source and size" | |||
| 5573 | " registers are the same"); | |||
| 5574 | break; | |||
| 5575 | } | |||
| 5576 | case AArch64::SETP: | |||
| 5577 | case AArch64::SETPT: | |||
| 5578 | case AArch64::SETPN: | |||
| 5579 | case AArch64::SETPTN: | |||
| 5580 | case AArch64::SETM: | |||
| 5581 | case AArch64::SETMT: | |||
| 5582 | case AArch64::SETMN: | |||
| 5583 | case AArch64::SETMTN: | |||
| 5584 | case AArch64::SETE: | |||
| 5585 | case AArch64::SETET: | |||
| 5586 | case AArch64::SETEN: | |||
| 5587 | case AArch64::SETETN: | |||
| 5588 | case AArch64::SETGP: | |||
| 5589 | case AArch64::SETGPT: | |||
| 5590 | case AArch64::SETGPN: | |||
| 5591 | case AArch64::SETGPTN: | |||
| 5592 | case AArch64::SETGM: | |||
| 5593 | case AArch64::SETGMT: | |||
| 5594 | case AArch64::SETGMN: | |||
| 5595 | case AArch64::SETGMTN: | |||
| 5596 | case AArch64::MOPSSETGE: | |||
| 5597 | case AArch64::MOPSSETGET: | |||
| 5598 | case AArch64::MOPSSETGEN: | |||
| 5599 | case AArch64::MOPSSETGETN: { | |||
| 5600 | unsigned Xd_wb = Inst.getOperand(0).getReg(); | |||
| 5601 | unsigned Xn_wb = Inst.getOperand(1).getReg(); | |||
| 5602 | unsigned Xd = Inst.getOperand(2).getReg(); | |||
| 5603 | unsigned Xn = Inst.getOperand(3).getReg(); | |||
| 5604 | unsigned Xm = Inst.getOperand(4).getReg(); | |||
| 5605 | if (Xd_wb != Xd) | |||
| 5606 | return Error(Loc[0], | |||
| 5607 | "invalid SET instruction, Xd_wb and Xd do not match"); | |||
| 5608 | if (Xn_wb != Xn) | |||
| 5609 | return Error(Loc[0], | |||
| 5610 | "invalid SET instruction, Xn_wb and Xn do not match"); | |||
| 5611 | if (Xd == Xn) | |||
| 5612 | return Error(Loc[0], "invalid SET instruction, destination and size" | |||
| 5613 | " registers are the same"); | |||
| 5614 | if (Xd == Xm) | |||
| 5615 | return Error(Loc[0], "invalid SET instruction, destination and source" | |||
| 5616 | " registers are the same"); | |||
| 5617 | if (Xn == Xm) | |||
| 5618 | return Error(Loc[0], "invalid SET instruction, source and size" | |||
| 5619 | " registers are the same"); | |||
| 5620 | break; | |||
| 5621 | } | |||
| 5622 | } | |||
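| | // Illustrative note (not from the original source): the register checks | |||
| | // above require the three MOPS operands to be pairwise distinct. Assuming | |||
| | // the usual "cpyp [Xd]!, [Xs]!, Xn!" / "setp [Xd]!, Xn!, Xm" syntax: | |||
| | //   cpyp [x0]!, [x1]!, x2!   // accepted: x0, x1, x2 are all different | |||
| | //   cpyp [x0]!, [x0]!, x2!   // rejected: destination and source match | |||
| | //   setp [x0]!, x1!, x1      // rejected: size and source match | |||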
| 5623 | ||||
| 5624 | // Now check immediate ranges. Separate from the above as there is overlap | |||
| 5625 | // in the instructions being checked and this keeps the nested conditionals | |||
| 5626 | // to a minimum. | |||
| 5627 | switch (Inst.getOpcode()) { | |||
| 5628 | case AArch64::ADDSWri: | |||
| 5629 | case AArch64::ADDSXri: | |||
| 5630 | case AArch64::ADDWri: | |||
| 5631 | case AArch64::ADDXri: | |||
| 5632 | case AArch64::SUBSWri: | |||
| 5633 | case AArch64::SUBSXri: | |||
| 5634 | case AArch64::SUBWri: | |||
| 5635 | case AArch64::SUBXri: { | |||
| 5636 | // Annoyingly we can't do this in the isAddSubImm predicate, so there is | |||
| 5637 | // some slight duplication here. | |||
| 5638 | if (Inst.getOperand(2).isExpr()) { | |||
| 5639 | const MCExpr *Expr = Inst.getOperand(2).getExpr(); | |||
| 5640 | AArch64MCExpr::VariantKind ELFRefKind; | |||
| 5641 | MCSymbolRefExpr::VariantKind DarwinRefKind; | |||
| 5642 | int64_t Addend; | |||
| 5643 | if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { | |||
| 5644 | ||||
| 5645 | // Only allow these with ADDXri. | |||
| 5646 | if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF || | |||
| 5647 | DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) && | |||
| 5648 | Inst.getOpcode() == AArch64::ADDXri) | |||
| 5649 | return false; | |||
| 5650 | ||||
| 5651 | // Only allow these with ADDXri/ADDWri | |||
| 5652 | if ((ELFRefKind == AArch64MCExpr::VK_LO12 || | |||
| 5653 | ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 || | |||
| 5654 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 || | |||
| 5655 | ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC || | |||
| 5656 | ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 || | |||
| 5657 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 || | |||
| 5658 | ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC || | |||
| 5659 | ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 || | |||
| 5660 | ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 || | |||
| 5661 | ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) && | |||
| 5662 | (Inst.getOpcode() == AArch64::ADDXri || | |||
| 5663 | Inst.getOpcode() == AArch64::ADDWri)) | |||
| 5664 | return false; | |||
| 5665 | ||||
| 5666 | // Don't allow symbol refs in the immediate field otherwise | |||
| 5667 | // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of | |||
| 5668 | // operands of the original instruction (i.e. 'add w0, w1, borked' vs | |||
| 5669 | // 'cmp w0, borked') | |||
| 5670 | return Error(Loc.back(), "invalid immediate expression"); | |||
| 5671 | } | |||
| 5672 | // We don't validate more complex expressions here | |||
| 5673 | } | |||
| 5674 | return false; | |||
| 5675 | } | |||
| 5676 | default: | |||
| 5677 | return false; | |||
| 5678 | } | |||
| 5679 | } | |||
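| | // Illustrative note (not from the original source): for the ADD/SUB | |||
| | // immediate case above, a low-12-bit symbol reference is only accepted on | |||
| | // the ADD forms; other symbolic immediates are diagnosed. For example | |||
| | // (symbol name is illustrative): | |||
| | //   add x0, x1, :lo12:sym    // accepted: VK_LO12 with ADDXri | |||
| | //   cmp w0, sym              // rejected: "invalid immediate expression" | |||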
| 5680 | ||||
| 5681 | static std::string AArch64MnemonicSpellCheck(StringRef S, | |||
| 5682 | const FeatureBitset &FBS, | |||
| 5683 | unsigned VariantID = 0); | |||
| 5684 | ||||
| 5685 | bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, | |||
| 5686 | uint64_t ErrorInfo, | |||
| 5687 | OperandVector &Operands) { | |||
| 5688 | switch (ErrCode) { | |||
| 5689 | case Match_InvalidTiedOperand: { | |||
| 5690 | auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]); | |||
| 5691 | if (Op.isVectorList()) | |||
| 5692 | return Error(Loc, "operand must match destination register list"); | |||
| 5693 | ||||
| 5694 | assert(Op.isReg() && "Unexpected operand type"); | |||
| 5695 | switch (Op.getRegEqualityTy()) { | |||
| 5696 | case RegConstraintEqualityTy::EqualsSubReg: | |||
| 5697 | return Error(Loc, "operand must be 64-bit form of destination register"); | |||
| 5698 | case RegConstraintEqualityTy::EqualsSuperReg: | |||
| 5699 | return Error(Loc, "operand must be 32-bit form of destination register"); | |||
| 5700 | case RegConstraintEqualityTy::EqualsReg: | |||
| 5701 | return Error(Loc, "operand must match destination register"); | |||
| 5702 | } | |||
| 5703 | llvm_unreachable("Unknown RegConstraintEqualityTy")::llvm::llvm_unreachable_internal("Unknown RegConstraintEqualityTy" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 5703 ); | |||
| 5704 | } | |||
| 5705 | case Match_MissingFeature: | |||
| 5706 | return Error(Loc, | |||
| 5707 | "instruction requires a CPU feature not currently enabled"); | |||
| 5708 | case Match_InvalidOperand: | |||
| 5709 | return Error(Loc, "invalid operand for instruction"); | |||
| 5710 | case Match_InvalidSuffix: | |||
| 5711 | return Error(Loc, "invalid type suffix for instruction"); | |||
| 5712 | case Match_InvalidCondCode: | |||
| 5713 | return Error(Loc, "expected AArch64 condition code"); | |||
| 5714 | case Match_AddSubRegExtendSmall: | |||
| 5715 | return Error(Loc, | |||
| 5716 | "expected '[su]xt[bhw]' with optional integer in range [0, 4]"); | |||
| 5717 | case Match_AddSubRegExtendLarge: | |||
| 5718 | return Error(Loc, | |||
| 5719 | "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]"); | |||
| 5720 | case Match_AddSubSecondSource: | |||
| 5721 | return Error(Loc, | |||
| 5722 | "expected compatible register, symbol or integer in range [0, 4095]"); | |||
| 5723 | case Match_LogicalSecondSource: | |||
| 5724 | return Error(Loc, "expected compatible register or logical immediate"); | |||
| 5725 | case Match_InvalidMovImm32Shift: | |||
| 5726 | return Error(Loc, "expected 'lsl' with optional integer 0 or 16"); | |||
| 5727 | case Match_InvalidMovImm64Shift: | |||
| 5728 | return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48"); | |||
| 5729 | case Match_AddSubRegShift32: | |||
| 5730 | return Error(Loc, | |||
| 5731 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]"); | |||
| 5732 | case Match_AddSubRegShift64: | |||
| 5733 | return Error(Loc, | |||
| 5734 | "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]"); | |||
| 5735 | case Match_InvalidFPImm: | |||
| 5736 | return Error(Loc, | |||
| 5737 | "expected compatible register or floating-point constant"); | |||
| 5738 | case Match_InvalidMemoryIndexedSImm6: | |||
| 5739 | return Error(Loc, "index must be an integer in range [-32, 31]."); | |||
| 5740 | case Match_InvalidMemoryIndexedSImm5: | |||
| 5741 | return Error(Loc, "index must be an integer in range [-16, 15]."); | |||
| 5742 | case Match_InvalidMemoryIndexed1SImm4: | |||
| 5743 | return Error(Loc, "index must be an integer in range [-8, 7]."); | |||
| 5744 | case Match_InvalidMemoryIndexed2SImm4: | |||
| 5745 | return Error(Loc, "index must be a multiple of 2 in range [-16, 14]."); | |||
| 5746 | case Match_InvalidMemoryIndexed3SImm4: | |||
| 5747 | return Error(Loc, "index must be a multiple of 3 in range [-24, 21]."); | |||
| 5748 | case Match_InvalidMemoryIndexed4SImm4: | |||
| 5749 | return Error(Loc, "index must be a multiple of 4 in range [-32, 28]."); | |||
| 5750 | case Match_InvalidMemoryIndexed16SImm4: | |||
| 5751 | return Error(Loc, "index must be a multiple of 16 in range [-128, 112]."); | |||
| 5752 | case Match_InvalidMemoryIndexed32SImm4: | |||
| 5753 | return Error(Loc, "index must be a multiple of 32 in range [-256, 224]."); | |||
| 5754 | case Match_InvalidMemoryIndexed1SImm6: | |||
| 5755 | return Error(Loc, "index must be an integer in range [-32, 31]."); | |||
| 5756 | case Match_InvalidMemoryIndexedSImm8: | |||
| 5757 | return Error(Loc, "index must be an integer in range [-128, 127]."); | |||
| 5758 | case Match_InvalidMemoryIndexedSImm9: | |||
| 5759 | return Error(Loc, "index must be an integer in range [-256, 255]."); | |||
| 5760 | case Match_InvalidMemoryIndexed16SImm9: | |||
| 5761 | return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080]."); | |||
| 5762 | case Match_InvalidMemoryIndexed8SImm10: | |||
| 5763 | return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088]."); | |||
| 5764 | case Match_InvalidMemoryIndexed4SImm7: | |||
| 5765 | return Error(Loc, "index must be a multiple of 4 in range [-256, 252]."); | |||
| 5766 | case Match_InvalidMemoryIndexed8SImm7: | |||
| 5767 | return Error(Loc, "index must be a multiple of 8 in range [-512, 504]."); | |||
| 5768 | case Match_InvalidMemoryIndexed16SImm7: | |||
| 5769 | return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008]."); | |||
| 5770 | case Match_InvalidMemoryIndexed8UImm5: | |||
| 5771 | return Error(Loc, "index must be a multiple of 8 in range [0, 248]."); | |||
| 5772 | case Match_InvalidMemoryIndexed8UImm3: | |||
| 5773 | return Error(Loc, "index must be a multiple of 8 in range [0, 56]."); | |||
| 5774 | case Match_InvalidMemoryIndexed4UImm5: | |||
| 5775 | return Error(Loc, "index must be a multiple of 4 in range [0, 124]."); | |||
| 5776 | case Match_InvalidMemoryIndexed2UImm5: | |||
| 5777 | return Error(Loc, "index must be a multiple of 2 in range [0, 62]."); | |||
| 5778 | case Match_InvalidMemoryIndexed8UImm6: | |||
| 5779 | return Error(Loc, "index must be a multiple of 8 in range [0, 504]."); | |||
| 5780 | case Match_InvalidMemoryIndexed16UImm6: | |||
| 5781 | return Error(Loc, "index must be a multiple of 16 in range [0, 1008]."); | |||
| 5782 | case Match_InvalidMemoryIndexed4UImm6: | |||
| 5783 | return Error(Loc, "index must be a multiple of 4 in range [0, 252]."); | |||
| 5784 | case Match_InvalidMemoryIndexed2UImm6: | |||
| 5785 | return Error(Loc, "index must be a multiple of 2 in range [0, 126]."); | |||
| 5786 | case Match_InvalidMemoryIndexed1UImm6: | |||
| 5787 | return Error(Loc, "index must be in range [0, 63]."); | |||
| 5788 | case Match_InvalidMemoryWExtend8: | |||
| 5789 | return Error(Loc, | |||
| 5790 | "expected 'uxtw' or 'sxtw' with optional shift of #0"); | |||
| 5791 | case Match_InvalidMemoryWExtend16: | |||
| 5792 | return Error(Loc, | |||
| 5793 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1"); | |||
| 5794 | case Match_InvalidMemoryWExtend32: | |||
| 5795 | return Error(Loc, | |||
| 5796 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2"); | |||
| 5797 | case Match_InvalidMemoryWExtend64: | |||
| 5798 | return Error(Loc, | |||
| 5799 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3"); | |||
| 5800 | case Match_InvalidMemoryWExtend128: | |||
| 5801 | return Error(Loc, | |||
| 5802 | "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4"); | |||
| 5803 | case Match_InvalidMemoryXExtend8: | |||
| 5804 | return Error(Loc, | |||
| 5805 | "expected 'lsl' or 'sxtx' with optional shift of #0"); | |||
| 5806 | case Match_InvalidMemoryXExtend16: | |||
| 5807 | return Error(Loc, | |||
| 5808 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #1"); | |||
| 5809 | case Match_InvalidMemoryXExtend32: | |||
| 5810 | return Error(Loc, | |||
| 5811 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #2"); | |||
| 5812 | case Match_InvalidMemoryXExtend64: | |||
| 5813 | return Error(Loc, | |||
| 5814 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #3"); | |||
| 5815 | case Match_InvalidMemoryXExtend128: | |||
| 5816 | return Error(Loc, | |||
| 5817 | "expected 'lsl' or 'sxtx' with optional shift of #0 or #4"); | |||
| 5818 | case Match_InvalidMemoryIndexed1: | |||
| 5819 | return Error(Loc, "index must be an integer in range [0, 4095]."); | |||
| 5820 | case Match_InvalidMemoryIndexed2: | |||
| 5821 | return Error(Loc, "index must be a multiple of 2 in range [0, 8190]."); | |||
| 5822 | case Match_InvalidMemoryIndexed4: | |||
| 5823 | return Error(Loc, "index must be a multiple of 4 in range [0, 16380]."); | |||
| 5824 | case Match_InvalidMemoryIndexed8: | |||
| 5825 | return Error(Loc, "index must be a multiple of 8 in range [0, 32760]."); | |||
| 5826 | case Match_InvalidMemoryIndexed16: | |||
| 5827 | return Error(Loc, "index must be a multiple of 16 in range [0, 65520]."); | |||
| 5828 | case Match_InvalidImm0_0: | |||
| 5829 | return Error(Loc, "immediate must be 0."); | |||
| 5830 | case Match_InvalidImm0_1: | |||
| 5831 | return Error(Loc, "immediate must be an integer in range [0, 1]."); | |||
| 5832 | case Match_InvalidImm0_3: | |||
| 5833 | return Error(Loc, "immediate must be an integer in range [0, 3]."); | |||
| 5834 | case Match_InvalidImm0_7: | |||
| 5835 | return Error(Loc, "immediate must be an integer in range [0, 7]."); | |||
| 5836 | case Match_InvalidImm0_15: | |||
| 5837 | return Error(Loc, "immediate must be an integer in range [0, 15]."); | |||
| 5838 | case Match_InvalidImm0_31: | |||
| 5839 | return Error(Loc, "immediate must be an integer in range [0, 31]."); | |||
| 5840 | case Match_InvalidImm0_63: | |||
| 5841 | return Error(Loc, "immediate must be an integer in range [0, 63]."); | |||
| 5842 | case Match_InvalidImm0_127: | |||
| 5843 | return Error(Loc, "immediate must be an integer in range [0, 127]."); | |||
| 5844 | case Match_InvalidImm0_255: | |||
| 5845 | return Error(Loc, "immediate must be an integer in range [0, 255]."); | |||
| 5846 | case Match_InvalidImm0_65535: | |||
| 5847 | return Error(Loc, "immediate must be an integer in range [0, 65535]."); | |||
| 5848 | case Match_InvalidImm1_8: | |||
| 5849 | return Error(Loc, "immediate must be an integer in range [1, 8]."); | |||
| 5850 | case Match_InvalidImm1_16: | |||
| 5851 | return Error(Loc, "immediate must be an integer in range [1, 16]."); | |||
| 5852 | case Match_InvalidImm1_32: | |||
| 5853 | return Error(Loc, "immediate must be an integer in range [1, 32]."); | |||
| 5854 | case Match_InvalidImm1_64: | |||
| 5855 | return Error(Loc, "immediate must be an integer in range [1, 64]."); | |||
| 5856 | case Match_InvalidMemoryIndexedRange2UImm0: | |||
| 5857 | return Error(Loc, "vector select offset must be the immediate range 0:1."); | |||
| 5858 | case Match_InvalidMemoryIndexedRange2UImm1: | |||
| 5859 | return Error(Loc, "vector select offset must be an immediate range of the " | |||
| 5860 | "form <immf>:<imml>, where the first " | |||
| 5861 | "immediate is a multiple of 2 in the range [0, 2], and " | |||
| 5862 | "the second immediate is immf + 1."); | |||
| 5863 | case Match_InvalidMemoryIndexedRange2UImm2: | |||
| 5864 | case Match_InvalidMemoryIndexedRange2UImm3: | |||
| 5865 | return Error( | |||
| 5866 | Loc, | |||
| 5867 | "vector select offset must be an immediate range of the form " | |||
| 5868 | "<immf>:<imml>, " | |||
| 5869 | "where the first immediate is a multiple of 2 in the range [0, 6] or " | |||
| 5870 | "[0, 14] " | |||
| 5871 | "depending on the instruction, and the second immediate is immf + 1."); | |||
| 5872 | case Match_InvalidMemoryIndexedRange4UImm0: | |||
| 5873 | return Error(Loc, "vector select offset must be the immediate range 0:3."); | |||
| 5874 | case Match_InvalidMemoryIndexedRange4UImm1: | |||
| 5875 | case Match_InvalidMemoryIndexedRange4UImm2: | |||
| 5876 | return Error( | |||
| 5877 | Loc, | |||
| 5878 | "vector select offset must be an immediate range of the form " | |||
| 5879 | "<immf>:<imml>, " | |||
| 5880 | "where the first immediate is a multiple of 4 in the range [0, 4] or " | |||
| 5881 | "[0, 12] " | |||
| 5882 | "depending on the instruction, and the second immediate is immf + 3."); | |||
| 5883 | case Match_InvalidSVEAddSubImm8: | |||
| 5884 | return Error(Loc, "immediate must be an integer in range [0, 255]" | |||
| 5885 | " with a shift amount of 0"); | |||
| 5886 | case Match_InvalidSVEAddSubImm16: | |||
| 5887 | case Match_InvalidSVEAddSubImm32: | |||
| 5888 | case Match_InvalidSVEAddSubImm64: | |||
| 5889 | return Error(Loc, "immediate must be an integer in range [0, 255] or a " | |||
| 5890 | "multiple of 256 in range [256, 65280]"); | |||
| 5891 | case Match_InvalidSVECpyImm8: | |||
| 5892 | return Error(Loc, "immediate must be an integer in range [-128, 255]" | |||
| 5893 | " with a shift amount of 0"); | |||
| 5894 | case Match_InvalidSVECpyImm16: | |||
| 5895 | return Error(Loc, "immediate must be an integer in range [-128, 127] or a " | |||
| 5896 | "multiple of 256 in range [-32768, 65280]"); | |||
| 5897 | case Match_InvalidSVECpyImm32: | |||
| 5898 | case Match_InvalidSVECpyImm64: | |||
| 5899 | return Error(Loc, "immediate must be an integer in range [-128, 127] or a " | |||
| 5900 | "multiple of 256 in range [-32768, 32512]"); | |||
| 5901 | case Match_InvalidIndexRange0_0: | |||
| 5902 | return Error(Loc, "expected lane specifier '[0]'"); | |||
| 5903 | case Match_InvalidIndexRange1_1: | |||
| 5904 | return Error(Loc, "expected lane specifier '[1]'"); | |||
| 5905 | case Match_InvalidIndexRange0_15: | |||
| 5906 | return Error(Loc, "vector lane must be an integer in range [0, 15]."); | |||
| 5907 | case Match_InvalidIndexRange0_7: | |||
| 5908 | return Error(Loc, "vector lane must be an integer in range [0, 7]."); | |||
| 5909 | case Match_InvalidIndexRange0_3: | |||
| 5910 | return Error(Loc, "vector lane must be an integer in range [0, 3]."); | |||
| 5911 | case Match_InvalidIndexRange0_1: | |||
| 5912 | return Error(Loc, "vector lane must be an integer in range [0, 1]."); | |||
| 5913 | case Match_InvalidSVEIndexRange0_63: | |||
| 5914 | return Error(Loc, "vector lane must be an integer in range [0, 63]."); | |||
| 5915 | case Match_InvalidSVEIndexRange0_31: | |||
| 5916 | return Error(Loc, "vector lane must be an integer in range [0, 31]."); | |||
| 5917 | case Match_InvalidSVEIndexRange0_15: | |||
| 5918 | return Error(Loc, "vector lane must be an integer in range [0, 15]."); | |||
| 5919 | case Match_InvalidSVEIndexRange0_7: | |||
| 5920 | return Error(Loc, "vector lane must be an integer in range [0, 7]."); | |||
| 5921 | case Match_InvalidSVEIndexRange0_3: | |||
| 5922 | return Error(Loc, "vector lane must be an integer in range [0, 3]."); | |||
| 5923 | case Match_InvalidLabel: | |||
| 5924 | return Error(Loc, "expected label or encodable integer pc offset"); | |||
| 5925 | case Match_MRS: | |||
| 5926 | return Error(Loc, "expected readable system register"); | |||
| 5927 | case Match_MSR: | |||
| 5928 | case Match_InvalidSVCR: | |||
| 5929 | return Error(Loc, "expected writable system register or pstate"); | |||
| 5930 | case Match_InvalidComplexRotationEven: | |||
| 5931 | return Error(Loc, "complex rotation must be 0, 90, 180 or 270."); | |||
| 5932 | case Match_InvalidComplexRotationOdd: | |||
| 5933 | return Error(Loc, "complex rotation must be 90 or 270."); | |||
| 5934 | case Match_MnemonicFail: { | |||
| 5935 | std::string Suggestion = AArch64MnemonicSpellCheck( | |||
| 5936 | ((AArch64Operand &)*Operands[0]).getToken(), | |||
| 5937 | ComputeAvailableFeatures(STI->getFeatureBits())); | |||
| 5938 | return Error(Loc, "unrecognized instruction mnemonic" + Suggestion); | |||
| 5939 | } | |||
| 5940 | case Match_InvalidGPR64shifted8: | |||
| 5941 | return Error(Loc, "register must be x0..x30 or xzr, without shift"); | |||
| 5942 | case Match_InvalidGPR64shifted16: | |||
| 5943 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'"); | |||
| 5944 | case Match_InvalidGPR64shifted32: | |||
| 5945 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'"); | |||
| 5946 | case Match_InvalidGPR64shifted64: | |||
| 5947 | return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'"); | |||
| 5948 | case Match_InvalidGPR64shifted128: | |||
| 5949 | return Error( | |||
| 5950 | Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'"); | |||
| 5951 | case Match_InvalidGPR64NoXZRshifted8: | |||
| 5952 | return Error(Loc, "register must be x0..x30 without shift"); | |||
| 5953 | case Match_InvalidGPR64NoXZRshifted16: | |||
| 5954 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'"); | |||
| 5955 | case Match_InvalidGPR64NoXZRshifted32: | |||
| 5956 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'"); | |||
| 5957 | case Match_InvalidGPR64NoXZRshifted64: | |||
| 5958 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'"); | |||
| 5959 | case Match_InvalidGPR64NoXZRshifted128: | |||
| 5960 | return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'"); | |||
| 5961 | case Match_InvalidZPR32UXTW8: | |||
| 5962 | case Match_InvalidZPR32SXTW8: | |||
| 5963 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'"); | |||
| 5964 | case Match_InvalidZPR32UXTW16: | |||
| 5965 | case Match_InvalidZPR32SXTW16: | |||
| 5966 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'"); | |||
| 5967 | case Match_InvalidZPR32UXTW32: | |||
| 5968 | case Match_InvalidZPR32SXTW32: | |||
| 5969 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'"); | |||
| 5970 | case Match_InvalidZPR32UXTW64: | |||
| 5971 | case Match_InvalidZPR32SXTW64: | |||
| 5972 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'"); | |||
| 5973 | case Match_InvalidZPR64UXTW8: | |||
| 5974 | case Match_InvalidZPR64SXTW8: | |||
| 5975 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'"); | |||
| 5976 | case Match_InvalidZPR64UXTW16: | |||
| 5977 | case Match_InvalidZPR64SXTW16: | |||
| 5978 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'"); | |||
| 5979 | case Match_InvalidZPR64UXTW32: | |||
| 5980 | case Match_InvalidZPR64SXTW32: | |||
| 5981 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'"); | |||
| 5982 | case Match_InvalidZPR64UXTW64: | |||
| 5983 | case Match_InvalidZPR64SXTW64: | |||
| 5984 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'"); | |||
| 5985 | case Match_InvalidZPR32LSL8: | |||
| 5986 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'"); | |||
| 5987 | case Match_InvalidZPR32LSL16: | |||
| 5988 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'"); | |||
| 5989 | case Match_InvalidZPR32LSL32: | |||
| 5990 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'"); | |||
| 5991 | case Match_InvalidZPR32LSL64: | |||
| 5992 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'"); | |||
| 5993 | case Match_InvalidZPR64LSL8: | |||
| 5994 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'"); | |||
| 5995 | case Match_InvalidZPR64LSL16: | |||
| 5996 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'"); | |||
| 5997 | case Match_InvalidZPR64LSL32: | |||
| 5998 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'"); | |||
| 5999 | case Match_InvalidZPR64LSL64: | |||
| 6000 | return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'"); | |||
| 6001 | case Match_InvalidZPR0: | |||
| 6002 | return Error(Loc, "expected register without element width suffix"); | |||
| 6003 | case Match_InvalidZPR8: | |||
| 6004 | case Match_InvalidZPR16: | |||
| 6005 | case Match_InvalidZPR32: | |||
| 6006 | case Match_InvalidZPR64: | |||
| 6007 | case Match_InvalidZPR128: | |||
| 6008 | return Error(Loc, "invalid element width"); | |||
| 6009 | case Match_InvalidZPR_3b8: | |||
| 6010 | return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b"); | |||
| 6011 | case Match_InvalidZPR_3b16: | |||
| 6012 | return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h"); | |||
| 6013 | case Match_InvalidZPR_3b32: | |||
| 6014 | return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s"); | |||
| 6015 | case Match_InvalidZPR_4b8: | |||
| 6016 | return Error(Loc, | |||
| 6017 | "Invalid restricted vector register, expected z0.b..z15.b"); | |||
| 6018 | case Match_InvalidZPR_4b16: | |||
| 6019 | return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h"); | |||
| 6020 | case Match_InvalidZPR_4b32: | |||
| 6021 | return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s"); | |||
| 6022 | case Match_InvalidZPR_4b64: | |||
| 6023 | return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d"); | |||
| 6024 | case Match_InvalidSVEPattern: | |||
| 6025 | return Error(Loc, "invalid predicate pattern"); | |||
| 6026 | case Match_InvalidSVEPredicateAnyReg: | |||
| 6027 | case Match_InvalidSVEPredicateBReg: | |||
| 6028 | case Match_InvalidSVEPredicateHReg: | |||
| 6029 | case Match_InvalidSVEPredicateSReg: | |||
| 6030 | case Match_InvalidSVEPredicateDReg: | |||
| 6031 | return Error(Loc, "invalid predicate register."); | |||
| 6032 | case Match_InvalidSVEPredicate3bAnyReg: | |||
| 6033 | return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)"); | |||
| 6034 | case Match_InvalidSVEPNPredicateB_p8to15Reg: | |||
| 6035 | case Match_InvalidSVEPNPredicateH_p8to15Reg: | |||
| 6036 | case Match_InvalidSVEPNPredicateS_p8to15Reg: | |||
| 6037 | case Match_InvalidSVEPNPredicateD_p8to15Reg: | |||
| 6038 | return Error(Loc, "Invalid predicate register, expected PN in range " | |||
| 6039 | "pn8..pn15 with element suffix."); | |||
| 6040 | case Match_InvalidSVEPNPredicateAny_p8to15Reg: | |||
| 6041 | return Error(Loc, "invalid restricted predicate-as-counter register " | |||
| 6042 | "expected pn8..pn15"); | |||
| 6043 | case Match_InvalidSVEPNPredicateBReg: | |||
| 6044 | case Match_InvalidSVEPNPredicateHReg: | |||
| 6045 | case Match_InvalidSVEPNPredicateSReg: | |||
| 6046 | case Match_InvalidSVEPNPredicateDReg: | |||
| 6047 | return Error(Loc, "Invalid predicate register, expected PN in range " | |||
| 6048 | "pn0..pn15 with element suffix."); | |||
| 6049 | case Match_InvalidSVEVecLenSpecifier: | |||
| 6050 | return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4"); | |||
| 6051 | case Match_InvalidSVEPredicateListMul2x8: | |||
| 6052 | case Match_InvalidSVEPredicateListMul2x16: | |||
| 6053 | case Match_InvalidSVEPredicateListMul2x32: | |||
| 6054 | case Match_InvalidSVEPredicateListMul2x64: | |||
| 6055 | return Error(Loc, "Invalid vector list, expected list with 2 consecutive " | |||
| 6056 | "predicate registers, where the first vector is a multiple of 2 " | |||
| 6057 | "and with correct element type"); | |||
| 6058 | case Match_InvalidSVEExactFPImmOperandHalfOne: | |||
| 6059 | return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0."); | |||
| 6060 | case Match_InvalidSVEExactFPImmOperandHalfTwo: | |||
| 6061 | return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0."); | |||
| 6062 | case Match_InvalidSVEExactFPImmOperandZeroOne: | |||
| 6063 | return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0."); | |||
| 6064 | case Match_InvalidMatrixTileVectorH8: | |||
| 6065 | case Match_InvalidMatrixTileVectorV8: | |||
| 6066 | return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b"); | |||
| 6067 | case Match_InvalidMatrixTileVectorH16: | |||
| 6068 | case Match_InvalidMatrixTileVectorV16: | |||
| 6069 | return Error(Loc, | |||
| 6070 | "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h"); | |||
| 6071 | case Match_InvalidMatrixTileVectorH32: | |||
| 6072 | case Match_InvalidMatrixTileVectorV32: | |||
| 6073 | return Error(Loc, | |||
| 6074 | "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s"); | |||
| 6075 | case Match_InvalidMatrixTileVectorH64: | |||
| 6076 | case Match_InvalidMatrixTileVectorV64: | |||
| 6077 | return Error(Loc, | |||
| 6078 | "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d"); | |||
| 6079 | case Match_InvalidMatrixTileVectorH128: | |||
| 6080 | case Match_InvalidMatrixTileVectorV128: | |||
| 6081 | return Error(Loc, | |||
| 6082 | "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q"); | |||
| 6083 | case Match_InvalidMatrixTile32: | |||
| 6084 | return Error(Loc, "invalid matrix operand, expected za[0-3].s"); | |||
| 6085 | case Match_InvalidMatrixTile64: | |||
| 6086 | return Error(Loc, "invalid matrix operand, expected za[0-7].d"); | |||
| 6087 | case Match_InvalidMatrix: | |||
| 6088 | return Error(Loc, "invalid matrix operand, expected za"); | |||
| 6089 | case Match_InvalidMatrix8: | |||
| 6090 | return Error(Loc, "invalid matrix operand, expected suffix .b"); | |||
| 6091 | case Match_InvalidMatrix16: | |||
| 6092 | return Error(Loc, "invalid matrix operand, expected suffix .h"); | |||
| 6093 | case Match_InvalidMatrix32: | |||
| 6094 | return Error(Loc, "invalid matrix operand, expected suffix .s"); | |||
| 6095 | case Match_InvalidMatrix64: | |||
| 6096 | return Error(Loc, "invalid matrix operand, expected suffix .d"); | |||
| 6097 | case Match_InvalidMatrixIndexGPR32_12_15: | |||
| 6098 | return Error(Loc, "operand must be a register in range [w12, w15]"); | |||
| 6099 | case Match_InvalidMatrixIndexGPR32_8_11: | |||
| 6100 | return Error(Loc, "operand must be a register in range [w8, w11]"); | |||
| 6101 | case Match_InvalidSVEVectorListMul2x8: | |||
| 6102 | case Match_InvalidSVEVectorListMul2x16: | |||
| 6103 | case Match_InvalidSVEVectorListMul2x32: | |||
| 6104 | case Match_InvalidSVEVectorListMul2x64: | |||
| 6105 | return Error(Loc, "Invalid vector list, expected list with 2 consecutive " | |||
| 6106 | "SVE vectors, where the first vector is a multiple of 2 " | |||
| 6107 | "and with matching element types"); | |||
| 6108 | case Match_InvalidSVEVectorListMul4x8: | |||
| 6109 | case Match_InvalidSVEVectorListMul4x16: | |||
| 6110 | case Match_InvalidSVEVectorListMul4x32: | |||
| 6111 | case Match_InvalidSVEVectorListMul4x64: | |||
| 6112 | return Error(Loc, "Invalid vector list, expected list with 4 consecutive " | |||
| 6113 | "SVE vectors, where the first vector is a multiple of 4 " | |||
| 6114 | "and with matching element types"); | |||
| 6115 | case Match_InvalidLookupTable: | |||
| 6116 | return Error(Loc, "Invalid lookup table, expected zt0"); | |||
| 6117 | case Match_InvalidSVEVectorListStrided2x8: | |||
| 6118 | case Match_InvalidSVEVectorListStrided2x16: | |||
| 6119 | case Match_InvalidSVEVectorListStrided2x32: | |||
| 6120 | case Match_InvalidSVEVectorListStrided2x64: | |||
| 6121 | return Error( | |||
| 6122 | Loc, | |||
| 6123 | "Invalid vector list, expected list with each SVE vector in the list " | |||
| 6124 | "8 registers apart, and the first register in the range [z0, z7] or " | |||
| 6125 | "[z16, z23] and with correct element type"); | |||
| 6126 | case Match_InvalidSVEVectorListStrided4x8: | |||
| 6127 | case Match_InvalidSVEVectorListStrided4x16: | |||
| 6128 | case Match_InvalidSVEVectorListStrided4x32: | |||
| 6129 | case Match_InvalidSVEVectorListStrided4x64: | |||
| 6130 | return Error( | |||
| 6131 | Loc, | |||
| 6132 | "Invalid vector list, expected list with each SVE vector in the list " | |||
| 6133 | "4 registers apart, and the first register in the range [z0, z3] or " | |||
| 6134 | "[z16, z19] and with correct element type"); | |||
| 6135 | default: | |||
| 6136 | llvm_unreachable("unexpected error code!")::llvm::llvm_unreachable_internal("unexpected error code!", "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp" , 6136); | |||
| 6137 | } | |||
| 6138 | } | |||
| 6139 | ||||
| 6140 | static const char *getSubtargetFeatureName(uint64_t Val); | |||
| 6141 | ||||
| 6142 | bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, | |||
| 6143 | OperandVector &Operands, | |||
| 6144 | MCStreamer &Out, | |||
| 6145 | uint64_t &ErrorInfo, | |||
| 6146 | bool MatchingInlineAsm) { | |||
| 6147 | assert(!Operands.empty() && "Unexpected empty operand list!"); | |||
| 6148 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]); | |||
| 6149 | assert(Op.isToken() && "Leading operand should always be a mnemonic!"); | |||
| 6150 | ||||
| 6151 | StringRef Tok = Op.getToken(); | |||
| 6152 | unsigned NumOperands = Operands.size(); | |||
| 6153 | ||||
| 6154 | if (NumOperands == 4 && Tok == "lsl") { | |||
| 6155 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); | |||
| 6156 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
| 6157 | if (Op2.isScalarReg() && Op3.isImm()) { | |||
| 6158 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); | |||
| 6159 | if (Op3CE) { | |||
| 6160 | uint64_t Op3Val = Op3CE->getValue(); | |||
| 6161 | uint64_t NewOp3Val = 0; | |||
| 6162 | uint64_t NewOp4Val = 0; | |||
| 6163 | if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains( | |||
| 6164 | Op2.getReg())) { | |||
| 6165 | NewOp3Val = (32 - Op3Val) & 0x1f; | |||
| 6166 | NewOp4Val = 31 - Op3Val; | |||
| 6167 | } else { | |||
| 6168 | NewOp3Val = (64 - Op3Val) & 0x3f; | |||
| 6169 | NewOp4Val = 63 - Op3Val; | |||
| 6170 | } | |||
| 6171 | ||||
| 6172 | const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext()); | |||
| 6173 | const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext()); | |||
| 6174 | ||||
| 6175 | Operands[0] = | |||
| 6176 | AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext()); | |||
| 6177 | Operands.push_back(AArch64Operand::CreateImm( | |||
| 6178 | NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext())); | |||
| 6179 | Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(), | |||
| 6180 | Op3.getEndLoc(), getContext()); | |||
| 6181 | } | |||
| 6182 | } | |||
| 6183 | } else if (NumOperands == 4 && Tok == "bfc") { | |||
| 6184 | // FIXME: Horrible hack to handle BFC->BFM alias. | |||
| 6185 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6186 | AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]); | |||
| 6187 | AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]); | |||
| 6188 | ||||
| 6189 | if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) { | |||
| 6190 | const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm()); | |||
| 6191 | const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm()); | |||
| 6192 | ||||
| 6193 | if (LSBCE && WidthCE) { | |||
| 6194 | uint64_t LSB = LSBCE->getValue(); | |||
| 6195 | uint64_t Width = WidthCE->getValue(); | |||
| 6196 | ||||
| 6197 | uint64_t RegWidth = 0; | |||
| 6198 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
| 6199 | Op1.getReg())) | |||
| 6200 | RegWidth = 64; | |||
| 6201 | else | |||
| 6202 | RegWidth = 32; | |||
| 6203 | ||||
| 6204 | if (LSB >= RegWidth) | |||
| 6205 | return Error(LSBOp.getStartLoc(), | |||
| 6206 | "expected integer in range [0, 31]"); | |||
| 6207 | if (Width < 1 || Width > RegWidth) | |||
| 6208 | return Error(WidthOp.getStartLoc(), | |||
| 6209 | "expected integer in range [1, 32]"); | |||
| 6210 | ||||
| 6211 | uint64_t ImmR = 0; | |||
| 6212 | if (RegWidth == 32) | |||
| 6213 | ImmR = (32 - LSB) & 0x1f; | |||
| 6214 | else | |||
| 6215 | ImmR = (64 - LSB) & 0x3f; | |||
| 6216 | ||||
| 6217 | uint64_t ImmS = Width - 1; | |||
| 6218 | ||||
| 6219 | if (ImmR != 0 && ImmS >= ImmR) | |||
| 6220 | return Error(WidthOp.getStartLoc(), | |||
| 6221 | "requested insert overflows register"); | |||
| 6222 | ||||
| 6223 | const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext()); | |||
| 6224 | const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext()); | |||
| 6225 | Operands[0] = | |||
| 6226 | AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext()); | |||
| 6227 | Operands[2] = AArch64Operand::CreateReg( | |||
| 6228 | RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar, | |||
| 6229 | SMLoc(), SMLoc(), getContext()); | |||
| 6230 | Operands[3] = AArch64Operand::CreateImm( | |||
| 6231 | ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext()); | |||
| 6232 | Operands.emplace_back( | |||
| 6233 | AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(), | |||
| 6234 | WidthOp.getEndLoc(), getContext())); | |||
| 6235 | } | |||
| 6236 | } | |||
| 6237 | } else if (NumOperands == 5) { | |||
| 6238 | // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and | |||
| 6239 | // UBFIZ -> UBFM aliases. | |||
| 6240 | if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") { | |||
| 6241 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6242 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
| 6243 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); | |||
| 6244 | ||||
| 6245 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { | |||
| 6246 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); | |||
| 6247 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); | |||
| 6248 | ||||
| 6249 | if (Op3CE && Op4CE) { | |||
| 6250 | uint64_t Op3Val = Op3CE->getValue(); | |||
| 6251 | uint64_t Op4Val = Op4CE->getValue(); | |||
| 6252 | ||||
| 6253 | uint64_t RegWidth = 0; | |||
| 6254 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
| 6255 | Op1.getReg())) | |||
| 6256 | RegWidth = 64; | |||
| 6257 | else | |||
| 6258 | RegWidth = 32; | |||
| 6259 | ||||
| 6260 | if (Op3Val >= RegWidth) | |||
| 6261 | return Error(Op3.getStartLoc(), | |||
| 6262 | "expected integer in range [0, 31]"); | |||
| 6263 | if (Op4Val < 1 || Op4Val > RegWidth) | |||
| 6264 | return Error(Op4.getStartLoc(), | |||
| 6265 | "expected integer in range [1, 32]"); | |||
| 6266 | ||||
| 6267 | uint64_t NewOp3Val = 0; | |||
| 6268 | if (RegWidth == 32) | |||
| 6269 | NewOp3Val = (32 - Op3Val) & 0x1f; | |||
| 6270 | else | |||
| 6271 | NewOp3Val = (64 - Op3Val) & 0x3f; | |||
| 6272 | ||||
| 6273 | uint64_t NewOp4Val = Op4Val - 1; | |||
| 6274 | ||||
| 6275 | if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) | |||
| 6276 | return Error(Op4.getStartLoc(), | |||
| 6277 | "requested insert overflows register"); | |||
| 6278 | ||||
| 6279 | const MCExpr *NewOp3 = | |||
| 6280 | MCConstantExpr::create(NewOp3Val, getContext()); | |||
| 6281 | const MCExpr *NewOp4 = | |||
| 6282 | MCConstantExpr::create(NewOp4Val, getContext()); | |||
| 6283 | Operands[3] = AArch64Operand::CreateImm( | |||
| 6284 | NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext()); | |||
| 6285 | Operands[4] = AArch64Operand::CreateImm( | |||
| 6286 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); | |||
| 6287 | if (Tok == "bfi") | |||
| 6288 | Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), | |||
| 6289 | getContext()); | |||
| 6290 | else if (Tok == "sbfiz") | |||
| 6291 | Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(), | |||
| 6292 | getContext()); | |||
| 6293 | else if (Tok == "ubfiz") | |||
| 6294 | Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), | |||
| 6295 | getContext()); | |||
| 6296 | else | |||
| 6297 | llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 6297 ); | |||
| 6298 | } | |||
| 6299 | } | |||
| 6300 | ||||
| 6301 | // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and | |||
| 6302 | // UBFX -> UBFM aliases. | |||
| 6303 | } else if (NumOperands == 5 && | |||
| 6304 | (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) { | |||
| 6305 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6306 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
| 6307 | AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]); | |||
| 6308 | ||||
| 6309 | if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) { | |||
| 6310 | const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm()); | |||
| 6311 | const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm()); | |||
| 6312 | ||||
| 6313 | if (Op3CE && Op4CE) { | |||
| 6314 | uint64_t Op3Val = Op3CE->getValue(); | |||
| 6315 | uint64_t Op4Val = Op4CE->getValue(); | |||
| 6316 | ||||
| 6317 | uint64_t RegWidth = 0; | |||
| 6318 | if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
| 6319 | Op1.getReg())) | |||
| 6320 | RegWidth = 64; | |||
| 6321 | else | |||
| 6322 | RegWidth = 32; | |||
| 6323 | ||||
| 6324 | if (Op3Val >= RegWidth) | |||
| 6325 | return Error(Op3.getStartLoc(), | |||
| 6326 | "expected integer in range [0, 31]"); | |||
| 6327 | if (Op4Val < 1 || Op4Val > RegWidth) | |||
| 6328 | return Error(Op4.getStartLoc(), | |||
| 6329 | "expected integer in range [1, 32]"); | |||
| 6330 | ||||
| 6331 | uint64_t NewOp4Val = Op3Val + Op4Val - 1; | |||
| 6332 | ||||
| 6333 | if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) | |||
| 6334 | return Error(Op4.getStartLoc(), | |||
| 6335 | "requested extract overflows register"); | |||
| 6336 | ||||
| 6337 | const MCExpr *NewOp4 = | |||
| 6338 | MCConstantExpr::create(NewOp4Val, getContext()); | |||
| 6339 | Operands[4] = AArch64Operand::CreateImm( | |||
| 6340 | NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext()); | |||
| 6341 | if (Tok == "bfxil") | |||
| 6342 | Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), | |||
| 6343 | getContext()); | |||
| 6344 | else if (Tok == "sbfx") | |||
| 6345 | Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(), | |||
| 6346 | getContext()); | |||
| 6347 | else if (Tok == "ubfx") | |||
| 6348 | Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), | |||
| 6349 | getContext()); | |||
| 6350 | else | |||
| 6351 | llvm_unreachable("No valid mnemonic for alias?")::llvm::llvm_unreachable_internal("No valid mnemonic for alias?" , "llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp", 6351 ); | |||
| 6352 | } | |||
| 6353 | } | |||
| 6354 | } | |||
| 6355 | } | |||
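| | // Illustrative note (not from the original source): worked examples of the | |||
| | // alias rewrites above, for a 32-bit destination register: | |||
| | //   lsl  w0, w1, #4       ->  ubfm w0, w1, #28, #27   // (32-4)&0x1f, 31-4 | |||
| | //   bfi  w0, w1, #8, #4   ->  bfm  w0, w1, #24, #3    // ImmR=(32-8)&0x1f, ImmS=4-1 | |||
| | //   ubfx w0, w1, #8, #4   ->  ubfm w0, w1, #8, #11    // ImmS=lsb+width-1 | |||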
| 6356 | ||||
| 6357 | // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing | |||
| 6358 | // instruction for FP registers correctly in some rare circumstances. Convert | |||
| 6359 | // it to a safe instruction and warn (because silently changing someone's | |||
| 6360 | // assembly is rude). | |||
| 6361 | if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) && | |||
| 6362 | NumOperands == 4 && Tok == "movi") { | |||
| 6363 | AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6364 | AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]); | |||
| 6365 | AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]); | |||
| 6366 | if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) || | |||
| 6367 | (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) { | |||
| 6368 | StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken(); | |||
| 6369 | if (Suffix.lower() == ".2d" && | |||
| 6370 | cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) { | |||
| 6371 | Warning(IDLoc, "instruction movi.2d with immediate #0 may not function" | |||
| 6372 | " correctly on this CPU, converting to equivalent movi.16b"); | |||
| 6373 | // Switch the suffix to .16b. | |||
| 6374 | unsigned Idx = Op1.isToken() ? 1 : 2; | |||
| 6375 | Operands[Idx] = | |||
| 6376 | AArch64Operand::CreateToken(".16b", IDLoc, getContext()); | |||
| 6377 | } | |||
| 6378 | } | |||
| 6379 | } | |||
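| | // Illustrative note (not from the original source): with the workaround | |||
| | // feature enabled, "movi.2d v0, #0" is rewritten to the equivalent | |||
| | // "movi.16b v0, #0" and the warning above is emitted; any other suffix or | |||
| | // non-zero immediate is left untouched. | |||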
| 6380 | ||||
| 6381 | // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands. | |||
| 6382 | // InstAlias can't quite handle this since the reg classes aren't | |||
| 6383 | // subclasses. | |||
| 6384 | if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) { | |||
| 6385 | // The source register can be Wn here, but the matcher expects a | |||
| 6386 | // GPR64. Twiddle it here if necessary. | |||
| 6387 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); | |||
| 6388 | if (Op.isScalarReg()) { | |||
| 6389 | unsigned Reg = getXRegFromWReg(Op.getReg()); | |||
| 6390 | Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, | |||
| 6391 | Op.getStartLoc(), Op.getEndLoc(), | |||
| 6392 | getContext()); | |||
| 6393 | } | |||
| 6394 | } | |||
| 6395 | // FIXME: Likewise for sxt[bh] with an Xd dst operand | |||
| 6396 | else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) { | |||
| 6397 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6398 | if (Op.isScalarReg() && | |||
| 6399 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
| 6400 | Op.getReg())) { | |||
| 6401 | // The source register can be Wn here, but the matcher expects a | |||
| 6402 | // GPR64. Twiddle it here if necessary. | |||
| 6403 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]); | |||
| 6404 | if (Op.isScalarReg()) { | |||
| 6405 | unsigned Reg = getXRegFromWReg(Op.getReg()); | |||
| 6406 | Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, | |||
| 6407 | Op.getStartLoc(), | |||
| 6408 | Op.getEndLoc(), getContext()); | |||
| 6409 | } | |||
| 6410 | } | |||
| 6411 | } | |||
| 6412 | // FIXME: Likewise for uxt[bh] with an Xd dst operand | |||
| 6413 | else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) { | |||
| 6414 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6415 | if (Op.isScalarReg() && | |||
| 6416 | AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains( | |||
| 6417 | Op.getReg())) { | |||
| 6418 | // The destination register can be Xn here, but the matcher expects a | |||
| 6419 | // GPR32. Twiddle it here if necessary. | |||
| 6420 | AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]); | |||
| 6421 | if (Op.isScalarReg()) { | |||
| 6422 | unsigned Reg = getWRegFromXReg(Op.getReg()); | |||
| 6423 | Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar, | |||
| 6424 | Op.getStartLoc(), | |||
| 6425 | Op.getEndLoc(), getContext()); | |||
| 6426 | } | |||
| 6427 | } | |||
| 6428 | } | |||
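| | // Illustrative note (not from the original source): e.g. "sxtw x0, w1" | |||
| | // carries a W source but the matcher expects a GPR64, so w1 is promoted to | |||
| | // x1 before matching; conversely "uxtb x0, w1" has its X destination | |||
| | // rewritten to w0, the 32-bit form the matcher expects. | |||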
| 6429 | ||||
| 6430 | MCInst Inst; | |||
| 6431 | FeatureBitset MissingFeatures; | |||
| 6432 | // First try to match against the secondary set of tables containing the | |||
| 6433 | // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2"). | |||
| 6434 | unsigned MatchResult = | |||
| 6435 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, | |||
| 6436 | MatchingInlineAsm, 1); | |||
| 6437 | ||||
| 6438 | // If that fails, try against the alternate table containing long-form NEON: | |||
| 6439 | // "fadd v0.2s, v1.2s, v2.2s" | |||
| 6440 | if (MatchResult != Match_Success) { | |||
| 6441 | // But first, save the short-form match result: we can use it in case the | |||
| 6442 | // long-form match also fails. | |||
| 6443 | auto ShortFormNEONErrorInfo = ErrorInfo; | |||
| 6444 | auto ShortFormNEONMatchResult = MatchResult; | |||
| 6445 | auto ShortFormNEONMissingFeatures = MissingFeatures; | |||
| 6446 | ||||
| 6447 | MatchResult = | |||
| 6448 | MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures, | |||
| 6449 | MatchingInlineAsm, 0); | |||
| 6450 | ||||
| 6451 | // Now, both matches failed, and the long-form match failed on the mnemonic | |||
| 6452 | // suffix token operand. The short-form match failure is probably more | |||
| 6453 | // relevant: use it instead. | |||
| 6454 | if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 && | |||
| 6455 | Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() && | |||
| 6456 | ((AArch64Operand &)*Operands[1]).isTokenSuffix()) { | |||
| 6457 | MatchResult = ShortFormNEONMatchResult; | |||
| 6458 | ErrorInfo = ShortFormNEONErrorInfo; | |||
| 6459 | MissingFeatures = ShortFormNEONMissingFeatures; | |||
| 6460 | } | |||
| 6461 | } | |||
| 6462 | ||||
| 6463 | switch (MatchResult) { | |||
| 6464 | case Match_Success: { | |||
| 6465 | // Perform range checking and other semantic validations | |||
| 6466 | SmallVector<SMLoc, 8> OperandLocs; | |||
| 6467 | NumOperands = Operands.size(); | |||
| 6468 | for (unsigned i = 1; i < NumOperands; ++i) | |||
| 6469 | OperandLocs.push_back(Operands[i]->getStartLoc()); | |||
| 6470 | if (validateInstruction(Inst, IDLoc, OperandLocs)) | |||
| 6471 | return true; | |||
| 6472 | ||||
| 6473 | Inst.setLoc(IDLoc); | |||
| 6474 | Out.emitInstruction(Inst, getSTI()); | |||
| 6475 | return false; | |||
| 6476 | } | |||
| 6477 | case Match_MissingFeature: { | |||
| 6478 | assert(MissingFeatures.any() && "Unknown missing feature!"); | |||
| 6479 | // Special case the error message for the very common case where only | |||
| 6480 | // a single subtarget feature is missing (neon, e.g.). | |||
| 6481 | std::string Msg = "instruction requires:"; | |||
| 6482 | for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) { | |||
| 6483 | if (MissingFeatures[i]) { | |||
| 6484 | Msg += " "; | |||
| 6485 | Msg += getSubtargetFeatureName(i); | |||
| 6486 | } | |||
| 6487 | } | |||
| 6488 | return Error(IDLoc, Msg); | |||
| 6489 | } | |||
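| | // Illustrative note (not from the original source): assembling, say, an SVE | |||
| | // instruction without +sve ends up here and produces a diagnostic such as | |||
| | // "instruction requires: sve", with one name appended per missing feature. | |||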
| 6490 | case Match_MnemonicFail: | |||
| 6491 | return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands); | |||
| 6492 | case Match_InvalidOperand: { | |||
| 6493 | SMLoc ErrorLoc = IDLoc; | |||
| 6494 | ||||
| 6495 | if (ErrorInfo != ~0ULL) { | |||
| 6496 | if (ErrorInfo >= Operands.size()) | |||
| 6497 | return Error(IDLoc, "too few operands for instruction", | |||
| 6498 | SMRange(IDLoc, getTok().getLoc())); | |||
| 6499 | ||||
| 6500 | ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); | |||
| 6501 | if (ErrorLoc == SMLoc()) | |||
| 6502 | ErrorLoc = IDLoc; | |||
| 6503 | } | |||
| 6504 | // If the match failed on a suffix token operand, tweak the diagnostic | |||
| 6505 | // accordingly. | |||
| 6506 | if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() && | |||
| 6507 | ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix()) | |||
| 6508 | MatchResult = Match_InvalidSuffix; | |||
| 6509 | ||||
| 6510 | return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands); | |||
| 6511 | } | |||
| 6512 | case Match_InvalidTiedOperand: | |||
| 6513 | case Match_InvalidMemoryIndexed1: | |||
| 6514 | case Match_InvalidMemoryIndexed2: | |||
| 6515 | case Match_InvalidMemoryIndexed4: | |||
| 6516 | case Match_InvalidMemoryIndexed8: | |||
| 6517 | case Match_InvalidMemoryIndexed16: | |||
| 6518 | case Match_InvalidCondCode: | |||
| 6519 | case Match_AddSubRegExtendSmall: | |||
| 6520 | case Match_AddSubRegExtendLarge: | |||
| 6521 | case Match_AddSubSecondSource: | |||
| 6522 | case Match_LogicalSecondSource: | |||
| 6523 | case Match_AddSubRegShift32: | |||
| 6524 | case Match_AddSubRegShift64: | |||
| 6525 | case Match_InvalidMovImm32Shift: | |||
| 6526 | case Match_InvalidMovImm64Shift: | |||
| 6527 | case Match_InvalidFPImm: | |||
| 6528 | case Match_InvalidMemoryWExtend8: | |||
| 6529 | case Match_InvalidMemoryWExtend16: | |||
| 6530 | case Match_InvalidMemoryWExtend32: | |||
| 6531 | case Match_InvalidMemoryWExtend64: | |||
| 6532 | case Match_InvalidMemoryWExtend128: | |||
| 6533 | case Match_InvalidMemoryXExtend8: | |||
| 6534 | case Match_InvalidMemoryXExtend16: | |||
| 6535 | case Match_InvalidMemoryXExtend32: | |||
| 6536 | case Match_InvalidMemoryXExtend64: | |||
| 6537 | case Match_InvalidMemoryXExtend128: | |||
| 6538 | case Match_InvalidMemoryIndexed1SImm4: | |||
| 6539 | case Match_InvalidMemoryIndexed2SImm4: | |||
| 6540 | case Match_InvalidMemoryIndexed3SImm4: | |||
| 6541 | case Match_InvalidMemoryIndexed4SImm4: | |||
| 6542 | case Match_InvalidMemoryIndexed1SImm6: | |||
| 6543 | case Match_InvalidMemoryIndexed16SImm4: | |||
| 6544 | case Match_InvalidMemoryIndexed32SImm4: | |||
| 6545 | case Match_InvalidMemoryIndexed4SImm7: | |||
| 6546 | case Match_InvalidMemoryIndexed8SImm7: | |||
| 6547 | case Match_InvalidMemoryIndexed16SImm7: | |||
| 6548 | case Match_InvalidMemoryIndexed8UImm5: | |||
| 6549 | case Match_InvalidMemoryIndexed8UImm3: | |||
| 6550 | case Match_InvalidMemoryIndexed4UImm5: | |||
| 6551 | case Match_InvalidMemoryIndexed2UImm5: | |||
| 6552 | case Match_InvalidMemoryIndexed1UImm6: | |||
| 6553 | case Match_InvalidMemoryIndexed2UImm6: | |||
| 6554 | case Match_InvalidMemoryIndexed4UImm6: | |||
| 6555 | case Match_InvalidMemoryIndexed8UImm6: | |||
| 6556 | case Match_InvalidMemoryIndexed16UImm6: | |||
| 6557 | case Match_InvalidMemoryIndexedSImm6: | |||
| 6558 | case Match_InvalidMemoryIndexedSImm5: | |||
| 6559 | case Match_InvalidMemoryIndexedSImm8: | |||
| 6560 | case Match_InvalidMemoryIndexedSImm9: | |||
| 6561 | case Match_InvalidMemoryIndexed16SImm9: | |||
| 6562 | case Match_InvalidMemoryIndexed8SImm10: | |||
| 6563 | case Match_InvalidImm0_0: | |||
| 6564 | case Match_InvalidImm0_1: | |||
| 6565 | case Match_InvalidImm0_3: | |||
| 6566 | case Match_InvalidImm0_7: | |||
| 6567 | case Match_InvalidImm0_15: | |||
| 6568 | case Match_InvalidImm0_31: | |||
| 6569 | case Match_InvalidImm0_63: | |||
| 6570 | case Match_InvalidImm0_127: | |||
| 6571 | case Match_InvalidImm0_255: | |||
| 6572 | case Match_InvalidImm0_65535: | |||
| 6573 | case Match_InvalidImm1_8: | |||
| 6574 | case Match_InvalidImm1_16: | |||
| 6575 | case Match_InvalidImm1_32: | |||
| 6576 | case Match_InvalidImm1_64: | |||
| 6577 | case Match_InvalidMemoryIndexedRange2UImm0: | |||
| 6578 | case Match_InvalidMemoryIndexedRange2UImm1: | |||
| 6579 | case Match_InvalidMemoryIndexedRange2UImm2: | |||
| 6580 | case Match_InvalidMemoryIndexedRange2UImm3: | |||
| 6581 | case Match_InvalidMemoryIndexedRange4UImm0: | |||
| 6582 | case Match_InvalidMemoryIndexedRange4UImm1: | |||
| 6583 | case Match_InvalidMemoryIndexedRange4UImm2: | |||
| 6584 | case Match_InvalidSVEAddSubImm8: | |||
| 6585 | case Match_InvalidSVEAddSubImm16: | |||
| 6586 | case Match_InvalidSVEAddSubImm32: | |||
| 6587 | case Match_InvalidSVEAddSubImm64: | |||
| 6588 | case Match_InvalidSVECpyImm8: | |||
| 6589 | case Match_InvalidSVECpyImm16: | |||
| 6590 | case Match_InvalidSVECpyImm32: | |||
| 6591 | case Match_InvalidSVECpyImm64: | |||
| 6592 | case Match_InvalidIndexRange0_0: | |||
| 6593 | case Match_InvalidIndexRange1_1: | |||
| 6594 | case Match_InvalidIndexRange0_15: | |||
| 6595 | case Match_InvalidIndexRange0_7: | |||
| 6596 | case Match_InvalidIndexRange0_3: | |||
| 6597 | case Match_InvalidIndexRange0_1: | |||
| 6598 | case Match_InvalidSVEIndexRange0_63: | |||
| 6599 | case Match_InvalidSVEIndexRange0_31: | |||
| 6600 | case Match_InvalidSVEIndexRange0_15: | |||
| 6601 | case Match_InvalidSVEIndexRange0_7: | |||
| 6602 | case Match_InvalidSVEIndexRange0_3: | |||
| 6603 | case Match_InvalidLabel: | |||
| 6604 | case Match_InvalidComplexRotationEven: | |||
| 6605 | case Match_InvalidComplexRotationOdd: | |||
| 6606 | case Match_InvalidGPR64shifted8: | |||
| 6607 | case Match_InvalidGPR64shifted16: | |||
| 6608 | case Match_InvalidGPR64shifted32: | |||
| 6609 | case Match_InvalidGPR64shifted64: | |||
| 6610 | case Match_InvalidGPR64shifted128: | |||
| 6611 | case Match_InvalidGPR64NoXZRshifted8: | |||
| 6612 | case Match_InvalidGPR64NoXZRshifted16: | |||
| 6613 | case Match_InvalidGPR64NoXZRshifted32: | |||
| 6614 | case Match_InvalidGPR64NoXZRshifted64: | |||
| 6615 | case Match_InvalidGPR64NoXZRshifted128: | |||
| 6616 | case Match_InvalidZPR32UXTW8: | |||
| 6617 | case Match_InvalidZPR32UXTW16: | |||
| 6618 | case Match_InvalidZPR32UXTW32: | |||
| 6619 | case Match_InvalidZPR32UXTW64: | |||
| 6620 | case Match_InvalidZPR32SXTW8: | |||
| 6621 | case Match_InvalidZPR32SXTW16: | |||
| 6622 | case Match_InvalidZPR32SXTW32: | |||
| 6623 | case Match_InvalidZPR32SXTW64: | |||
| 6624 | case Match_InvalidZPR64UXTW8: | |||
| 6625 | case Match_InvalidZPR64SXTW8: | |||
| 6626 | case Match_InvalidZPR64UXTW16: | |||
| 6627 | case Match_InvalidZPR64SXTW16: | |||
| 6628 | case Match_InvalidZPR64UXTW32: | |||
| 6629 | case Match_InvalidZPR64SXTW32: | |||
| 6630 | case Match_InvalidZPR64UXTW64: | |||
| 6631 | case Match_InvalidZPR64SXTW64: | |||
| 6632 | case Match_InvalidZPR32LSL8: | |||
| 6633 | case Match_InvalidZPR32LSL16: | |||
| 6634 | case Match_InvalidZPR32LSL32: | |||
| 6635 | case Match_InvalidZPR32LSL64: | |||
| 6636 | case Match_InvalidZPR64LSL8: | |||
| 6637 | case Match_InvalidZPR64LSL16: | |||
| 6638 | case Match_InvalidZPR64LSL32: | |||
| 6639 | case Match_InvalidZPR64LSL64: | |||
| 6640 | case Match_InvalidZPR0: | |||
| 6641 | case Match_InvalidZPR8: | |||
| 6642 | case Match_InvalidZPR16: | |||
| 6643 | case Match_InvalidZPR32: | |||
| 6644 | case Match_InvalidZPR64: | |||
| 6645 | case Match_InvalidZPR128: | |||
| 6646 | case Match_InvalidZPR_3b8: | |||
| 6647 | case Match_InvalidZPR_3b16: | |||
| 6648 | case Match_InvalidZPR_3b32: | |||
| 6649 | case Match_InvalidZPR_4b8: | |||
| 6650 | case Match_InvalidZPR_4b16: | |||
| 6651 | case Match_InvalidZPR_4b32: | |||
| 6652 | case Match_InvalidZPR_4b64: | |||
| 6653 | case Match_InvalidSVEPredicateAnyReg: | |||
| 6654 | case Match_InvalidSVEPattern: | |||
| 6655 | case Match_InvalidSVEVecLenSpecifier: | |||
| 6656 | case Match_InvalidSVEPredicateBReg: | |||
| 6657 | case Match_InvalidSVEPredicateHReg: | |||
| 6658 | case Match_InvalidSVEPredicateSReg: | |||
| 6659 | case Match_InvalidSVEPredicateDReg: | |||
| 6660 | case Match_InvalidSVEPredicate3bAnyReg: | |||
| 6661 | case Match_InvalidSVEPNPredicateB_p8to15Reg: | |||
| 6662 | case Match_InvalidSVEPNPredicateH_p8to15Reg: | |||
| 6663 | case Match_InvalidSVEPNPredicateS_p8to15Reg: | |||
| 6664 | case Match_InvalidSVEPNPredicateD_p8to15Reg: | |||
| 6665 | case Match_InvalidSVEPNPredicateAny_p8to15Reg: | |||
| 6666 | case Match_InvalidSVEPNPredicateBReg: | |||
| 6667 | case Match_InvalidSVEPNPredicateHReg: | |||
| 6668 | case Match_InvalidSVEPNPredicateSReg: | |||
| 6669 | case Match_InvalidSVEPNPredicateDReg: | |||
| 6670 | case Match_InvalidSVEPredicateListMul2x8: | |||
| 6671 | case Match_InvalidSVEPredicateListMul2x16: | |||
| 6672 | case Match_InvalidSVEPredicateListMul2x32: | |||
| 6673 | case Match_InvalidSVEPredicateListMul2x64: | |||
| 6674 | case Match_InvalidSVEExactFPImmOperandHalfOne: | |||
| 6675 | case Match_InvalidSVEExactFPImmOperandHalfTwo: | |||
| 6676 | case Match_InvalidSVEExactFPImmOperandZeroOne: | |||
| 6677 | case Match_InvalidMatrixTile32: | |||
| 6678 | case Match_InvalidMatrixTile64: | |||
| 6679 | case Match_InvalidMatrix: | |||
| 6680 | case Match_InvalidMatrix8: | |||
| 6681 | case Match_InvalidMatrix16: | |||
| 6682 | case Match_InvalidMatrix32: | |||
| 6683 | case Match_InvalidMatrix64: | |||
| 6684 | case Match_InvalidMatrixTileVectorH8: | |||
| 6685 | case Match_InvalidMatrixTileVectorH16: | |||
| 6686 | case Match_InvalidMatrixTileVectorH32: | |||
| 6687 | case Match_InvalidMatrixTileVectorH64: | |||
| 6688 | case Match_InvalidMatrixTileVectorH128: | |||
| 6689 | case Match_InvalidMatrixTileVectorV8: | |||
| 6690 | case Match_InvalidMatrixTileVectorV16: | |||
| 6691 | case Match_InvalidMatrixTileVectorV32: | |||
| 6692 | case Match_InvalidMatrixTileVectorV64: | |||
| 6693 | case Match_InvalidMatrixTileVectorV128: | |||
| 6694 | case Match_InvalidSVCR: | |||
| 6695 | case Match_InvalidMatrixIndexGPR32_12_15: | |||
| 6696 | case Match_InvalidMatrixIndexGPR32_8_11: | |||
| 6697 | case Match_InvalidLookupTable: | |||
| 6698 | case Match_InvalidSVEVectorListMul2x8: | |||
| 6699 | case Match_InvalidSVEVectorListMul2x16: | |||
| 6700 | case Match_InvalidSVEVectorListMul2x32: | |||
| 6701 | case Match_InvalidSVEVectorListMul2x64: | |||
| 6702 | case Match_InvalidSVEVectorListMul4x8: | |||
| 6703 | case Match_InvalidSVEVectorListMul4x16: | |||
| 6704 | case Match_InvalidSVEVectorListMul4x32: | |||
| 6705 | case Match_InvalidSVEVectorListMul4x64: | |||
| 6706 | case Match_InvalidSVEVectorListStrided2x8: | |||
| 6707 | case Match_InvalidSVEVectorListStrided2x16: | |||
| 6708 | case Match_InvalidSVEVectorListStrided2x32: | |||
| 6709 | case Match_InvalidSVEVectorListStrided2x64: | |||
| 6710 | case Match_InvalidSVEVectorListStrided4x8: | |||
| 6711 | case Match_InvalidSVEVectorListStrided4x16: | |||
| 6712 | case Match_InvalidSVEVectorListStrided4x32: | |||
| 6713 | case Match_InvalidSVEVectorListStrided4x64: | |||
| 6714 | case Match_MSR: | |||
| 6715 | case Match_MRS: { | |||
| 6716 | if (ErrorInfo >= Operands.size()) | |||
| 6717 | return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc())); | |||
| 6718 | // Any time we get here, there's nothing fancy to do. Just get the | |||
| 6719 | // operand SMLoc and display the diagnostic. | |||
| 6720 | SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc(); | |||
| 6721 | if (ErrorLoc == SMLoc()) | |||
| 6722 | ErrorLoc = IDLoc; | |||
| 6723 | return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands); | |||
| 6724 | } | |||
| 6725 | } | |||
| 6726 | ||||
| 6727 | llvm_unreachable("Implement any new match types added!"); | |||
| 6728 | } | |||
| 6729 | ||||
| 6730 | /// ParseDirective parses the AArch64-specific directives | |||
| 6731 | bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) { | |||
| 6732 | const MCContext::Environment Format = getContext().getObjectFileType(); | |||
| 6733 | bool IsMachO = Format == MCContext::IsMachO; | |||
| 6734 | bool IsCOFF = Format == MCContext::IsCOFF; | |||
| 6735 | ||||
| 6736 | auto IDVal = DirectiveID.getIdentifier().lower(); | |||
| 6737 | SMLoc Loc = DirectiveID.getLoc(); | |||
| 6738 | if (IDVal == ".arch") | |||
| 6739 | parseDirectiveArch(Loc); | |||
| 6740 | else if (IDVal == ".cpu") | |||
| 6741 | parseDirectiveCPU(Loc); | |||
| 6742 | else if (IDVal == ".tlsdesccall") | |||
| 6743 | parseDirectiveTLSDescCall(Loc); | |||
| 6744 | else if (IDVal == ".ltorg" || IDVal == ".pool") | |||
| 6745 | parseDirectiveLtorg(Loc); | |||
| 6746 | else if (IDVal == ".unreq") | |||
| 6747 | parseDirectiveUnreq(Loc); | |||
| 6748 | else if (IDVal == ".inst") | |||
| 6749 | parseDirectiveInst(Loc); | |||
| 6750 | else if (IDVal == ".cfi_negate_ra_state") | |||
| 6751 | parseDirectiveCFINegateRAState(); | |||
| 6752 | else if (IDVal == ".cfi_b_key_frame") | |||
| 6753 | parseDirectiveCFIBKeyFrame(); | |||
| 6754 | else if (IDVal == ".cfi_mte_tagged_frame") | |||
| 6755 | parseDirectiveCFIMTETaggedFrame(); | |||
| 6756 | else if (IDVal == ".arch_extension") | |||
| 6757 | parseDirectiveArchExtension(Loc); | |||
| 6758 | else if (IDVal == ".variant_pcs") | |||
| 6759 | parseDirectiveVariantPCS(Loc); | |||
| 6760 | else if (IsMachO) { | |||
| 6761 | if (IDVal == MCLOHDirectiveName()) | |||
| 6762 | parseDirectiveLOH(IDVal, Loc); | |||
| 6763 | else | |||
| 6764 | return true; | |||
| 6765 | } else if (IsCOFF) { | |||
| 6766 | if (IDVal == ".seh_stackalloc") | |||
| 6767 | parseDirectiveSEHAllocStack(Loc); | |||
| 6768 | else if (IDVal == ".seh_endprologue") | |||
| 6769 | parseDirectiveSEHPrologEnd(Loc); | |||
| 6770 | else if (IDVal == ".seh_save_r19r20_x") | |||
| 6771 | parseDirectiveSEHSaveR19R20X(Loc); | |||
| 6772 | else if (IDVal == ".seh_save_fplr") | |||
| 6773 | parseDirectiveSEHSaveFPLR(Loc); | |||
| 6774 | else if (IDVal == ".seh_save_fplr_x") | |||
| 6775 | parseDirectiveSEHSaveFPLRX(Loc); | |||
| 6776 | else if (IDVal == ".seh_save_reg") | |||
| 6777 | parseDirectiveSEHSaveReg(Loc); | |||
| 6778 | else if (IDVal == ".seh_save_reg_x") | |||
| 6779 | parseDirectiveSEHSaveRegX(Loc); | |||
| 6780 | else if (IDVal == ".seh_save_regp") | |||
| 6781 | parseDirectiveSEHSaveRegP(Loc); | |||
| 6782 | else if (IDVal == ".seh_save_regp_x") | |||
| 6783 | parseDirectiveSEHSaveRegPX(Loc); | |||
| 6784 | else if (IDVal == ".seh_save_lrpair") | |||
| 6785 | parseDirectiveSEHSaveLRPair(Loc); | |||
| 6786 | else if (IDVal == ".seh_save_freg") | |||
| 6787 | parseDirectiveSEHSaveFReg(Loc); | |||
| 6788 | else if (IDVal == ".seh_save_freg_x") | |||
| 6789 | parseDirectiveSEHSaveFRegX(Loc); | |||
| 6790 | else if (IDVal == ".seh_save_fregp") | |||
| 6791 | parseDirectiveSEHSaveFRegP(Loc); | |||
| 6792 | else if (IDVal == ".seh_save_fregp_x") | |||
| 6793 | parseDirectiveSEHSaveFRegPX(Loc); | |||
| 6794 | else if (IDVal == ".seh_set_fp") | |||
| 6795 | parseDirectiveSEHSetFP(Loc); | |||
| 6796 | else if (IDVal == ".seh_add_fp") | |||
| 6797 | parseDirectiveSEHAddFP(Loc); | |||
| 6798 | else if (IDVal == ".seh_nop") | |||
| 6799 | parseDirectiveSEHNop(Loc); | |||
| 6800 | else if (IDVal == ".seh_save_next") | |||
| 6801 | parseDirectiveSEHSaveNext(Loc); | |||
| 6802 | else if (IDVal == ".seh_startepilogue") | |||
| 6803 | parseDirectiveSEHEpilogStart(Loc); | |||
| 6804 | else if (IDVal == ".seh_endepilogue") | |||
| 6805 | parseDirectiveSEHEpilogEnd(Loc); | |||
| 6806 | else if (IDVal == ".seh_trap_frame") | |||
| 6807 | parseDirectiveSEHTrapFrame(Loc); | |||
| 6808 | else if (IDVal == ".seh_pushframe") | |||
| 6809 | parseDirectiveSEHMachineFrame(Loc); | |||
| 6810 | else if (IDVal == ".seh_context") | |||
| 6811 | parseDirectiveSEHContext(Loc); | |||
| 6812 | else if (IDVal == ".seh_clear_unwound_to_call") | |||
| 6813 | parseDirectiveSEHClearUnwoundToCall(Loc); | |||
| 6814 | else if (IDVal == ".seh_pac_sign_lr") | |||
| 6815 | parseDirectiveSEHPACSignLR(Loc); | |||
| 6816 | else if (IDVal == ".seh_save_any_reg") | |||
| 6817 | parseDirectiveSEHSaveAnyReg(Loc, false, false); | |||
| 6818 | else if (IDVal == ".seh_save_any_reg_p") | |||
| 6819 | parseDirectiveSEHSaveAnyReg(Loc, true, false); | |||
| 6820 | else if (IDVal == ".seh_save_any_reg_x") | |||
| 6821 | parseDirectiveSEHSaveAnyReg(Loc, false, true); | |||
| 6822 | else if (IDVal == ".seh_save_any_reg_px") | |||
| 6823 | parseDirectiveSEHSaveAnyReg(Loc, true, true); | |||
| 6824 | else | |||
| 6825 | return true; | |||
| 6826 | } else | |||
| 6827 | return true; | |||
| 6828 | return false; | |||
| 6829 | } | |||
| 6830 | ||||
| 6831 | static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo, | |||
| 6832 | SmallVector<StringRef, 4> &RequestedExtensions) { | |||
| 6833 | const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto"); | |||
| 6834 | const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto"); | |||
| 6835 | ||||
| 6836 | if (!NoCrypto && Crypto) { | |||
| 6837 | // Map 'generic' (and others) to sha2 and aes, because | |||
| 6838 | // that was the traditional meaning of crypto. | |||
| 6839 | if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A || | |||
| 6840 | ArchInfo == AArch64::ARMV8_3A) { | |||
| 6841 | RequestedExtensions.push_back("sha2"); | |||
| 6842 | RequestedExtensions.push_back("aes"); | |||
| 6843 | } | |||
| 6844 | if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A || | |||
| 6845 | ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A || | |||
| 6846 | ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A || | |||
| 6847 | ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A || | |||
| 6848 | ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A || | |||
| 6849 | ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) { | |||
| 6850 | RequestedExtensions.push_back("sm4"); | |||
| 6851 | RequestedExtensions.push_back("sha3"); | |||
| 6852 | RequestedExtensions.push_back("sha2"); | |||
| 6853 | RequestedExtensions.push_back("aes"); | |||
| 6854 | } | |||
| 6855 | } else if (NoCrypto) { | |||
| 6856 | // Map 'generic' (and others) to sha2 and aes, because | |||
| 6857 | // that was the traditional meaning of crypto. | |||
| 6858 | if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A || | |||
| 6859 | ArchInfo == AArch64::ARMV8_3A) { | |||
| 6860 | RequestedExtensions.push_back("nosha2"); | |||
| 6861 | RequestedExtensions.push_back("noaes"); | |||
| 6862 | } | |||
| 6863 | if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A || | |||
| 6864 | ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A || | |||
| 6865 | ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A || | |||
| 6866 | ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A || | |||
| 6867 | ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A || | |||
| 6868 | ArchInfo == AArch64::ARMV9_4A) { | |||
| 6869 | RequestedExtensions.push_back("nosm4"); | |||
| 6870 | RequestedExtensions.push_back("nosha3"); | |||
| 6871 | RequestedExtensions.push_back("nosha2"); | |||
| 6872 | RequestedExtensions.push_back("noaes"); | |||
| 6873 | } | |||
| 6874 | } | |||
| 6875 | } | |||
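// [Editor's illustration, not part of the original source] The expansion is
// purely textual; the feature bits are toggled later by the callers'
// ExtensionMap loops. A minimal sketch of the effect:
//
//   SmallVector<StringRef, 4> Exts = {"crypto"};
//   ExpandCryptoAEK(AArch64::ARMV8_4A, Exts);
//   // Exts is now {"crypto", "sm4", "sha3", "sha2", "aes"}.
//   // With {"nocrypto"} instead, it becomes
//   // {"nocrypto", "nosm4", "nosha3", "nosha2", "noaes"}.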
| 6876 | ||||
| 6877 | /// parseDirectiveArch | |||
| 6878 | /// ::= .arch token | |||
| 6879 | bool AArch64AsmParser::parseDirectiveArch(SMLoc L) { | |||
| 6880 | SMLoc ArchLoc = getLoc(); | |||
| 6881 | ||||
| 6882 | StringRef Arch, ExtensionString; | |||
| 6883 | std::tie(Arch, ExtensionString) = | |||
| 6884 | getParser().parseStringToEndOfStatement().trim().split('+'); | |||
| 6885 | ||||
| 6886 | std::optional<AArch64::ArchInfo> ArchInfo = AArch64::parseArch(Arch); | |||
| 6887 | if (!ArchInfo) | |||
| 6888 | return Error(ArchLoc, "unknown arch name"); | |||
| 6889 | ||||
| 6890 | if (parseToken(AsmToken::EndOfStatement)) | |||
| 6891 | return true; | |||
| 6892 | ||||
| 6893 | // Get the architecture and extension features. | |||
| 6894 | std::vector<StringRef> AArch64Features; | |||
| 6895 | AArch64Features.push_back(ArchInfo->ArchFeature); | |||
| 6896 | AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features); | |||
| 6897 | ||||
| 6898 | MCSubtargetInfo &STI = copySTI(); | |||
| 6899 | std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end()); | |||
| 6900 | STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic", | |||
| 6901 | join(ArchFeatures.begin(), ArchFeatures.end(), ",")); | |||
| 6902 | ||||
| 6903 | SmallVector<StringRef, 4> RequestedExtensions; | |||
| 6904 | if (!ExtensionString.empty()) | |||
| 6905 | ExtensionString.split(RequestedExtensions, '+'); | |||
| 6906 | ||||
| 6907 | ExpandCryptoAEK(*ArchInfo, RequestedExtensions); | |||
| 6908 | ||||
| 6909 | FeatureBitset Features = STI.getFeatureBits(); | |||
| 6910 | for (auto Name : RequestedExtensions) { | |||
| 6911 | bool EnableFeature = true; | |||
| 6912 | ||||
| 6913 | if (Name.startswith_insensitive("no")) { | |||
| 6914 | EnableFeature = false; | |||
| 6915 | Name = Name.substr(2); | |||
| 6916 | } | |||
| 6917 | ||||
| 6918 | for (const auto &Extension : ExtensionMap) { | |||
| 6919 | if (Extension.Name != Name) | |||
| 6920 | continue; | |||
| 6921 | ||||
| 6922 | if (Extension.Features.none()) | |||
| 6923 | report_fatal_error("unsupported architectural extension: " + Name); | |||
| 6924 | ||||
| 6925 | FeatureBitset ToggleFeatures = | |||
| 6926 | EnableFeature | |||
| 6927 | ? STI.SetFeatureBitsTransitively(~Features & Extension.Features) | |||
| 6928 | : STI.ToggleFeature(Features & Extension.Features); | |||
| 6929 | setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures)); | |||
| 6930 | break; | |||
| 6931 | } | |||
| 6932 | } | |||
| 6933 | return false; | |||
| 6934 | } | |||
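// [Editor's illustration, not part of the original source] Example directives
// accepted by the handler above:
//
//   .arch armv8.2-a
//   .arch armv8.4-a+crypto+nosve
//
// The base name is resolved through AArch64::parseArch(); each "+ext"/"+noext"
// suffix is then looked up in ExtensionMap and toggled on the copied STI.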
| 6935 | ||||
| 6936 | /// parseDirectiveArchExtension | |||
| 6937 | /// ::= .arch_extension [no]feature | |||
| 6938 | bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) { | |||
| 6939 | SMLoc ExtLoc = getLoc(); | |||
| 6940 | ||||
| 6941 | StringRef Name = getParser().parseStringToEndOfStatement().trim(); | |||
| 6942 | ||||
| 6943 | if (parseEOL()) | |||
| 6944 | return true; | |||
| 6945 | ||||
| 6946 | bool EnableFeature = true; | |||
| 6947 | if (Name.startswith_insensitive("no")) { | |||
| 6948 | EnableFeature = false; | |||
| 6949 | Name = Name.substr(2); | |||
| 6950 | } | |||
| 6951 | ||||
| 6952 | MCSubtargetInfo &STI = copySTI(); | |||
| 6953 | FeatureBitset Features = STI.getFeatureBits(); | |||
| 6954 | for (const auto &Extension : ExtensionMap) { | |||
| 6955 | if (Extension.Name != Name) | |||
| 6956 | continue; | |||
| 6957 | ||||
| 6958 | if (Extension.Features.none()) | |||
| 6959 | return Error(ExtLoc, "unsupported architectural extension: " + Name); | |||
| 6960 | ||||
| 6961 | FeatureBitset ToggleFeatures = | |||
| 6962 | EnableFeature | |||
| 6963 | ? STI.SetFeatureBitsTransitively(~Features & Extension.Features) | |||
| 6964 | : STI.ToggleFeature(Features & Extension.Features); | |||
| 6965 | setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures)); | |||
| 6966 | return false; | |||
| 6967 | } | |||
| 6968 | ||||
| 6969 | return Error(ExtLoc, "unknown architectural extension: " + Name); | |||
| 6970 | } | |||
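// [Editor's illustration, not part of the original source] Typical uses:
//
//   .arch_extension memtag
//   .arch_extension nocrypto
//
// A name missing from ExtensionMap produces the "unknown architectural
// extension" error above.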
| 6971 | ||||
| 6972 | static SMLoc incrementLoc(SMLoc L, int Offset) { | |||
| 6973 | return SMLoc::getFromPointer(L.getPointer() + Offset); | |||
| 6974 | } | |||
| 6975 | ||||
| 6976 | /// parseDirectiveCPU | |||
| 6977 | /// ::= .cpu id | |||
| 6978 | bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) { | |||
| 6979 | SMLoc CurLoc = getLoc(); | |||
| 6980 | ||||
| 6981 | StringRef CPU, ExtensionString; | |||
| 6982 | std::tie(CPU, ExtensionString) = | |||
| 6983 | getParser().parseStringToEndOfStatement().trim().split('+'); | |||
| 6984 | ||||
| 6985 | if (parseToken(AsmToken::EndOfStatement)) | |||
| 6986 | return true; | |||
| 6987 | ||||
| 6988 | SmallVector<StringRef, 4> RequestedExtensions; | |||
| 6989 | if (!ExtensionString.empty()) | |||
| 6990 | ExtensionString.split(RequestedExtensions, '+'); | |||
| 6991 | ||||
| 6992 | const std::optional<llvm::AArch64::ArchInfo> CpuArch = llvm::AArch64::getArchForCpu(CPU); | |||
| 6993 | if (!CpuArch) { | |||
| 6994 | Error(CurLoc, "unknown CPU name"); | |||
| 6995 | return false; | |||
| 6996 | } | |||
| 6997 | ExpandCryptoAEK(*CpuArch, RequestedExtensions); | |||
| 6998 | ||||
| 6999 | MCSubtargetInfo &STI = copySTI(); | |||
| 7000 | STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, ""); | |||
| 7001 | CurLoc = incrementLoc(CurLoc, CPU.size()); | |||
| 7002 | ||||
| 7003 | for (auto Name : RequestedExtensions) { | |||
| 7004 | // Advance source location past '+'. | |||
| 7005 | CurLoc = incrementLoc(CurLoc, 1); | |||
| 7006 | ||||
| 7007 | bool EnableFeature = true; | |||
| 7008 | ||||
| 7009 | if (Name.startswith_insensitive("no")) { | |||
| 7010 | EnableFeature = false; | |||
| 7011 | Name = Name.substr(2); | |||
| 7012 | } | |||
| 7013 | ||||
| 7014 | bool FoundExtension = false; | |||
| 7015 | for (const auto &Extension : ExtensionMap) { | |||
| 7016 | if (Extension.Name != Name) | |||
| 7017 | continue; | |||
| 7018 | ||||
| 7019 | if (Extension.Features.none()) | |||
| 7020 | report_fatal_error("unsupported architectural extension: " + Name); | |||
| 7021 | ||||
| 7022 | FeatureBitset Features = STI.getFeatureBits(); | |||
| 7023 | FeatureBitset ToggleFeatures = | |||
| 7024 | EnableFeature | |||
| 7025 | ? STI.SetFeatureBitsTransitively(~Features & Extension.Features) | |||
| 7026 | : STI.ToggleFeature(Features & Extension.Features); | |||
| 7027 | setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures)); | |||
| 7028 | FoundExtension = true; | |||
| 7029 | ||||
| 7030 | break; | |||
| 7031 | } | |||
| 7032 | ||||
| 7033 | if (!FoundExtension) | |||
| 7034 | Error(CurLoc, "unsupported architectural extension"); | |||
| 7035 | ||||
| 7036 | CurLoc = incrementLoc(CurLoc, Name.size()); | |||
| 7037 | } | |||
| 7038 | return false; | |||
| 7039 | } | |||
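// [Editor's illustration, not part of the original source] Example directives:
//
//   .cpu cortex-a75
//   .cpu generic+crc+nocrypto
//
// Unlike .arch, an unknown extension is diagnosed per token, with CurLoc
// advanced past each "+name" so the error points at the offending extension.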
| 7040 | ||||
| 7041 | /// parseDirectiveInst | |||
| 7042 | /// ::= .inst opcode [, ...] | |||
| 7043 | bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) { | |||
| 7044 | if (getLexer().is(AsmToken::EndOfStatement)) | |||
| 7045 | return Error(Loc, "expected expression following '.inst' directive"); | |||
| 7046 | ||||
| 7047 | auto parseOp = [&]() -> bool { | |||
| 7048 | SMLoc L = getLoc(); | |||
| 7049 | const MCExpr *Expr = nullptr; | |||
| 7050 | if (check(getParser().parseExpression(Expr), L, "expected expression")) | |||
| 7051 | return true; | |||
| 7052 | const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr); | |||
| 7053 | if (check(!Value, L, "expected constant expression")) | |||
| 7054 | return true; | |||
| 7055 | getTargetStreamer().emitInst(Value->getValue()); | |||
| 7056 | return false; | |||
| 7057 | }; | |||
| 7058 | ||||
| 7059 | return parseMany(parseOp); | |||
| 7060 | } | |||
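// [Editor's illustration, not part of the original source] ".inst" emits raw
// 32-bit instruction words, e.g.
//
//   .inst 0xd503201f               // NOP encoding
//   .inst 0xd65f03c0, 0xd503201f   // RET, then NOP
//
// Each operand must fold to an MCConstantExpr, otherwise the handler reports
// "expected constant expression".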
| 7061 | ||||
| 7062 | // parseDirectiveTLSDescCall: | |||
| 7063 | // ::= .tlsdesccall symbol | |||
| 7064 | bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) { | |||
| 7065 | StringRef Name; | |||
| 7066 | if (check(getParser().parseIdentifier(Name), L, "expected symbol") || | |||
| 7067 | parseToken(AsmToken::EndOfStatement)) | |||
| 7068 | return true; | |||
| 7069 | ||||
| 7070 | MCSymbol *Sym = getContext().getOrCreateSymbol(Name); | |||
| 7071 | const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext()); | |||
| 7072 | Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext()); | |||
| 7073 | ||||
| 7074 | MCInst Inst; | |||
| 7075 | Inst.setOpcode(AArch64::TLSDESCCALL); | |||
| 7076 | Inst.addOperand(MCOperand::createExpr(Expr)); | |||
| 7077 | ||||
| 7078 | getParser().getStreamer().emitInstruction(Inst, getSTI()); | |||
| 7079 | return false; | |||
| 7080 | } | |||
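// [Editor's illustration, not part of the original source] The directive is
// placed immediately before the indirect call of a TLS descriptor sequence,
// e.g.
//
//   .tlsdesccall var
//   blr x1
//
// It emits the TLSDESCCALL pseudo carrying a VK_TLSDESC reference to "var",
// which lets the relocation attach to the following call.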
| 7081 | ||||
| 7082 | /// ::= .loh <lohName | lohId> label1, ..., labelN | |||
| 7083 | /// The number of arguments depends on the loh identifier. | |||
| 7084 | bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) { | |||
| 7085 | MCLOHType Kind; | |||
| 7086 | if (getTok().isNot(AsmToken::Identifier)) { | |||
| 7087 | if (getTok().isNot(AsmToken::Integer)) | |||
| 7088 | return TokError("expected an identifier or a number in directive"); | |||
| 7089 | // We successfully get a numeric value for the identifier. | |||
| 7090 | // Check if it is valid. | |||
| 7091 | int64_t Id = getTok().getIntVal(); | |||
| 7092 | if (Id <= -1U && !isValidMCLOHType(Id)) | |||
| 7093 | return TokError("invalid numeric identifier in directive"); | |||
| 7094 | Kind = (MCLOHType)Id; | |||
| 7095 | } else { | |||
| 7096 | StringRef Name = getTok().getIdentifier(); | |||
| 7097 | // We successfully parse an identifier. | |||
| 7098 | // Check if it is a recognized one. | |||
| 7099 | int Id = MCLOHNameToId(Name); | |||
| 7100 | ||||
| 7101 | if (Id == -1) | |||
| 7102 | return TokError("invalid identifier in directive"); | |||
| 7103 | Kind = (MCLOHType)Id; | |||
| 7104 | } | |||
| 7105 | // Consume the identifier. | |||
| 7106 | Lex(); | |||
| 7107 | // Get the number of arguments of this LOH. | |||
| 7108 | int NbArgs = MCLOHIdToNbArgs(Kind); | |||
| 7109 | ||||
| 7110 | assert(NbArgs != -1 && "Invalid number of arguments"); | |||
| 7111 | ||||
| 7112 | SmallVector<MCSymbol *, 3> Args; | |||
| 7113 | for (int Idx = 0; Idx < NbArgs; ++Idx) { | |||
| 7114 | StringRef Name; | |||
| 7115 | if (getParser().parseIdentifier(Name)) | |||
| 7116 | return TokError("expected identifier in directive"); | |||
| 7117 | Args.push_back(getContext().getOrCreateSymbol(Name)); | |||
| 7118 | ||||
| 7119 | if (Idx + 1 == NbArgs) | |||
| 7120 | break; | |||
| 7121 | if (parseComma()) | |||
| 7122 | return true; | |||
| 7123 | } | |||
| 7124 | if (parseEOL()) | |||
| 7125 | return true; | |||
| 7126 | ||||
| 7127 | getStreamer().emitLOHDirective((MCLOHType)Kind, Args); | |||
| 7128 | return false; | |||
| 7129 | } | |||
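// [Editor's illustration, not part of the original source] MachO-only
// examples:
//
//   .loh AdrpAdrp   Lloh0, Lloh1
//   .loh AdrpLdrGot Lloh2, Lloh3
//
// The identifier (or its numeric id) selects the MCLOHType, which in turn
// fixes the number of label arguments that must follow.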
| 7130 | ||||
| 7131 | /// parseDirectiveLtorg | |||
| 7132 | /// ::= .ltorg | .pool | |||
| 7133 | bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) { | |||
| 7134 | if (parseEOL()) | |||
| 7135 | return true; | |||
| 7136 | getTargetStreamer().emitCurrentConstantPool(); | |||
| 7137 | return false; | |||
| 7138 | } | |||
| 7139 | ||||
| 7140 | /// parseDirectiveReq | |||
| 7141 | /// ::= name .req registername | |||
| 7142 | bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { | |||
| 7143 | Lex(); // Eat the '.req' token. | |||
| 7144 | SMLoc SRegLoc = getLoc(); | |||
| 7145 | RegKind RegisterKind = RegKind::Scalar; | |||
| 7146 | MCRegister RegNum; | |||
| 7147 | OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum); | |||
| 7148 | ||||
| 7149 | if (ParseRes != MatchOperand_Success) { | |||
| 7150 | StringRef Kind; | |||
| 7151 | RegisterKind = RegKind::NeonVector; | |||
| 7152 | ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector); | |||
| 7153 | ||||
| 7154 | if (ParseRes == MatchOperand_ParseFail) | |||
| 7155 | return true; | |||
| 7156 | ||||
| 7157 | if (ParseRes == MatchOperand_Success && !Kind.empty()) | |||
| 7158 | return Error(SRegLoc, "vector register without type specifier expected"); | |||
| 7159 | } | |||
| 7160 | ||||
| 7161 | if (ParseRes != MatchOperand_Success) { | |||
| 7162 | StringRef Kind; | |||
| 7163 | RegisterKind = RegKind::SVEDataVector; | |||
| 7164 | ParseRes = | |||
| 7165 | tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector); | |||
| 7166 | ||||
| 7167 | if (ParseRes == MatchOperand_ParseFail) | |||
| 7168 | return true; | |||
| 7169 | ||||
| 7170 | if (ParseRes == MatchOperand_Success && !Kind.empty()) | |||
| 7171 | return Error(SRegLoc, | |||
| 7172 | "sve vector register without type specifier expected"); | |||
| 7173 | } | |||
| 7174 | ||||
| 7175 | if (ParseRes != MatchOperand_Success) { | |||
| 7176 | StringRef Kind; | |||
| 7177 | RegisterKind = RegKind::SVEPredicateVector; | |||
| 7178 | ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector); | |||
| 7179 | ||||
| 7180 | if (ParseRes == MatchOperand_ParseFail) | |||
| 7181 | return true; | |||
| 7182 | ||||
| 7183 | if (ParseRes == MatchOperand_Success && !Kind.empty()) | |||
| 7184 | return Error(SRegLoc, | |||
| 7185 | "sve predicate register without type specifier expected"); | |||
| 7186 | } | |||
| 7187 | ||||
| 7188 | if (ParseRes != MatchOperand_Success) | |||
| 7189 | return Error(SRegLoc, "register name or alias expected"); | |||
| 7190 | ||||
| 7191 | // Shouldn't be anything else. | |||
| 7192 | if (parseEOL()) | |||
| 7193 | return true; | |||
| 7194 | ||||
| 7195 | auto pair = std::make_pair(RegisterKind, (unsigned) RegNum); | |||
| 7196 | if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair) | |||
| 7197 | Warning(L, "ignoring redefinition of register alias '" + Name + "'"); | |||
| 7198 | ||||
| 7199 | return false; | |||
| 7200 | } | |||
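// [Editor's illustration, not part of the original source] Register aliases
// use the usual GNU-style syntax handled above, e.g.
//
//   fpreg .req x29
//   acc   .req z3
//   ...
//   .unreq fpreg
//
// Redefining an existing alias to a different register is ignored with a
// warning; .unreq (below) removes the alias.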
| 7201 | ||||
| 7202 | /// parseDirectiveUnreq | |||
| 7203 | /// ::= .unreq registername | |||
| 7204 | bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) { | |||
| 7205 | if (getTok().isNot(AsmToken::Identifier)) | |||
| 7206 | return TokError("unexpected input in .unreq directive."); | |||
| 7207 | RegisterReqs.erase(getTok().getIdentifier().lower()); | |||
| 7208 | Lex(); // Eat the identifier. | |||
| 7209 | return parseToken(AsmToken::EndOfStatement); | |||
| 7210 | } | |||
| 7211 | ||||
| 7212 | bool AArch64AsmParser::parseDirectiveCFINegateRAState() { | |||
| 7213 | if (parseEOL()) | |||
| 7214 | return true; | |||
| 7215 | getStreamer().emitCFINegateRAState(); | |||
| 7216 | return false; | |||
| 7217 | } | |||
| 7218 | ||||
| 7219 | /// parseDirectiveCFIBKeyFrame | |||
| 7220 | /// ::= .cfi_b_key_frame | |||
| 7221 | bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() { | |||
| 7222 | if (parseEOL()) | |||
| 7223 | return true; | |||
| 7224 | getStreamer().emitCFIBKeyFrame(); | |||
| 7225 | return false; | |||
| 7226 | } | |||
| 7227 | ||||
| 7228 | /// parseDirectiveCFIMTETaggedFrame | |||
| 7229 | /// ::= .cfi_mte_tagged_frame | |||
| 7230 | bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() { | |||
| 7231 | if (parseEOL()) | |||
| 7232 | return true; | |||
| 7233 | getStreamer().emitCFIMTETaggedFrame(); | |||
| 7234 | return false; | |||
| 7235 | } | |||
| 7236 | ||||
| 7237 | /// parseDirectiveVariantPCS | |||
| 7238 | /// ::= .variant_pcs symbolname | |||
| 7239 | bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) { | |||
| 7240 | StringRef Name; | |||
| 7241 | if (getParser().parseIdentifier(Name)) | |||
| 7242 | return TokError("expected symbol name"); | |||
| 7243 | if (parseEOL()) | |||
| 7244 | return true; | |||
| 7245 | getTargetStreamer().emitDirectiveVariantPCS( | |||
| 7246 | getContext().getOrCreateSymbol(Name)); | |||
| 7247 | return false; | |||
| 7248 | } | |||
| 7249 | ||||
| 7250 | /// parseDirectiveSEHAllocStack | |||
| 7251 | /// ::= .seh_stackalloc | |||
| 7252 | bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) { | |||
| 7253 | int64_t Size; | |||
| 7254 | if (parseImmExpr(Size)) | |||
| 7255 | return true; | |||
| 7256 | getTargetStreamer().emitARM64WinCFIAllocStack(Size); | |||
| 7257 | return false; | |||
| 7258 | } | |||
| 7259 | ||||
| 7260 | /// parseDirectiveSEHPrologEnd | |||
| 7261 | /// ::= .seh_endprologue | |||
| 7262 | bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) { | |||
| 7263 | getTargetStreamer().emitARM64WinCFIPrologEnd(); | |||
| 7264 | return false; | |||
| 7265 | } | |||
| 7266 | ||||
| 7267 | /// parseDirectiveSEHSaveR19R20X | |||
| 7268 | /// ::= .seh_save_r19r20_x | |||
| 7269 | bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) { | |||
| 7270 | int64_t Offset; | |||
| 7271 | if (parseImmExpr(Offset)) | |||
| 7272 | return true; | |||
| 7273 | getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset); | |||
| 7274 | return false; | |||
| 7275 | } | |||
| 7276 | ||||
| 7277 | /// parseDirectiveSEHSaveFPLR | |||
| 7278 | /// ::= .seh_save_fplr | |||
| 7279 | bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) { | |||
| 7280 | int64_t Offset; | |||
| 7281 | if (parseImmExpr(Offset)) | |||
| 7282 | return true; | |||
| 7283 | getTargetStreamer().emitARM64WinCFISaveFPLR(Offset); | |||
| 7284 | return false; | |||
| 7285 | } | |||
| 7286 | ||||
| 7287 | /// parseDirectiveSEHSaveFPLRX | |||
| 7288 | /// ::= .seh_save_fplr_x | |||
| 7289 | bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) { | |||
| 7290 | int64_t Offset; | |||
| 7291 | if (parseImmExpr(Offset)) | |||
| 7292 | return true; | |||
| 7293 | getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset); | |||
| 7294 | return false; | |||
| 7295 | } | |||
| 7296 | ||||
| 7297 | /// parseDirectiveSEHSaveReg | |||
| 7298 | /// ::= .seh_save_reg | |||
| 7299 | bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) { | |||
| 7300 | unsigned Reg; | |||
| 7301 | int64_t Offset; | |||
| 7302 | if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) || | |||
| 7303 | parseComma() || parseImmExpr(Offset)) | |||
| 7304 | return true; | |||
| 7305 | getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset); | |||
| 7306 | return false; | |||
| 7307 | } | |||
| 7308 | ||||
| 7309 | /// parseDirectiveSEHSaveRegX | |||
| 7310 | /// ::= .seh_save_reg_x | |||
| 7311 | bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) { | |||
| 7312 | unsigned Reg; | |||
| 7313 | int64_t Offset; | |||
| 7314 | if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) || | |||
| 7315 | parseComma() || parseImmExpr(Offset)) | |||
| 7316 | return true; | |||
| 7317 | getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset); | |||
| 7318 | return false; | |||
| 7319 | } | |||
| 7320 | ||||
| 7321 | /// parseDirectiveSEHSaveRegP | |||
| 7322 | /// ::= .seh_save_regp | |||
| 7323 | bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) { | |||
| 7324 | unsigned Reg; | |||
| 7325 | int64_t Offset; | |||
| 7326 | if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) || | |||
| 7327 | parseComma() || parseImmExpr(Offset)) | |||
| 7328 | return true; | |||
| 7329 | getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset); | |||
| 7330 | return false; | |||
| 7331 | } | |||
| 7332 | ||||
| 7333 | /// parseDirectiveSEHSaveRegPX | |||
| 7334 | /// ::= .seh_save_regp_x | |||
| 7335 | bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) { | |||
| 7336 | unsigned Reg; | |||
| 7337 | int64_t Offset; | |||
| 7338 | if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) || | |||
| 7339 | parseComma() || parseImmExpr(Offset)) | |||
| 7340 | return true; | |||
| 7341 | getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset); | |||
| 7342 | return false; | |||
| 7343 | } | |||
| 7344 | ||||
| 7345 | /// parseDirectiveSEHSaveLRPair | |||
| 7346 | /// ::= .seh_save_lrpair | |||
| 7347 | bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) { | |||
| 7348 | unsigned Reg; | |||
| 7349 | int64_t Offset; | |||
| 7350 | L = getLoc(); | |||
| 7351 | if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) || | |||
| 7352 | parseComma() || parseImmExpr(Offset)) | |||
| 7353 | return true; | |||
| 7354 | if (check(((Reg - 19) % 2 != 0), L, | |||
| 7355 | "expected register with even offset from x19")) | |||
| 7356 | return true; | |||
| 7357 | getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset); | |||
| 7358 | return false; | |||
| 7359 | } | |||
| 7360 | ||||
| 7361 | /// parseDirectiveSEHSaveFReg | |||
| 7362 | /// ::= .seh_save_freg | |||
| 7363 | bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) { | |||
| 7364 | unsigned Reg; | |||
| 7365 | int64_t Offset; | |||
| 7366 | if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) || | |||
| 7367 | parseComma() || parseImmExpr(Offset)) | |||
| 7368 | return true; | |||
| 7369 | getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset); | |||
| 7370 | return false; | |||
| 7371 | } | |||
| 7372 | ||||
| 7373 | /// parseDirectiveSEHSaveFRegX | |||
| 7374 | /// ::= .seh_save_freg_x | |||
| 7375 | bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) { | |||
| 7376 | unsigned Reg; | |||
| 7377 | int64_t Offset; | |||
| 7378 | if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) || | |||
| 7379 | parseComma() || parseImmExpr(Offset)) | |||
| 7380 | return true; | |||
| 7381 | getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset); | |||
| 7382 | return false; | |||
| 7383 | } | |||
| 7384 | ||||
| 7385 | /// parseDirectiveSEHSaveFRegP | |||
| 7386 | /// ::= .seh_save_fregp | |||
| 7387 | bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) { | |||
| 7388 | unsigned Reg; | |||
| 7389 | int64_t Offset; | |||
| 7390 | if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) || | |||
| 7391 | parseComma() || parseImmExpr(Offset)) | |||
| 7392 | return true; | |||
| 7393 | getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset); | |||
| 7394 | return false; | |||
| 7395 | } | |||
| 7396 | ||||
| 7397 | /// parseDirectiveSEHSaveFRegPX | |||
| 7398 | /// ::= .seh_save_fregp_x | |||
| 7399 | bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) { | |||
| 7400 | unsigned Reg; | |||
| 7401 | int64_t Offset; | |||
| 7402 | if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) || | |||
| 7403 | parseComma() || parseImmExpr(Offset)) | |||
| 7404 | return true; | |||
| 7405 | getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset); | |||
| 7406 | return false; | |||
| 7407 | } | |||
| 7408 | ||||
| 7409 | /// parseDirectiveSEHSetFP | |||
| 7410 | /// ::= .seh_set_fp | |||
| 7411 | bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) { | |||
| 7412 | getTargetStreamer().emitARM64WinCFISetFP(); | |||
| 7413 | return false; | |||
| 7414 | } | |||
| 7415 | ||||
| 7416 | /// parseDirectiveSEHAddFP | |||
| 7417 | /// ::= .seh_add_fp | |||
| 7418 | bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) { | |||
| 7419 | int64_t Size; | |||
| 7420 | if (parseImmExpr(Size)) | |||
| 7421 | return true; | |||
| 7422 | getTargetStreamer().emitARM64WinCFIAddFP(Size); | |||
| 7423 | return false; | |||
| 7424 | } | |||
| 7425 | ||||
| 7426 | /// parseDirectiveSEHNop | |||
| 7427 | /// ::= .seh_nop | |||
| 7428 | bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) { | |||
| 7429 | getTargetStreamer().emitARM64WinCFINop(); | |||
| 7430 | return false; | |||
| 7431 | } | |||
| 7432 | ||||
| 7433 | /// parseDirectiveSEHSaveNext | |||
| 7434 | /// ::= .seh_save_next | |||
| 7435 | bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) { | |||
| 7436 | getTargetStreamer().emitARM64WinCFISaveNext(); | |||
| 7437 | return false; | |||
| 7438 | } | |||
| 7439 | ||||
| 7440 | /// parseDirectiveSEHEpilogStart | |||
| 7441 | /// ::= .seh_startepilogue | |||
| 7442 | bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) { | |||
| 7443 | getTargetStreamer().emitARM64WinCFIEpilogStart(); | |||
| 7444 | return false; | |||
| 7445 | } | |||
| 7446 | ||||
| 7447 | /// parseDirectiveSEHEpilogEnd | |||
| 7448 | /// ::= .seh_endepilogue | |||
| 7449 | bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) { | |||
| 7450 | getTargetStreamer().emitARM64WinCFIEpilogEnd(); | |||
| 7451 | return false; | |||
| 7452 | } | |||
| 7453 | ||||
| 7454 | /// parseDirectiveSEHTrapFrame | |||
| 7455 | /// ::= .seh_trap_frame | |||
| 7456 | bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) { | |||
| 7457 | getTargetStreamer().emitARM64WinCFITrapFrame(); | |||
| 7458 | return false; | |||
| 7459 | } | |||
| 7460 | ||||
| 7461 | /// parseDirectiveSEHMachineFrame | |||
| 7462 | /// ::= .seh_pushframe | |||
| 7463 | bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) { | |||
| 7464 | getTargetStreamer().emitARM64WinCFIMachineFrame(); | |||
| 7465 | return false; | |||
| 7466 | } | |||
| 7467 | ||||
| 7468 | /// parseDirectiveSEHContext | |||
| 7469 | /// ::= .seh_context | |||
| 7470 | bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) { | |||
| 7471 | getTargetStreamer().emitARM64WinCFIContext(); | |||
| 7472 | return false; | |||
| 7473 | } | |||
| 7474 | ||||
| 7475 | /// parseDirectiveSEHClearUnwoundToCall | |||
| 7476 | /// ::= .seh_clear_unwound_to_call | |||
| 7477 | bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) { | |||
| 7478 | getTargetStreamer().emitARM64WinCFIClearUnwoundToCall(); | |||
| 7479 | return false; | |||
| 7480 | } | |||
| 7481 | ||||
| 7482 | /// parseDirectiveSEHPACSignLR | |||
| 7483 | /// ::= .seh_pac_sign_lr | |||
| 7484 | bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) { | |||
| 7485 | getTargetStreamer().emitARM64WinCFIPACSignLR(); | |||
| 7486 | return false; | |||
| 7487 | } | |||
| 7488 | ||||
| 7489 | /// parseDirectiveSEHSaveAnyReg | |||
| 7490 | /// ::= .seh_save_any_reg | |||
| 7491 | /// ::= .seh_save_any_reg_p | |||
| 7492 | /// ::= .seh_save_any_reg_x | |||
| 7493 | /// ::= .seh_save_any_reg_px | |||
| 7494 | bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, | |||
| 7495 | bool Writeback) { | |||
| 7496 | MCRegister Reg; | |||
| 7497 | SMLoc Start, End; | |||
| 7498 | int64_t Offset; | |||
| 7499 | if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") || | |||
| 7500 | parseComma() || parseImmExpr(Offset)) | |||
| 7501 | return true; | |||
| 7502 | ||||
| 7503 | if (Reg == AArch64::FP || Reg == AArch64::LR || | |||
| 7504 | (Reg >= AArch64::X0 && Reg <= AArch64::X28)) { | |||
| 7505 | if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8)) | |||
| 7506 | return Error(L, "invalid save_any_reg offset"); | |||
| 7507 | unsigned EncodedReg; | |||
| 7508 | if (Reg == AArch64::FP) | |||
| 7509 | EncodedReg = 29; | |||
| 7510 | else if (Reg == AArch64::LR) | |||
| 7511 | EncodedReg = 30; | |||
| 7512 | else | |||
| 7513 | EncodedReg = Reg - AArch64::X0; | |||
| 7514 | if (Paired) { | |||
| 7515 | if (Reg == AArch64::LR) | |||
| 7516 | return Error(Start, "lr cannot be paired with another register"); | |||
| 7517 | if (Writeback) | |||
| 7518 | getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset); | |||
| 7519 | else | |||
| 7520 | getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset); | |||
| 7521 | } else { | |||
| 7522 | if (Writeback) | |||
| 7523 | getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset); | |||
| 7524 | else | |||
| 7525 | getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset); | |||
| 7526 | } | |||
| 7527 | } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) { | |||
| 7528 | unsigned EncodedReg = Reg - AArch64::D0; | |||
| 7529 | if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8)) | |||
| 7530 | return Error(L, "invalid save_any_reg offset"); | |||
| 7531 | if (Paired) { | |||
| 7532 | if (Reg == AArch64::D31) | |||
| 7533 | return Error(Start, "d31 cannot be paired with another register"); | |||
| 7534 | if (Writeback) | |||
| 7535 | getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset); | |||
| 7536 | else | |||
| 7537 | getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset); | |||
| 7538 | } else { | |||
| 7539 | if (Writeback) | |||
| 7540 | getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset); | |||
| 7541 | else | |||
| 7542 | getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset); | |||
| 7543 | } | |||
| 7544 | } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) { | |||
| 7545 | unsigned EncodedReg = Reg - AArch64::Q0; | |||
| 7546 | if (Offset < 0 || Offset % 16) | |||
| 7547 | return Error(L, "invalid save_any_reg offset"); | |||
| 7548 | if (Paired) { | |||
| 7549 | if (Reg == AArch64::Q31) | |||
| 7550 | return Error(Start, "q31 cannot be paired with another register"); | |||
| 7551 | if (Writeback) | |||
| 7552 | getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset); | |||
| 7553 | else | |||
| 7554 | getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset); | |||
| 7555 | } else { | |||
| 7556 | if (Writeback) | |||
| 7557 | getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset); | |||
| 7558 | else | |||
| 7559 | getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset); | |||
| 7560 | } | |||
| 7561 | } else { | |||
| 7562 | return Error(Start, "save_any_reg register must be x, q or d register"); | |||
| 7563 | } | |||
| 7564 | return false; | |||
| 7565 | } | |||
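// [Editor's illustration, not part of the original source] The four
// .seh_save_any_reg* forms take a register and a stack offset, e.g.
//
//   .seh_save_any_reg    x25, 16
//   .seh_save_any_reg_px q6, 32
//
// Offsets must be non-negative and 8-byte aligned, or 16-byte aligned for
// paired, writeback, or Q-register saves, as checked above.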
| 7566 | ||||
| 7567 | bool | |||
| 7568 | AArch64AsmParser::classifySymbolRef(const MCExpr *Expr, | |||
| 7569 | AArch64MCExpr::VariantKind &ELFRefKind, | |||
| 7570 | MCSymbolRefExpr::VariantKind &DarwinRefKind, | |||
| 7571 | int64_t &Addend) { | |||
| 7572 | ELFRefKind = AArch64MCExpr::VK_INVALID; | |||
| 7573 | DarwinRefKind = MCSymbolRefExpr::VK_None; | |||
| 7574 | Addend = 0; | |||
| 7575 | ||||
| 7576 | if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) { | |||
| 7577 | ELFRefKind = AE->getKind(); | |||
| 7578 | Expr = AE->getSubExpr(); | |||
| 7579 | } | |||
| 7580 | ||||
| 7581 | const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr); | |||
| 7582 | if (SE) { | |||
| 7583 | // It's a simple symbol reference with no addend. | |||
| 7584 | DarwinRefKind = SE->getKind(); | |||
| 7585 | return true; | |||
| 7586 | } | |||
| 7587 | ||||
| 7588 | // Check that it looks like a symbol + an addend | |||
| 7589 | MCValue Res; | |||
| 7590 | bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr); | |||
| 7591 | if (!Relocatable || Res.getSymB()) | |||
| 7592 | return false; | |||
| 7593 | ||||
| 7594 | // Treat expressions with an ELFRefKind (like ":abs_g1:3", or | |||
| 7595 | // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol. | |||
| 7596 | if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID) | |||
| 7597 | return false; | |||
| 7598 | ||||
| 7599 | if (Res.getSymA()) | |||
| 7600 | DarwinRefKind = Res.getSymA()->getKind(); | |||
| 7601 | Addend = Res.getConstant(); | |||
| 7602 | ||||
| 7603 | // It's some symbol reference + a constant addend, but really | |||
| 7604 | // shouldn't use both Darwin and ELF syntax. | |||
| 7605 | return ELFRefKind == AArch64MCExpr::VK_INVALID || | |||
| 7606 | DarwinRefKind == MCSymbolRefExpr::VK_None; | |||
| 7607 | } | |||
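// [Editor's usage sketch, hypothetical caller, not part of the original
// source]
//
//   AArch64MCExpr::VariantKind ELFKind;
//   MCSymbolRefExpr::VariantKind DarwinKind;
//   int64_t Addend;
//   if (classifySymbolRef(Expr, ELFKind, DarwinKind, Addend)) {
//     // For ":lo12:sym+4", ELFKind is roughly VK_LO12 with Addend == 4;
//     // for Darwin "sym@PAGE", DarwinKind carries the reference kind and
//     // Addend stays 0.
//   }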
| 7608 | ||||
| 7609 | /// Force static initialization. | |||
| 7610 | extern "C" LLVM_EXTERNAL_VISIBILITY__attribute__((visibility("default"))) void LLVMInitializeAArch64AsmParser() { | |||
| 7611 | RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget()); | |||
| 7612 | RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget()); | |||
| 7613 | RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target()); | |||
| 7614 | RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target()); | |||
| 7615 | RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target()); | |||
| 7616 | } | |||
| 7617 | ||||
| 7618 | #define GET_REGISTER_MATCHER | |||
| 7619 | #define GET_SUBTARGET_FEATURE_NAME | |||
| 7620 | #define GET_MATCHER_IMPLEMENTATION | |||
| 7621 | #define GET_MNEMONIC_SPELL_CHECKER | |||
| 7622 | #include "AArch64GenAsmMatcher.inc" | |||
| 7623 | ||||
| 7624 | // Define this matcher function after the auto-generated include so we | |||
| 7625 | // have the match class enum definitions. | |||
| 7626 | unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, | |||
| 7627 | unsigned Kind) { | |||
| 7628 | AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp); | |||
| 7629 | ||||
| 7630 | auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy { | |||
| 7631 | if (!Op.isImm()) | |||
| 7632 | return Match_InvalidOperand; | |||
| 7633 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); | |||
| 7634 | if (!CE) | |||
| 7635 | return Match_InvalidOperand; | |||
| 7636 | if (CE->getValue() == ExpectedVal) | |||
| 7637 | return Match_Success; | |||
| 7638 | return Match_InvalidOperand; | |||
| 7639 | }; | |||
| 7640 | ||||
| 7641 | switch (Kind) { | |||
| 7642 | default: | |||
| 7643 | return Match_InvalidOperand; | |||
| 7644 | case MCK_MPR: | |||
| 7645 | // If the Kind is a token for the MPR register class which has the "za" | |||
| 7646 | // register (SME accumulator array), check if the asm is a literal "za" | |||
| 7647 | // token. This is for the "smstart za" alias that defines the register | |||
| 7648 | // as a literal token. | |||
| 7649 | if (Op.isTokenEqual("za")) | |||
| 7650 | return Match_Success; | |||
| 7651 | return Match_InvalidOperand; | |||
| 7652 | ||||
| 7653 | // If the kind is a token for a literal immediate, check if our asm operand | |||
| 7654 | // matches. This is for InstAliases which have a fixed-value immediate in | |||
| 7655 | // the asm string, such as hints which are parsed into a specific | |||
| 7656 | // instruction definition. | |||
| 7657 | #define MATCH_HASH(N) \ | |||
| 7658 | case MCK__HASH_##N: \ | |||
| 7659 | return MatchesOpImmediate(N); | |||
| 7660 | MATCH_HASH(0) | |||
| 7661 | MATCH_HASH(1) | |||
| 7662 | MATCH_HASH(2) | |||
| 7663 | MATCH_HASH(3) | |||
| 7664 | MATCH_HASH(4) | |||
| 7665 | MATCH_HASH(6) | |||
| 7666 | MATCH_HASH(7) | |||
| 7667 | MATCH_HASH(8) | |||
| 7668 | MATCH_HASH(10) | |||
| 7669 | MATCH_HASH(12) | |||
| 7670 | MATCH_HASH(14) | |||
| 7671 | MATCH_HASH(16) | |||
| 7672 | MATCH_HASH(24) | |||
| 7673 | MATCH_HASH(25) | |||
| 7674 | MATCH_HASH(26) | |||
| 7675 | MATCH_HASH(27) | |||
| 7676 | MATCH_HASH(28) | |||
| 7677 | MATCH_HASH(29) | |||
| 7678 | MATCH_HASH(30) | |||
| 7679 | MATCH_HASH(31) | |||
| 7680 | MATCH_HASH(32) | |||
| 7681 | MATCH_HASH(40) | |||
| 7682 | MATCH_HASH(48) | |||
| 7683 | MATCH_HASH(64) | |||
| 7684 | #undef MATCH_HASH | |||
| 7685 | #define MATCH_HASH_MINUS(N) \ | |||
| 7686 | case MCK__HASH__MINUS_##N: \ | |||
| 7687 | return MatchesOpImmediate(-N); | |||
| 7688 | MATCH_HASH_MINUS(4) | |||
| 7689 | MATCH_HASH_MINUS(8) | |||
| 7690 | MATCH_HASH_MINUS(16) | |||
| 7691 | #undef MATCH_HASH_MINUS | |||
| 7692 | } | |||
| 7693 | } | |||
| 7694 | ||||
| 7695 | OperandMatchResultTy | |||
| 7696 | AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) { | |||
| 7697 | ||||
| 7698 | SMLoc S = getLoc(); | |||
| 7699 | ||||
| 7700 | if (getTok().isNot(AsmToken::Identifier)) { | |||
| 7701 | Error(S, "expected register"); | |||
| 7702 | return MatchOperand_ParseFail; | |||
| 7703 | } | |||
| 7704 | ||||
| 7705 | MCRegister FirstReg; | |||
| 7706 | OperandMatchResultTy Res = tryParseScalarRegister(FirstReg); | |||
| 7707 | if (Res != MatchOperand_Success) { | |||
| 7708 | Error(S, "expected first even register of a " | |||
| 7709 | "consecutive same-size even/odd register pair"); | |||
| 7710 | return MatchOperand_ParseFail; | |||
| 7711 | } | |||
| 7712 | ||||
| 7713 | const MCRegisterClass &WRegClass = | |||
| 7714 | AArch64MCRegisterClasses[AArch64::GPR32RegClassID]; | |||
| 7715 | const MCRegisterClass &XRegClass = | |||
| 7716 | AArch64MCRegisterClasses[AArch64::GPR64RegClassID]; | |||
| 7717 | ||||
| 7718 | bool isXReg = XRegClass.contains(FirstReg), | |||
| 7719 | isWReg = WRegClass.contains(FirstReg); | |||
| 7720 | if (!isXReg && !isWReg) { | |||
| 7721 | Error(S, "expected first even register of a " | |||
| 7722 | "consecutive same-size even/odd register pair"); | |||
| 7723 | return MatchOperand_ParseFail; | |||
| 7724 | } | |||
| 7725 | ||||
| 7726 | const MCRegisterInfo *RI = getContext().getRegisterInfo(); | |||
| 7727 | unsigned FirstEncoding = RI->getEncodingValue(FirstReg); | |||
| 7728 | ||||
| 7729 | if (FirstEncoding & 0x1) { | |||
| 7730 | Error(S, "expected first even register of a " | |||
| 7731 | "consecutive same-size even/odd register pair"); | |||
| 7732 | return MatchOperand_ParseFail; | |||
| 7733 | } | |||
| 7734 | ||||
| 7735 | if (getTok().isNot(AsmToken::Comma)) { | |||
| 7736 | Error(getLoc(), "expected comma"); | |||
| 7737 | return MatchOperand_ParseFail; | |||
| 7738 | } | |||
| 7739 | // Eat the comma | |||
| 7740 | Lex(); | |||
| 7741 | ||||
| 7742 | SMLoc E = getLoc(); | |||
| 7743 | MCRegister SecondReg; | |||
| 7744 | Res = tryParseScalarRegister(SecondReg); | |||
| 7745 | if (Res != MatchOperand_Success) { | |||
| 7746 | Error(E, "expected second odd register of a " | |||
| 7747 | "consecutive same-size even/odd register pair"); | |||
| 7748 | return MatchOperand_ParseFail; | |||
| 7749 | } | |||
| 7750 | ||||
| 7751 | if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 || | |||
| 7752 | (isXReg && !XRegClass.contains(SecondReg)) || | |||
| 7753 | (isWReg && !WRegClass.contains(SecondReg))) { | |||
| 7754 | Error(E, "expected second odd register of a " | |||
| 7755 | "consecutive same-size even/odd register pair"); | |||
| 7756 | return MatchOperand_ParseFail; | |||
| 7757 | } | |||
| 7758 | ||||
| 7759 | unsigned Pair = 0; | |||
| 7760 | if (isXReg) { | |||
| 7761 | Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64, | |||
| 7762 | &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]); | |||
| 7763 | } else { | |||
| 7764 | Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32, | |||
| 7765 | &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]); | |||
| 7766 | } | |||
| 7767 | ||||
| 7768 | Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S, | |||
| 7769 | getLoc(), getContext())); | |||
| 7770 | ||||
| 7771 | return MatchOperand_Success; | |||
| 7772 | } | |||
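// [Editor's illustration, not part of the original source] This parser backs
// the even/odd register pairs used by CASP-style instructions: "x0, x1" or
// "w2, w3" are accepted, while "x1, x2" (odd first register) and "x0, x2"
// (non-consecutive) are rejected with the diagnostics above.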
| 7773 | ||||
| 7774 | template <bool ParseShiftExtend, bool ParseSuffix> | |||
| 7775 | OperandMatchResultTy | |||
| 7776 | AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) { | |||
| 7777 | const SMLoc S = getLoc(); | |||
| 7778 | // Check for a SVE vector register specifier first. | |||
| 7779 | MCRegister RegNum; | |||
| 7780 | StringRef Kind; | |||
| 7781 | ||||
| 7782 | OperandMatchResultTy Res = | |||
| 7783 | tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector); | |||
| 7784 | ||||
| 7785 | if (Res != MatchOperand_Success) | |||
| 7786 | return Res; | |||
| 7787 | ||||
| 7788 | if (ParseSuffix && Kind.empty()) | |||
| 7789 | return MatchOperand_NoMatch; | |||
| 7790 | ||||
| 7791 | const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector); | |||
| 7792 | if (!KindRes) | |||
| 7793 | return MatchOperand_NoMatch; | |||
| 7794 | ||||
| 7795 | unsigned ElementWidth = KindRes->second; | |||
| 7796 | ||||
| 7797 | // No shift/extend is the default. | |||
| 7798 | if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) { | |||
| 7799 | Operands.push_back(AArch64Operand::CreateVectorReg( | |||
| 7800 | RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext())); | |||
| 7801 | ||||
| 7802 | OperandMatchResultTy Res = tryParseVectorIndex(Operands); | |||
| 7803 | if (Res == MatchOperand_ParseFail) | |||
| 7804 | return MatchOperand_ParseFail; | |||
| 7805 | return MatchOperand_Success; | |||
| 7806 | } | |||
| 7807 | ||||
| 7808 | // Eat the comma | |||
| 7809 | Lex(); | |||
| 7810 | ||||
| 7811 | // Match the shift | |||
| 7812 | SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd; | |||
| 7813 | Res = tryParseOptionalShiftExtend(ExtOpnd); | |||
| 7814 | if (Res != MatchOperand_Success) | |||
| 7815 | return Res; | |||
| 7816 | ||||
| 7817 | auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get()); | |||
| 7818 | Operands.push_back(AArch64Operand::CreateVectorReg( | |||
| 7819 | RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(), | |||
| 7820 | getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(), | |||
| 7821 | Ext->hasShiftExtendAmount())); | |||
| 7822 | ||||
| 7823 | return MatchOperand_Success; | |||
| 7824 | } | |||
| 7825 | ||||
| 7826 | OperandMatchResultTy | |||
| 7827 | AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) { | |||
| 7828 | MCAsmParser &Parser = getParser(); | |||
| 7829 | ||||
| 7830 | SMLoc SS = getLoc(); | |||
| 7831 | const AsmToken &TokE = getTok(); | |||
| 7832 | bool IsHash = TokE.is(AsmToken::Hash); | |||
| 7833 | ||||
| 7834 | if (!IsHash && TokE.isNot(AsmToken::Identifier)) | |||
| 7835 | return MatchOperand_NoMatch; | |||
| 7836 | ||||
| 7837 | int64_t Pattern; | |||
| 7838 | if (IsHash) { | |||
| 7839 | Lex(); // Eat hash | |||
| 7840 | ||||
| 7841 | // Parse the immediate operand. | |||
| 7842 | const MCExpr *ImmVal; | |||
| 7843 | SS = getLoc(); | |||
| 7844 | if (Parser.parseExpression(ImmVal)) | |||
| 7845 | return MatchOperand_ParseFail; | |||
| 7846 | ||||
| 7847 | auto *MCE = dyn_cast<MCConstantExpr>(ImmVal); | |||
| 7848 | if (!MCE) | |||
| 7849 | return MatchOperand_ParseFail; | |||
| 7850 | ||||
| 7851 | Pattern = MCE->getValue(); | |||
| 7852 | } else { | |||
| 7853 | // Parse the pattern | |||
| 7854 | auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString()); | |||
| 7855 | if (!Pat) | |||
| 7856 | return MatchOperand_NoMatch; | |||
| 7857 | ||||
| 7858 | Lex(); | |||
| 7859 | Pattern = Pat->Encoding; | |||
| 7860 | assert(Pattern >= 0 && Pattern < 32); | |||
| 7861 | } | |||
| 7862 | ||||
| 7863 | Operands.push_back( | |||
| 7864 | AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()), | |||
| 7865 | SS, getLoc(), getContext())); | |||
| 7866 | ||||
| 7867 | return MatchOperand_Success; | |||
| 7868 | } | |||
| 7869 | ||||
| 7870 | OperandMatchResultTy | |||
| 7871 | AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) { | |||
| 7872 | int64_t Pattern; | |||
| 7873 | SMLoc SS = getLoc(); | |||
| 7874 | const AsmToken &TokE = getTok(); | |||
| 7875 | // Parse the pattern | |||
| 7876 | auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName( | |||
| 7877 | TokE.getString()); | |||
| 7878 | if (!Pat) | |||
| 7879 | return MatchOperand_NoMatch; | |||
| 7880 | ||||
| 7881 | Lex(); | |||
| 7882 | Pattern = Pat->Encoding; | |||
| 7883 | assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist"); | |||
| 7884 | ||||
| 7885 | Operands.push_back( | |||
| 7886 | AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()), | |||
| 7887 | SS, getLoc(), getContext())); | |||
| 7888 | ||||
| 7889 | return MatchOperand_Success; | |||
| 7890 | } | |||
| 7891 | ||||
| 7892 | OperandMatchResultTy | |||
| 7893 | AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) { | |||
| 7894 | SMLoc SS = getLoc(); | |||
| 7895 | ||||
| 7896 | MCRegister XReg; | |||
| 7897 | if (tryParseScalarRegister(XReg) != MatchOperand_Success) | |||
| 7898 | return MatchOperand_NoMatch; | |||
| 7899 | ||||
| 7900 | MCContext &ctx = getContext(); | |||
| 7901 | const MCRegisterInfo *RI = ctx.getRegisterInfo(); | |||
| 7902 | int X8Reg = RI->getMatchingSuperReg( | |||
| 7903 | XReg, AArch64::x8sub_0, | |||
| 7904 | &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]); | |||
| 7905 | if (!X8Reg) { | |||
| 7906 | Error(SS, "expected an even-numbered x-register in the range [x0,x22]"); | |||
| 7907 | return MatchOperand_ParseFail; | |||
| 7908 | } | |||
| 7909 | ||||
| 7910 | Operands.push_back( | |||
| 7911 | AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx)); | |||
| 7912 | return MatchOperand_Success; | |||
| 7913 | } | |||
| 7914 | ||||
| 7915 | OperandMatchResultTy | |||
| 7916 | AArch64AsmParser::tryParseImmRange(OperandVector &Operands) { | |||
| 7917 | SMLoc S = getLoc(); | |||
| 7918 | ||||
| 7919 | if (getTok().isNot(AsmToken::Integer)) | |||
| 7920 | return MatchOperand_NoMatch; | |||
| 7921 | ||||
| 7922 | if (getLexer().peekTok().isNot(AsmToken::Colon)) | |||
| 7923 | return MatchOperand_NoMatch; | |||
| 7924 | ||||
| 7925 | const MCExpr *ImmF; | |||
| 7926 | if (getParser().parseExpression(ImmF)) | |||
| 7927 | return MatchOperand_NoMatch; | |||
| 7928 | ||||
| 7929 | if (getTok().isNot(AsmToken::Colon)) | |||
| 7930 | return MatchOperand_NoMatch; | |||
| 7931 | ||||
| 7932 | Lex(); // Eat ':' | |||
| 7933 | if (getTok().isNot(AsmToken::Integer)) | |||
| 7934 | return MatchOperand_NoMatch; | |||
| 7935 | ||||
| 7936 | SMLoc E = getTok().getLoc(); | |||
| 7937 | const MCExpr *ImmL; | |||
| 7938 | if (getParser().parseExpression(ImmL)) | |||
| 7939 | return MatchOperand_NoMatch; | |||
| 7940 | ||||
| 7941 | unsigned ImmFVal = dyn_cast<MCConstantExpr>(ImmF)->getValue(); | |||
| 7942 | unsigned ImmLVal = dyn_cast<MCConstantExpr>(ImmL)->getValue(); | |||
| 7943 | ||||
| 7944 | Operands.push_back( | |||
| 7945 | AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext())); | |||
| 7946 | return MatchOperand_Success; | |||
| 7947 | } |
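// [Editor's illustration, not part of the original source] The range operand
// has the textual form "<first>:<last>", e.g. the "0:1" or "0:3" slices used
// by SME2 ZA operands; both bounds must be integer literals separated by ':'.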