Bug Summary

File:build/source/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
Warning:line 395, column 36
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name ARMAsmParser.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-17/lib/clang/17 -D _DEBUG -D _GLIBCXX_ASSERTIONS -D _GNU_SOURCE -D _LIBCPP_ENABLE_ASSERTIONS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/ARM/AsmParser -I /build/source/llvm/lib/Target/ARM/AsmParser -I /build/source/llvm/lib/Target/ARM -I lib/Target/ARM -I include -I /build/source/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu 
-internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/source/= -source-date-epoch 1683717183 -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-05-10-133810-16478-1 -x c++ /build/source/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp

/build/source/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp

1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMBaseInstrInfo.h"
10#include "ARMFeatures.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMBaseInfo.h"
13#include "MCTargetDesc/ARMInstPrinter.h"
14#include "MCTargetDesc/ARMMCExpr.h"
15#include "MCTargetDesc/ARMMCTargetDesc.h"
16#include "TargetInfo/ARMTargetInfo.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSet.h"
26#include "llvm/ADT/StringSwitch.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCInstrDesc.h"
32#include "llvm/MC/MCInstrInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
35#include "llvm/MC/MCParser/MCAsmParserExtension.h"
36#include "llvm/MC/MCParser/MCAsmParserUtils.h"
37#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38#include "llvm/MC/MCParser/MCTargetAsmParser.h"
39#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCSection.h"
41#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSubtargetInfo.h"
43#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/SubtargetFeature.h"
45#include "llvm/MC/TargetRegistry.h"
46#include "llvm/Support/ARMBuildAttributes.h"
47#include "llvm/Support/ARMEHABI.h"
48#include "llvm/Support/Casting.h"
49#include "llvm/Support/CommandLine.h"
50#include "llvm/Support/Compiler.h"
51#include "llvm/Support/ErrorHandling.h"
52#include "llvm/Support/MathExtras.h"
53#include "llvm/Support/SMLoc.h"
54#include "llvm/Support/raw_ostream.h"
55#include "llvm/TargetParser/TargetParser.h"
56#include "llvm/TargetParser/Triple.h"
57#include <algorithm>
58#include <cassert>
59#include <cstddef>
60#include <cstdint>
61#include <iterator>
62#include <limits>
63#include <memory>
64#include <string>
65#include <utility>
66#include <vector>
67
68#define DEBUG_TYPE"asm-parser" "asm-parser"
69
70using namespace llvm;
71
72namespace llvm {
73struct ARMInstrTable {
74 MCInstrDesc Insts[4445];
75 MCOperandInfo OperandInfo[3026];
76 MCPhysReg ImplicitOps[130];
77};
78extern const ARMInstrTable ARMDescs;
79} // end namespace llvm
80
81namespace {
82
83enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
84
85static cl::opt<ImplicitItModeTy> ImplicitItMode(
86 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
87 cl::desc("Allow conditional instructions outdside of an IT block"),
88 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always
), "Accept in both ISAs, emit implicit ITs in Thumb" }
89 "Accept in both ISAs, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always
), "Accept in both ISAs, emit implicit ITs in Thumb" }
,
90 clEnumValN(ImplicitItModeTy::Never, "never",llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never
), "Warn in ARM, reject in Thumb" }
91 "Warn in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never
), "Warn in ARM, reject in Thumb" }
,
92 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly
), "Accept in ARM, reject in Thumb" }
93 "Accept in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly
), "Accept in ARM, reject in Thumb" }
,
94 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly
), "Warn in ARM, emit implicit ITs in Thumb" }
95 "Warn in ARM, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly
), "Warn in ARM, emit implicit ITs in Thumb" }
));
96
97static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
98 cl::init(false));
99
100enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
101
/// Extract the state bit for one slot of an IT block from \p Mask.
///
/// Position==0 means we are not inside an IT block at all. Position==1
/// selects the first state bit, which is always 0 (the mandatory initial
/// Then). Position==2 selects the bit stored at bit 3 of Mask, and so on
/// downwards — so the wanted bit sits (5 - Position) places above bit 0,
/// counting the always-0 bit at bit 4 for the initial Then.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
111
112class UnwindContext {
113 using Locs = SmallVector<SMLoc, 4>;
114
115 MCAsmParser &Parser;
116 Locs FnStartLocs;
117 Locs CantUnwindLocs;
118 Locs PersonalityLocs;
119 Locs PersonalityIndexLocs;
120 Locs HandlerDataLocs;
121 int FPReg;
122
123public:
124 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
125
126 bool hasFnStart() const { return !FnStartLocs.empty(); }
127 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
128 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
129
130 bool hasPersonality() const {
131 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
132 }
133
134 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
135 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
136 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
137 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
138 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
139
140 void saveFPReg(int Reg) { FPReg = Reg; }
141 int getFPReg() const { return FPReg; }
142
143 void emitFnStartLocNotes() const {
144 for (const SMLoc &Loc : FnStartLocs)
145 Parser.Note(Loc, ".fnstart was specified here");
146 }
147
148 void emitCantUnwindLocNotes() const {
149 for (const SMLoc &Loc : CantUnwindLocs)
150 Parser.Note(Loc, ".cantunwind was specified here");
151 }
152
153 void emitHandlerDataLocNotes() const {
154 for (const SMLoc &Loc : HandlerDataLocs)
155 Parser.Note(Loc, ".handlerdata was specified here");
156 }
157
158 void emitPersonalityLocNotes() const {
159 for (Locs::const_iterator PI = PersonalityLocs.begin(),
160 PE = PersonalityLocs.end(),
161 PII = PersonalityIndexLocs.begin(),
162 PIE = PersonalityIndexLocs.end();
163 PI != PE || PII != PIE;) {
164 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165 Parser.Note(*PI++, ".personality was specified here");
166 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167 Parser.Note(*PII++, ".personalityindex was specified here");
168 else
169 llvm_unreachable(".personality and .personalityindex cannot be "::llvm::llvm_unreachable_internal(".personality and .personalityindex cannot be "
"at the same location", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 170)
170 "at the same location")::llvm::llvm_unreachable_internal(".personality and .personalityindex cannot be "
"at the same location", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 170)
;
171 }
172 }
173
174 void reset() {
175 FnStartLocs = Locs();
176 CantUnwindLocs = Locs();
177 PersonalityLocs = Locs();
178 HandlerDataLocs = Locs();
179 PersonalityIndexLocs = Locs();
180 FPReg = ARM::SP;
181 }
182};
183
184// Various sets of ARM instruction mnemonics which are used by the asm parser
185class ARMMnemonicSets {
186 StringSet<> CDE;
187 StringSet<> CDEWithVPTSuffix;
188public:
189 ARMMnemonicSets(const MCSubtargetInfo &STI);
190
191 /// Returns true iff a given mnemonic is a CDE instruction
192 bool isCDEInstr(StringRef Mnemonic) {
193 // Quick check before searching the set
194 if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
195 return false;
196 return CDE.count(Mnemonic);
197 }
198
199 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
200 /// (possibly with a predication suffix "e" or "t")
201 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
202 if (!Mnemonic.startswith("vcx"))
203 return false;
204 return CDEWithVPTSuffix.count(Mnemonic);
205 }
206
207 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
208 /// (possibly with a condition suffix)
209 bool isITPredicableCDEInstr(StringRef Mnemonic) {
210 if (!Mnemonic.startswith("cx"))
211 return false;
212 return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
213 Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
214 Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
215 }
216
217 /// Return true iff a given mnemonic is an integer CDE instruction with
218 /// dual-register destination
219 bool isCDEDualRegInstr(StringRef Mnemonic) {
220 if (!Mnemonic.startswith("cx"))
221 return false;
222 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
223 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
224 Mnemonic == "cx3d" || Mnemonic == "cx3da";
225 }
226};
227
228ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
229 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
230 "cx2", "cx2a", "cx2d", "cx2da",
231 "cx3", "cx3a", "cx3d", "cx3da", })
232 CDE.insert(Mnemonic);
233 for (StringRef Mnemonic :
234 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
235 CDE.insert(Mnemonic);
236 CDEWithVPTSuffix.insert(Mnemonic);
237 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
238 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
239 }
240}
241
242class ARMAsmParser : public MCTargetAsmParser {
243 const MCRegisterInfo *MRI;
244 UnwindContext UC;
245 ARMMnemonicSets MS;
246
247 ARMTargetStreamer &getTargetStreamer() {
248 assert(getParser().getStreamer().getTargetStreamer() &&(static_cast <bool> (getParser().getStreamer().getTargetStreamer
() && "do not have a target streamer") ? void (0) : __assert_fail
("getParser().getStreamer().getTargetStreamer() && \"do not have a target streamer\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 249, __extension__
__PRETTY_FUNCTION__))
249 "do not have a target streamer")(static_cast <bool> (getParser().getStreamer().getTargetStreamer
() && "do not have a target streamer") ? void (0) : __assert_fail
("getParser().getStreamer().getTargetStreamer() && \"do not have a target streamer\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 249, __extension__
__PRETTY_FUNCTION__))
;
250 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
251 return static_cast<ARMTargetStreamer &>(TS);
252 }
253
254 // Map of register aliases registers via the .req directive.
255 StringMap<unsigned> RegisterReqs;
256
257 bool NextSymbolIsThumb;
258
259 bool useImplicitITThumb() const {
260 return ImplicitItMode == ImplicitItModeTy::Always ||
261 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
262 }
263
264 bool useImplicitITARM() const {
265 return ImplicitItMode == ImplicitItModeTy::Always ||
266 ImplicitItMode == ImplicitItModeTy::ARMOnly;
267 }
268
269 struct {
270 ARMCC::CondCodes Cond; // Condition for IT block.
271 unsigned Mask:4; // Condition mask for instructions.
272 // Starting at first 1 (from lsb).
273 // '1' condition as indicated in IT.
274 // '0' inverse of condition (else).
275 // Count of instructions in IT block is
276 // 4 - trailingzeroes(mask)
277 // Note that this does not have the same encoding
278 // as in the IT instruction, which also depends
279 // on the low bit of the condition code.
280
281 unsigned CurPosition; // Current position in parsing of IT
282 // block. In range [0,4], with 0 being the IT
283 // instruction itself. Initialized according to
284 // count of instructions in block. ~0U if no
285 // active IT block.
286
287 bool IsExplicit; // true - The IT instruction was present in the
288 // input, we should not modify it.
289 // false - The IT instruction was added
290 // implicitly, we can extend it if that
291 // would be legal.
292 } ITState;
293
294 SmallVector<MCInst, 4> PendingConditionalInsts;
295
296 void flushPendingInstructions(MCStreamer &Out) override {
297 if (!inImplicitITBlock()) {
298 assert(PendingConditionalInsts.size() == 0)(static_cast <bool> (PendingConditionalInsts.size() == 0
) ? void (0) : __assert_fail ("PendingConditionalInsts.size() == 0"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 298, __extension__
__PRETTY_FUNCTION__))
;
299 return;
300 }
301
302 // Emit the IT instruction
303 MCInst ITInst;
304 ITInst.setOpcode(ARM::t2IT);
305 ITInst.addOperand(MCOperand::createImm(ITState.Cond));
306 ITInst.addOperand(MCOperand::createImm(ITState.Mask));
307 Out.emitInstruction(ITInst, getSTI());
308
309 // Emit the conditional instructions
310 assert(PendingConditionalInsts.size() <= 4)(static_cast <bool> (PendingConditionalInsts.size() <=
4) ? void (0) : __assert_fail ("PendingConditionalInsts.size() <= 4"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 310, __extension__
__PRETTY_FUNCTION__))
;
311 for (const MCInst &Inst : PendingConditionalInsts) {
312 Out.emitInstruction(Inst, getSTI());
313 }
314 PendingConditionalInsts.clear();
315
316 // Clear the IT state
317 ITState.Mask = 0;
318 ITState.CurPosition = ~0U;
319 }
320
321 bool inITBlock() { return ITState.CurPosition != ~0U; }
322 bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
323 bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
324
325 bool lastInITBlock() {
326 return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
327 }
328
329 void forwardITPosition() {
330 if (!inITBlock()) return;
331 // Move to the next instruction in the IT block, if there is one. If not,
332 // mark the block as done, except for implicit IT blocks, which we leave
333 // open until we find an instruction that can't be added to it.
334 unsigned TZ = llvm::countr_zero(ITState.Mask);
335 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
336 ITState.CurPosition = ~0U; // Done with the IT block after this.
337 }
338
339 // Rewind the state of the current IT block, removing the last slot from it.
340 void rewindImplicitITPosition() {
341 assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail
("inImplicitITBlock()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 341, __extension__ __PRETTY_FUNCTION__))
;
342 assert(ITState.CurPosition > 1)(static_cast <bool> (ITState.CurPosition > 1) ? void
(0) : __assert_fail ("ITState.CurPosition > 1", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 342, __extension__ __PRETTY_FUNCTION__))
;
343 ITState.CurPosition--;
344 unsigned TZ = llvm::countr_zero(ITState.Mask);
345 unsigned NewMask = 0;
346 NewMask |= ITState.Mask & (0xC << TZ);
347 NewMask |= 0x2 << TZ;
348 ITState.Mask = NewMask;
349 }
350
351 // Rewind the state of the current IT block, removing the last slot from it.
352 // If we were at the first slot, this closes the IT block.
353 void discardImplicitITBlock() {
354 assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail
("inImplicitITBlock()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 354, __extension__ __PRETTY_FUNCTION__))
;
355 assert(ITState.CurPosition == 1)(static_cast <bool> (ITState.CurPosition == 1) ? void (
0) : __assert_fail ("ITState.CurPosition == 1", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 355, __extension__ __PRETTY_FUNCTION__))
;
356 ITState.CurPosition = ~0U;
357 }
358
359 // Return the low-subreg of a given Q register.
360 unsigned getDRegFromQReg(unsigned QReg) const {
361 return MRI->getSubReg(QReg, ARM::dsub_0);
362 }
363
364 // Get the condition code corresponding to the current IT block slot.
365 ARMCC::CondCodes currentITCond() {
366 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
367 return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
368 }
369
370 // Invert the condition of the current IT block slot without changing any
371 // other slots in the same block.
372 void invertCurrentITCondition() {
373 if (ITState.CurPosition == 1) {
374 ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
375 } else {
376 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
377 }
378 }
379
380 // Returns true if the current IT block is full (all 4 slots used).
381 bool isITBlockFull() {
382 return inITBlock() && (ITState.Mask & 1);
383 }
384
385 // Extend the current implicit IT block to have one more slot with the given
386 // condition code.
387 void extendImplicitITBlock(ARMCC::CondCodes Cond) {
388 assert(inImplicitITBlock())(static_cast <bool> (inImplicitITBlock()) ? void (0) : __assert_fail
("inImplicitITBlock()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 388, __extension__ __PRETTY_FUNCTION__))
;
6
'?' condition is true
389 assert(!isITBlockFull())(static_cast <bool> (!isITBlockFull()) ? void (0) : __assert_fail
("!isITBlockFull()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 389, __extension__ __PRETTY_FUNCTION__))
;
7
Assuming the condition is true
8
'?' condition is true
390 assert(Cond == ITState.Cond ||(static_cast <bool> (Cond == ITState.Cond || Cond == ARMCC
::getOppositeCondition(ITState.Cond)) ? void (0) : __assert_fail
("Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 391, __extension__
__PRETTY_FUNCTION__))
9
'?' condition is true
391 Cond == ARMCC::getOppositeCondition(ITState.Cond))(static_cast <bool> (Cond == ITState.Cond || Cond == ARMCC
::getOppositeCondition(ITState.Cond)) ? void (0) : __assert_fail
("Cond == ITState.Cond || Cond == ARMCC::getOppositeCondition(ITState.Cond)"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 391, __extension__
__PRETTY_FUNCTION__))
;
392 unsigned TZ = llvm::countr_zero(ITState.Mask);
10
Calling 'countr_zero<unsigned int>'
17
Returning from 'countr_zero<unsigned int>'
18
'TZ' initialized to 32
393 unsigned NewMask = 0;
394 // Keep any existing condition bits.
395 NewMask |= ITState.Mask & (0xE << TZ);
19
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int'
396 // Insert the new condition bit.
397 NewMask |= (Cond != ITState.Cond) << TZ;
398 // Move the trailing 1 down one bit.
399 NewMask |= 1 << (TZ - 1);
400 ITState.Mask = NewMask;
401 }
402
403 // Create a new implicit IT block with a dummy condition code.
404 void startImplicitITBlock() {
405 assert(!inITBlock())(static_cast <bool> (!inITBlock()) ? void (0) : __assert_fail
("!inITBlock()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 405, __extension__ __PRETTY_FUNCTION__))
;
406 ITState.Cond = ARMCC::AL;
407 ITState.Mask = 8;
408 ITState.CurPosition = 1;
409 ITState.IsExplicit = false;
410 }
411
412 // Create a new explicit IT block with the given condition and mask.
413 // The mask should be in the format used in ARMOperand and
414 // MCOperand, with a 1 implying 'e', regardless of the low bit of
415 // the condition.
416 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
417 assert(!inITBlock())(static_cast <bool> (!inITBlock()) ? void (0) : __assert_fail
("!inITBlock()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 417, __extension__ __PRETTY_FUNCTION__))
;
418 ITState.Cond = Cond;
419 ITState.Mask = Mask;
420 ITState.CurPosition = 0;
421 ITState.IsExplicit = true;
422 }
423
424 struct {
425 unsigned Mask : 4;
426 unsigned CurPosition;
427 } VPTState;
428 bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
429 void forwardVPTPosition() {
430 if (!inVPTBlock()) return;
431 unsigned TZ = llvm::countr_zero(VPTState.Mask);
432 if (++VPTState.CurPosition == 5 - TZ)
433 VPTState.CurPosition = ~0U;
434 }
435
436 void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
437 return getParser().Note(L, Msg, Range);
438 }
439
440 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
441 return getParser().Warning(L, Msg, Range);
442 }
443
444 bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
445 return getParser().Error(L, Msg, Range);
446 }
447
448 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
449 unsigned ListNo, bool IsARPop = false);
450 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
451 unsigned ListNo);
452
453 int tryParseRegister();
454 bool tryParseRegisterWithWriteBack(OperandVector &);
455 int tryParseShiftRegister(OperandVector &);
456 bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
457 bool AllowRAAC = false);
458 bool parseMemory(OperandVector &);
459 bool parseOperand(OperandVector &, StringRef Mnemonic);
460 bool parseImmExpr(int64_t &Out);
461 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
462 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
463 unsigned &ShiftAmount);
464 bool parseLiteralValues(unsigned Size, SMLoc L);
465 bool parseDirectiveThumb(SMLoc L);
466 bool parseDirectiveARM(SMLoc L);
467 bool parseDirectiveThumbFunc(SMLoc L);
468 bool parseDirectiveCode(SMLoc L);
469 bool parseDirectiveSyntax(SMLoc L);
470 bool parseDirectiveReq(StringRef Name, SMLoc L);
471 bool parseDirectiveUnreq(SMLoc L);
472 bool parseDirectiveArch(SMLoc L);
473 bool parseDirectiveEabiAttr(SMLoc L);
474 bool parseDirectiveCPU(SMLoc L);
475 bool parseDirectiveFPU(SMLoc L);
476 bool parseDirectiveFnStart(SMLoc L);
477 bool parseDirectiveFnEnd(SMLoc L);
478 bool parseDirectiveCantUnwind(SMLoc L);
479 bool parseDirectivePersonality(SMLoc L);
480 bool parseDirectiveHandlerData(SMLoc L);
481 bool parseDirectiveSetFP(SMLoc L);
482 bool parseDirectivePad(SMLoc L);
483 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
484 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
485 bool parseDirectiveLtorg(SMLoc L);
486 bool parseDirectiveEven(SMLoc L);
487 bool parseDirectivePersonalityIndex(SMLoc L);
488 bool parseDirectiveUnwindRaw(SMLoc L);
489 bool parseDirectiveTLSDescSeq(SMLoc L);
490 bool parseDirectiveMovSP(SMLoc L);
491 bool parseDirectiveObjectArch(SMLoc L);
492 bool parseDirectiveArchExtension(SMLoc L);
493 bool parseDirectiveAlign(SMLoc L);
494 bool parseDirectiveThumbSet(SMLoc L);
495
496 bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
497 bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
498 bool parseDirectiveSEHSaveSP(SMLoc L);
499 bool parseDirectiveSEHSaveFRegs(SMLoc L);
500 bool parseDirectiveSEHSaveLR(SMLoc L);
501 bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
502 bool parseDirectiveSEHNop(SMLoc L, bool Wide);
503 bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
504 bool parseDirectiveSEHEpilogEnd(SMLoc L);
505 bool parseDirectiveSEHCustom(SMLoc L);
506
507 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
508 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
509 unsigned &PredicationCode,
510 unsigned &VPTPredicationCode, bool &CarrySetting,
511 unsigned &ProcessorIMod, StringRef &ITMask);
512 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
513 StringRef FullInst, bool &CanAcceptCarrySet,
514 bool &CanAcceptPredicationCode,
515 bool &CanAcceptVPTPredicationCode);
516 bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
517
518 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
519 OperandVector &Operands);
520 bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
521
522 bool isThumb() const {
523 // FIXME: Can tablegen auto-generate this?
524 return getSTI().hasFeature(ARM::ModeThumb);
525 }
526
527 bool isThumbOne() const {
528 return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
529 }
530
531 bool isThumbTwo() const {
532 return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
533 }
534
535 bool hasThumb() const {
536 return getSTI().hasFeature(ARM::HasV4TOps);
537 }
538
539 bool hasThumb2() const {
540 return getSTI().hasFeature(ARM::FeatureThumb2);
541 }
542
543 bool hasV6Ops() const {
544 return getSTI().hasFeature(ARM::HasV6Ops);
545 }
546
547 bool hasV6T2Ops() const {
548 return getSTI().hasFeature(ARM::HasV6T2Ops);
549 }
550
551 bool hasV6MOps() const {
552 return getSTI().hasFeature(ARM::HasV6MOps);
553 }
554
555 bool hasV7Ops() const {
556 return getSTI().hasFeature(ARM::HasV7Ops);
557 }
558
559 bool hasV8Ops() const {
560 return getSTI().hasFeature(ARM::HasV8Ops);
561 }
562
563 bool hasV8MBaseline() const {
564 return getSTI().hasFeature(ARM::HasV8MBaselineOps);
565 }
566
567 bool hasV8MMainline() const {
568 return getSTI().hasFeature(ARM::HasV8MMainlineOps);
569 }
570 bool hasV8_1MMainline() const {
571 return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
572 }
573 bool hasMVE() const {
574 return getSTI().hasFeature(ARM::HasMVEIntegerOps);
575 }
576 bool hasMVEFloat() const {
577 return getSTI().hasFeature(ARM::HasMVEFloatOps);
578 }
579 bool hasCDE() const {
580 return getSTI().hasFeature(ARM::HasCDEOps);
581 }
582 bool has8MSecExt() const {
583 return getSTI().hasFeature(ARM::Feature8MSecExt);
584 }
585
586 bool hasARM() const {
587 return !getSTI().hasFeature(ARM::FeatureNoARM);
588 }
589
590 bool hasDSP() const {
591 return getSTI().hasFeature(ARM::FeatureDSP);
592 }
593
594 bool hasD32() const {
595 return getSTI().hasFeature(ARM::FeatureD32);
596 }
597
598 bool hasV8_1aOps() const {
599 return getSTI().hasFeature(ARM::HasV8_1aOps);
600 }
601
602 bool hasRAS() const {
603 return getSTI().hasFeature(ARM::FeatureRAS);
604 }
605
606 void SwitchMode() {
607 MCSubtargetInfo &STI = copySTI();
608 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
609 setAvailableFeatures(FB);
610 }
611
612 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
613
614 bool isMClass() const {
615 return getSTI().hasFeature(ARM::FeatureMClass);
616 }
617
618 /// @name Auto-generated Match Functions
619 /// {
620
621#define GET_ASSEMBLER_HEADER
622#include "ARMGenAsmMatcher.inc"
623
624 /// }
625
626 OperandMatchResultTy parseITCondCode(OperandVector &);
627 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
628 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
629 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
630 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
631 OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
632 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
633 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
634 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
635 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
636 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
637 int High);
638 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
639 return parsePKHImm(O, "lsl", 0, 31);
640 }
641 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
642 return parsePKHImm(O, "asr", 1, 32);
643 }
644 OperandMatchResultTy parseSetEndImm(OperandVector &);
645 OperandMatchResultTy parseShifterImm(OperandVector &);
646 OperandMatchResultTy parseRotImm(OperandVector &);
647 OperandMatchResultTy parseModImm(OperandVector &);
648 OperandMatchResultTy parseBitfield(OperandVector &);
649 OperandMatchResultTy parsePostIdxReg(OperandVector &);
650 OperandMatchResultTy parseAM3Offset(OperandVector &);
651 OperandMatchResultTy parseFPImm(OperandVector &);
652 OperandMatchResultTy parseVectorList(OperandVector &);
653 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
654 SMLoc &EndLoc);
655
656 // Asm Match Converter Methods
657 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
658 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
659 void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
660
661 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
662 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
663 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
664 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
665 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
666 bool isITBlockTerminator(MCInst &Inst) const;
667 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
668 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
669 bool Load, bool ARMMode, bool Writeback);
670
671public:
672 enum ARMMatchResultTy {
673 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
674 Match_RequiresNotITBlock,
675 Match_RequiresV6,
676 Match_RequiresThumb2,
677 Match_RequiresV8,
678 Match_RequiresFlagSetting,
679#define GET_OPERAND_DIAGNOSTIC_TYPES
680#include "ARMGenAsmMatcher.inc"
681
682 };
683
/// Construct the ARM asm parser: register the parser extension, cache the
/// register info, compute the available subtarget features, optionally emit
/// build attributes, and reset IT/VPT block tracking state.
684 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
685 const MCInstrInfo &MII, const MCTargetOptions &Options)
686 : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
687 MCAsmParserExtension::Initialize(Parser);
688
689 // Cache the MCRegisterInfo.
690 MRI = getContext().getRegisterInfo();
691
692 // Initialize the set of available features.
693 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
694
695 // Add build attributes based on the selected target.
696 if (AddBuildAttributes)
697 getTargetStreamer().emitTargetAttributes(STI);
698
699 // Not in an ITBlock to start with.
700 ITState.CurPosition = ~0U;
701
// Likewise not inside a VPT block; ~0U marks "no current position".
702 VPTState.CurPosition = ~0U;
703
704 NextSymbolIsThumb = false;
705 }
706
707 // Implementation of the MCTargetAsmParser interface:
708 bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
709 SMLoc &EndLoc) override;
710 OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
711 SMLoc &EndLoc) override;
712 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
713 SMLoc NameLoc, OperandVector &Operands) override;
714 bool ParseDirective(AsmToken DirectiveID) override;
715
716 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
717 unsigned Kind) override;
718 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
719
720 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
721 OperandVector &Operands, MCStreamer &Out,
722 uint64_t &ErrorInfo,
723 bool MatchingInlineAsm) override;
724 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
725 SmallVectorImpl<NearMissInfo> &NearMisses,
726 bool MatchingInlineAsm, bool &EmitInITBlock,
727 MCStreamer &Out);
728
729 struct NearMissMessage {
730 SMLoc Loc;
731 SmallString<128> Message;
732 };
733
734 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
735
736 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
737 SmallVectorImpl<NearMissMessage> &NearMissesOut,
738 SMLoc IDLoc, OperandVector &Operands);
739 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
740 OperandVector &Operands);
741
742 void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
743
744 void onLabelParsed(MCSymbol *Symbol) override;
745};
746
747/// ARMOperand - Instances of this class represent a parsed ARM machine
748/// operand.
749class ARMOperand : public MCParsedAsmOperand {
750 enum KindTy {
751 k_CondCode,
752 k_VPTPred,
753 k_CCOut,
754 k_ITCondMask,
755 k_CoprocNum,
756 k_CoprocReg,
757 k_CoprocOption,
758 k_Immediate,
759 k_MemBarrierOpt,
760 k_InstSyncBarrierOpt,
761 k_TraceSyncBarrierOpt,
762 k_Memory,
763 k_PostIndexRegister,
764 k_MSRMask,
765 k_BankedReg,
766 k_ProcIFlags,
767 k_VectorIndex,
768 k_Register,
769 k_RegisterList,
770 k_RegisterListWithAPSR,
771 k_DPRRegisterList,
772 k_SPRRegisterList,
773 k_FPSRegisterListWithVPR,
774 k_FPDRegisterListWithVPR,
775 k_VectorList,
776 k_VectorListAllLanes,
777 k_VectorListIndexed,
778 k_ShiftedRegister,
779 k_ShiftedImmediate,
780 k_ShifterImmediate,
781 k_RotateImmediate,
782 k_ModifiedImmediate,
783 k_ConstantPoolImmediate,
784 k_BitfieldDescriptor,
785 k_Token,
786 } Kind;
787
788 SMLoc StartLoc, EndLoc, AlignmentLoc;
789 SmallVector<unsigned, 8> Registers;
790
791 struct CCOp {
792 ARMCC::CondCodes Val;
793 };
794
795 struct VCCOp {
796 ARMVCC::VPTCodes Val;
797 };
798
799 struct CopOp {
800 unsigned Val;
801 };
802
803 struct CoprocOptionOp {
804 unsigned Val;
805 };
806
807 struct ITMaskOp {
808 unsigned Mask:4;
809 };
810
811 struct MBOptOp {
812 ARM_MB::MemBOpt Val;
813 };
814
815 struct ISBOptOp {
816 ARM_ISB::InstSyncBOpt Val;
817 };
818
819 struct TSBOptOp {
820 ARM_TSB::TraceSyncBOpt Val;
821 };
822
823 struct IFlagsOp {
824 ARM_PROC::IFlags Val;
825 };
826
827 struct MMaskOp {
828 unsigned Val;
829 };
830
831 struct BankedRegOp {
832 unsigned Val;
833 };
834
835 struct TokOp {
836 const char *Data;
837 unsigned Length;
838 };
839
840 struct RegOp {
841 unsigned RegNum;
842 };
843
844 // A vector register list is a sequential list of 1 to 4 registers.
845 struct VectorListOp {
846 unsigned RegNum;
847 unsigned Count;
848 unsigned LaneIndex;
849 bool isDoubleSpaced;
850 };
851
852 struct VectorIndexOp {
853 unsigned Val;
854 };
855
856 struct ImmOp {
857 const MCExpr *Val;
858 };
859
860 /// Combined record for all forms of ARM address expressions.
861 struct MemoryOp {
862 unsigned BaseRegNum;
863 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
864 // was specified.
865 const MCExpr *OffsetImm; // Offset immediate value
866 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
867 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
868 unsigned ShiftImm; // shift for OffsetReg.
869 unsigned Alignment; // 0 = no alignment specified
870 // n = alignment in bytes (2, 4, 8, 16, or 32)
871 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
872 };
873
874 struct PostIdxRegOp {
875 unsigned RegNum;
876 bool isAdd;
877 ARM_AM::ShiftOpc ShiftTy;
878 unsigned ShiftImm;
879 };
880
881 struct ShifterImmOp {
882 bool isASR;
883 unsigned Imm;
884 };
885
886 struct RegShiftedRegOp {
887 ARM_AM::ShiftOpc ShiftTy;
888 unsigned SrcReg;
889 unsigned ShiftReg;
890 unsigned ShiftImm;
891 };
892
893 struct RegShiftedImmOp {
894 ARM_AM::ShiftOpc ShiftTy;
895 unsigned SrcReg;
896 unsigned ShiftImm;
897 };
898
899 struct RotImmOp {
900 unsigned Imm;
901 };
902
903 struct ModImmOp {
904 unsigned Bits;
905 unsigned Rot;
906 };
907
908 struct BitfieldOp {
909 unsigned LSB;
910 unsigned Width;
911 };
912
913 union {
914 struct CCOp CC;
915 struct VCCOp VCC;
916 struct CopOp Cop;
917 struct CoprocOptionOp CoprocOption;
918 struct MBOptOp MBOpt;
919 struct ISBOptOp ISBOpt;
920 struct TSBOptOp TSBOpt;
921 struct ITMaskOp ITMask;
922 struct IFlagsOp IFlags;
923 struct MMaskOp MMask;
924 struct BankedRegOp BankedReg;
925 struct TokOp Tok;
926 struct RegOp Reg;
927 struct VectorListOp VectorList;
928 struct VectorIndexOp VectorIndex;
929 struct ImmOp Imm;
930 struct MemoryOp Memory;
931 struct PostIdxRegOp PostIdxReg;
932 struct ShifterImmOp ShifterImm;
933 struct RegShiftedRegOp RegShiftedReg;
934 struct RegShiftedImmOp RegShiftedImm;
935 struct RotImmOp RotImm;
936 struct ModImmOp ModImm;
937 struct BitfieldOp Bitfield;
938 };
939
940public:
941 ARMOperand(KindTy K) : Kind(K) {}
942
943 /// getStartLoc - Get the location of the first token of this operand.
944 SMLoc getStartLoc() const override { return StartLoc; }
945
946 /// getEndLoc - Get the location of the last token of this operand.
947 SMLoc getEndLoc() const override { return EndLoc; }
948
949 /// getLocRange - Get the range between the first and last token of this
950 /// operand.
951 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
952
953 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
954 SMLoc getAlignmentLoc() const {
955 assert(Kind == k_Memory && "Invalid access!")(static_cast <bool> (Kind == k_Memory && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Memory && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 955, __extension__
__PRETTY_FUNCTION__))
;
956 return AlignmentLoc;
957 }
958
959 ARMCC::CondCodes getCondCode() const {
960 assert(Kind == k_CondCode && "Invalid access!")(static_cast <bool> (Kind == k_CondCode && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_CondCode && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 960, __extension__
__PRETTY_FUNCTION__))
;
961 return CC.Val;
962 }
963
964 ARMVCC::VPTCodes getVPTPred() const {
965 assert(isVPTPred() && "Invalid access!")(static_cast <bool> (isVPTPred() && "Invalid access!"
) ? void (0) : __assert_fail ("isVPTPred() && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 965, __extension__
__PRETTY_FUNCTION__))
;
966 return VCC.Val;
967 }
968
969 unsigned getCoproc() const {
970 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!")(static_cast <bool> ((Kind == k_CoprocNum || Kind == k_CoprocReg
) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_CoprocNum || Kind == k_CoprocReg) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 970, __extension__
__PRETTY_FUNCTION__))
;
971 return Cop.Val;
972 }
973
974 StringRef getToken() const {
975 assert(Kind == k_Token && "Invalid access!")(static_cast <bool> (Kind == k_Token && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_Token && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 975, __extension__
__PRETTY_FUNCTION__))
;
976 return StringRef(Tok.Data, Tok.Length);
977 }
978
979 unsigned getReg() const override {
980 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!")(static_cast <bool> ((Kind == k_Register || Kind == k_CCOut
) && "Invalid access!") ? void (0) : __assert_fail ("(Kind == k_Register || Kind == k_CCOut) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 980, __extension__
__PRETTY_FUNCTION__))
;
981 return Reg.RegNum;
982 }
983
984 const SmallVectorImpl<unsigned> &getRegList() const {
985 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind ==
k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind
== k_FPDRegisterListWithVPR) && "Invalid access!") ?
void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 989, __extension__
__PRETTY_FUNCTION__))
986 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind ==
k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind
== k_FPDRegisterListWithVPR) && "Invalid access!") ?
void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 989, __extension__
__PRETTY_FUNCTION__))
987 Kind == k_FPSRegisterListWithVPR ||(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind ==
k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind
== k_FPDRegisterListWithVPR) && "Invalid access!") ?
void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 989, __extension__
__PRETTY_FUNCTION__))
988 Kind == k_FPDRegisterListWithVPR) &&(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind ==
k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind
== k_FPDRegisterListWithVPR) && "Invalid access!") ?
void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 989, __extension__
__PRETTY_FUNCTION__))
989 "Invalid access!")(static_cast <bool> ((Kind == k_RegisterList || Kind ==
k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind ==
k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind
== k_FPDRegisterListWithVPR) && "Invalid access!") ?
void (0) : __assert_fail ("(Kind == k_RegisterList || Kind == k_RegisterListWithAPSR || Kind == k_DPRRegisterList || Kind == k_SPRRegisterList || Kind == k_FPSRegisterListWithVPR || Kind == k_FPDRegisterListWithVPR) && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 989, __extension__
__PRETTY_FUNCTION__))
;
990 return Registers;
991 }
992
993 const MCExpr *getImm() const {
994 assert(isImm() && "Invalid access!")(static_cast <bool> (isImm() && "Invalid access!"
) ? void (0) : __assert_fail ("isImm() && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 994, __extension__
__PRETTY_FUNCTION__))
;
995 return Imm.Val;
996 }
997
998 const MCExpr *getConstantPoolImm() const {
999 assert(isConstantPoolImm() && "Invalid access!")(static_cast <bool> (isConstantPoolImm() && "Invalid access!"
) ? void (0) : __assert_fail ("isConstantPoolImm() && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 999, __extension__
__PRETTY_FUNCTION__))
;
1000 return Imm.Val;
1001 }
1002
1003 unsigned getVectorIndex() const {
1004 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast <bool> (Kind == k_VectorIndex && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_VectorIndex && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1004, __extension__
__PRETTY_FUNCTION__))
;
1005 return VectorIndex.Val;
1006 }
1007
1008 ARM_MB::MemBOpt getMemBarrierOpt() const {
1009 assert(Kind == k_MemBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_MemBarrierOpt &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_MemBarrierOpt && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1009, __extension__
__PRETTY_FUNCTION__))
;
1010 return MBOpt.Val;
1011 }
1012
1013 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
1014 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_InstSyncBarrierOpt &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_InstSyncBarrierOpt && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1014, __extension__
__PRETTY_FUNCTION__))
;
1015 return ISBOpt.Val;
1016 }
1017
1018 ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1019 assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!")(static_cast <bool> (Kind == k_TraceSyncBarrierOpt &&
"Invalid access!") ? void (0) : __assert_fail ("Kind == k_TraceSyncBarrierOpt && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1019, __extension__
__PRETTY_FUNCTION__))
;
1020 return TSBOpt.Val;
1021 }
1022
1023 ARM_PROC::IFlags getProcIFlags() const {
1024 assert(Kind == k_ProcIFlags && "Invalid access!")(static_cast <bool> (Kind == k_ProcIFlags && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_ProcIFlags && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1024, __extension__
__PRETTY_FUNCTION__))
;
1025 return IFlags.Val;
1026 }
1027
1028 unsigned getMSRMask() const {
1029 assert(Kind == k_MSRMask && "Invalid access!")(static_cast <bool> (Kind == k_MSRMask && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_MSRMask && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1029, __extension__
__PRETTY_FUNCTION__))
;
1030 return MMask.Val;
1031 }
1032
1033 unsigned getBankedReg() const {
1034 assert(Kind == k_BankedReg && "Invalid access!")(static_cast <bool> (Kind == k_BankedReg && "Invalid access!"
) ? void (0) : __assert_fail ("Kind == k_BankedReg && \"Invalid access!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 1034, __extension__
__PRETTY_FUNCTION__))
;
1035 return BankedReg.Val;
1036 }
1037
1038 bool isCoprocNum() const { return Kind == k_CoprocNum; }
1039 bool isCoprocReg() const { return Kind == k_CoprocReg; }
1040 bool isCoprocOption() const { return Kind == k_CoprocOption; }
1041 bool isCondCode() const { return Kind == k_CondCode; }
1042 bool isVPTPred() const { return Kind == k_VPTPred; }
1043 bool isCCOut() const { return Kind == k_CCOut; }
1044 bool isITMask() const { return Kind == k_ITCondMask; }
1045 bool isITCondCode() const { return Kind == k_CondCode; }
1046 bool isImm() const override {
1047 return Kind == k_Immediate;
1048 }
1049
1050 bool isARMBranchTarget() const {
1051 if (!isImm()) return false;
1052
1053 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1054 return CE->getValue() % 4 == 0;
1055 return true;
1056 }
1057
1058
1059 bool isThumbBranchTarget() const {
1060 if (!isImm()) return false;
1061
1062 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1063 return CE->getValue() % 2 == 0;
1064 return true;
1065 }
1066
1067 // checks whether this operand is an unsigned offset which fits is a field
1068 // of specified width and scaled by a specific number of bits
1069 template<unsigned width, unsigned scale>
1070 bool isUnsignedOffset() const {
1071 if (!isImm()) return false;
1072 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1073 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1074 int64_t Val = CE->getValue();
1075 int64_t Align = 1LL << scale;
1076 int64_t Max = Align * ((1LL << width) - 1);
1077 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1078 }
1079 return false;
1080 }
1081
1082 // checks whether this operand is an signed offset which fits is a field
1083 // of specified width and scaled by a specific number of bits
1084 template<unsigned width, unsigned scale>
1085 bool isSignedOffset() const {
1086 if (!isImm()) return false;
1087 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
1090 int64_t Align = 1LL << scale;
1091 int64_t Max = Align * ((1LL << (width-1)) - 1);
1092 int64_t Min = -Align * (1LL << (width-1));
1093 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1094 }
1095 return false;
1096 }
1097
1098 // checks whether this operand is an offset suitable for the LE /
1099 // LETP instructions in Arm v8.1M
1100 bool isLEOffset() const {
1101 if (!isImm()) return false;
1102 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1103 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1104 int64_t Val = CE->getValue();
1105 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1106 }
1107 return false;
1108 }
1109
1110 // checks whether this operand is a memory operand computed as an offset
1111 // applied to PC. the offset may have 8 bits of magnitude and is represented
1112 // with two bits of shift. textually it may be either [pc, #imm], #imm or
1113 // relocable expression...
// Accepts either a bare immediate / symbolic expression or a [pc, #imm]
// memory operand; the constant must be word-aligned and in [0, 1020].
1114 bool isThumbMemPC() const {
1115 int64_t Val = 0;
1116 if (isImm()) {
// Symbolic references get a fixup later; accept them unconditionally.
1117 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1118 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1119 if (!CE) return false;
1120 Val = CE->getValue();
1121 }
1122 else if (isGPRMem()) {
// Memory form: must be a constant immediate offset from PC itself,
// with no offset register.
1123 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1124 if(Memory.BaseRegNum != ARM::PC) return false;
1125 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
1126 Val = CE->getValue();
1127 else
1128 return false;
1129 }
1130 else return false;
// 8 bits of magnitude with an implicit 2-bit shift: multiples of 4 up to 1020.
1131 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1132 }
1133
1134 bool isFPImm() const {
1135 if (!isImm()) return false;
1136 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1137 if (!CE) return false;
1138 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1139 return Val != -1;
1140 }
1141
1142 template<int64_t N, int64_t M>
1143 bool isImmediate() const {
1144 if (!isImm()) return false;
1145 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1146 if (!CE) return false;
1147 int64_t Value = CE->getValue();
1148 return Value >= N && Value <= M;
1149 }
1150
1151 template<int64_t N, int64_t M>
1152 bool isImmediateS4() const {
1153 if (!isImm()) return false;
1154 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1155 if (!CE) return false;
1156 int64_t Value = CE->getValue();
1157 return ((Value & 3) == 0) && Value >= N && Value <= M;
1158 }
1159 template<int64_t N, int64_t M>
1160 bool isImmediateS2() const {
1161 if (!isImm()) return false;
1162 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163 if (!CE) return false;
1164 int64_t Value = CE->getValue();
1165 return ((Value & 1) == 0) && Value >= N && Value <= M;
1166 }
1167 bool isFBits16() const {
1168 return isImmediate<0, 17>();
1169 }
1170 bool isFBits32() const {
1171 return isImmediate<1, 33>();
1172 }
1173 bool isImm8s4() const {
1174 return isImmediateS4<-1020, 1020>();
1175 }
1176 bool isImm7s4() const {
1177 return isImmediateS4<-508, 508>();
1178 }
1179 bool isImm7Shift0() const {
1180 return isImmediate<-127, 127>();
1181 }
1182 bool isImm7Shift1() const {
1183 return isImmediateS2<-255, 255>();
1184 }
1185 bool isImm7Shift2() const {
1186 return isImmediateS4<-511, 511>();
1187 }
1188 bool isImm7() const {
1189 return isImmediate<-127, 127>();
1190 }
1191 bool isImm0_1020s4() const {
1192 return isImmediateS4<0, 1020>();
1193 }
1194 bool isImm0_508s4() const {
1195 return isImmediateS4<0, 508>();
1196 }
1197 bool isImm0_508s4Neg() const {
1198 if (!isImm()) return false;
1199 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1200 if (!CE) return false;
1201 int64_t Value = -CE->getValue();
1202 // explicitly exclude zero. we want that to use the normal 0_508 version.
1203 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1204 }
1205
// True if negating this constant as a 32-bit value yields something in
// [1, 4095].
1206 bool isImm0_4095Neg() const {
1207 if (!isImm()) return false;
1208 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1209 if (!CE) return false;
1210 // isImm0_4095Neg is used with 32-bit immediates only.
1211 // 32-bit immediates are zero extended to 64-bit when parsed,
1212 // thus simple -CE->getValue() results in a big negative number,
1213 // not a small positive number as intended
1214 if ((CE->getValue() >> 32) > 0) return false;
// Negate in 32-bit arithmetic so wrap-around gives the intended magnitude.
1215 uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1216 return Value > 0 && Value < 4096;
1217 }
1218
1219 bool isImm0_7() const {
1220 return isImmediate<0, 7>();
1221 }
1222
1223 bool isImm1_16() const {
1224 return isImmediate<1, 16>();
1225 }
1226
1227 bool isImm1_32() const {
1228 return isImmediate<1, 32>();
1229 }
1230
1231 bool isImm8_255() const {
1232 return isImmediate<8, 255>();
1233 }
1234
1235 bool isImm256_65535Expr() const {
1236 if (!isImm()) return false;
1237 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1238 // If it's not a constant expression, it'll generate a fixup and be
1239 // handled later.
1240 if (!CE) return true;
1241 int64_t Value = CE->getValue();
1242 return Value >= 256 && Value < 65536;
1243 }
1244
1245 bool isImm0_65535Expr() const {
1246 if (!isImm()) return false;
1247 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1248 // If it's not a constant expression, it'll generate a fixup and be
1249 // handled later.
1250 if (!CE) return true;
1251 int64_t Value = CE->getValue();
1252 return Value >= 0 && Value < 65536;
1253 }
1254
1255 bool isImm24bit() const {
1256 return isImmediate<0, 0xffffff + 1>();
1257 }
1258
1259 bool isImmThumbSR() const {
1260 return isImmediate<1, 33>();
1261 }
1262
1263 template<int shift>
1264 bool isExpImmValue(uint64_t Value) const {
1265 uint64_t mask = (1 << shift) - 1;
1266 if ((Value & mask) != 0 || (Value >> shift) > 0xff)
1267 return false;
1268 return true;
1269 }
1270
1271 template<int shift>
1272 bool isExpImm() const {
1273 if (!isImm()) return false;
1274 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1275 if (!CE) return false;
1276
1277 return isExpImmValue<shift>(CE->getValue());
1278 }
1279
// True if the bitwise inverse of this constant, taken within the low 'size'
// bits, is encodable as an 8-bit value shifted left by 'shift' bits.
1280 template<int shift, int size>
1281 bool isInvertedExpImm() const {
1282 if (!isImm()) return false;
1283 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1284 if (!CE) return false;
1285
1286 uint64_t OriginalValue = CE->getValue();
// XOR with a 'size'-bit all-ones mask to invert only the low bits.
// NOTE(review): assumes size < 64; a 64-bit 'size' would make this shift UB.
1287 uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
1288 return isExpImmValue<shift>(InvertedValue);
1289 }
1290
1291 bool isPKHLSLImm() const {
1292 return isImmediate<0, 32>();
1293 }
1294
1295 bool isPKHASRImm() const {
1296 return isImmediate<0, 33>();
1297 }
1298
// True if this operand is usable as an 'adr' label: a non-constant
// expression (handled by a fixup), or a constant that is representable as
// an ARM modified immediate either directly or after negation.
1299 bool isAdrLabel() const {
1300 // If we have an immediate that's not a constant, treat it as a label
1301 // reference needing a fixup.
1302 if (isImm() && !isa<MCConstantExpr>(getImm()))
1303 return true;
1304
1305 // If it is a constant, it must fit into a modified immediate encoding.
1306 if (!isImm()) return false;
1307 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308 if (!CE) return false;
1309 int64_t Value = CE->getValue();
// Accept both ADD- and SUB-encodable values (getSOImmVal returns -1 when
// the value has no so_imm encoding).
1310 return (ARM_AM::getSOImmVal(Value) != -1 ||
1311 ARM_AM::getSOImmVal(-Value) != -1);
1312 }
1313
// True if this operand is a valid Thumb-2 modified immediate, or a
// non-constant expression other than :upper16:/:lower16: (those must match
// isImm0_65535Expr instead).
1314 bool isT2SOImm() const {
1315 // If we have an immediate that's not a constant, treat it as an expression
1316 // needing a fixup.
1317 if (isImm() && !isa<MCConstantExpr>(getImm())) {
1318 // We want to avoid matching :upper16: and :lower16: as we want these
1319 // expressions to match in isImm0_65535Expr()
1320 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1321 return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1322 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1323 }
1324 if (!isImm()) return false;
1325 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1326 if (!CE) return false;
1327 int64_t Value = CE->getValue();
// getT2SOImmVal returns -1 when the value has no Thumb-2 so_imm encoding.
1328 return ARM_AM::getT2SOImmVal(Value) != -1;
1329 }
1330
1331 bool isT2SOImmNot() const {
1332 if (!isImm()) return false;
1333 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1334 if (!CE) return false;
1335 int64_t Value = CE->getValue();
1336 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1337 ARM_AM::getT2SOImmVal(~Value) != -1;
1338 }
1339
1340 bool isT2SOImmNeg() const {
1341 if (!isImm()) return false;
1342 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1343 if (!CE) return false;
1344 int64_t Value = CE->getValue();
1345 // Only use this when not representable as a plain so_imm.
1346 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1347 ARM_AM::getT2SOImmVal(-Value) != -1;
1348 }
1349
1350 bool isSetEndImm() const {
1351 if (!isImm()) return false;
1352 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1353 if (!CE) return false;
1354 int64_t Value = CE->getValue();
1355 return Value == 1 || Value == 0;
1356 }
1357
1358 bool isReg() const override { return Kind == k_Register; }
1359 bool isRegList() const { return Kind == k_RegisterList; }
1360 bool isRegListWithAPSR() const {
1361 return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1362 }
1363 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1364 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1365 bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1366 bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1367 bool isToken() const override { return Kind == k_Token; }
1368 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1369 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1370 bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1371 bool isMem() const override {
1372 return isGPRMem() || isMVEMem();
1373 }
1374 bool isMVEMem() const {
1375 if (Kind != k_Memory)
1376 return false;
1377 if (Memory.BaseRegNum &&
1378 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1379 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1380 return false;
1381 if (Memory.OffsetRegNum &&
1382 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1383 Memory.OffsetRegNum))
1384 return false;
1385 return true;
1386 }
// True if this is a memory operand whose base register and offset register
// (each when present) are both general-purpose registers.
1387 bool isGPRMem() const {
1388 if (Kind != k_Memory)
1389 return false;
1390 if (Memory.BaseRegNum &&
1391 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1392 return false;
1393 if (Memory.OffsetRegNum &&
1394 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1395 return false;
1396 return true;
1397 }
1398 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1399 bool isRegShiftedReg() const {
1400 return Kind == k_ShiftedRegister &&
1401 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1402 RegShiftedReg.SrcReg) &&
1403 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1404 RegShiftedReg.ShiftReg);
1405 }
1406 bool isRegShiftedImm() const {
1407 return Kind == k_ShiftedImmediate &&
1408 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1409 RegShiftedImm.SrcReg);
1410 }
1411 bool isRotImm() const { return Kind == k_RotateImmediate; }
1412
1413 template<unsigned Min, unsigned Max>
1414 bool isPowerTwoInRange() const {
1415 if (!isImm()) return false;
1416 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1417 if (!CE) return false;
1418 int64_t Value = CE->getValue();
1419 return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1420 Value <= Max;
1421 }
1422 bool isModImm() const { return Kind == k_ModifiedImmediate; }
1423
1424 bool isModImmNot() const {
1425 if (!isImm()) return false;
1426 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1427 if (!CE) return false;
1428 int64_t Value = CE->getValue();
1429 return ARM_AM::getSOImmVal(~Value) != -1;
1430 }
1431
1432 bool isModImmNeg() const {
1433 if (!isImm()) return false;
1434 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1435 if (!CE) return false;
1436 int64_t Value = CE->getValue();
1437 return ARM_AM::getSOImmVal(Value) == -1 &&
1438 ARM_AM::getSOImmVal(-Value) != -1;
1439 }
1440
1441 bool isThumbModImmNeg1_7() const {
1442 if (!isImm()) return false;
1443 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1444 if (!CE) return false;
1445 int32_t Value = -(int32_t)CE->getValue();
1446 return 0 < Value && Value < 8;
1447 }
1448
1449 bool isThumbModImmNeg8_255() const {
1450 if (!isImm()) return false;
1451 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1452 if (!CE) return false;
1453 int32_t Value = -(int32_t)CE->getValue();
1454 return 7 < Value && Value < 256;
1455 }
1456
1457 bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1458 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1459 bool isPostIdxRegShifted() const {
1460 return Kind == k_PostIndexRegister &&
1461 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1462 }
1463 bool isPostIdxReg() const {
1464 return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1465 }
1466 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1467 if (!isGPRMem())
1468 return false;
1469 // No offset of any kind.
1470 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1471 (alignOK || Memory.Alignment == Alignment);
1472 }
1473 bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1474 if (!isGPRMem())
1475 return false;
1476
1477 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1478 Memory.BaseRegNum))
1479 return false;
1480
1481 // No offset of any kind.
1482 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1483 (alignOK || Memory.Alignment == Alignment);
1484 }
1485 bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1486 if (!isGPRMem())
1487 return false;
1488
1489 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1490 Memory.BaseRegNum))
1491 return false;
1492
1493 // No offset of any kind.
1494 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1495 (alignOK || Memory.Alignment == Alignment);
1496 }
1497 bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1498 if (!isGPRMem())
1499 return false;
1500
1501 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1502 Memory.BaseRegNum))
1503 return false;
1504
1505 // No offset of any kind.
1506 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1507 (alignOK || Memory.Alignment == Alignment);
1508 }
1509 bool isMemPCRelImm12() const {
1510 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1511 return false;
1512 // Base register must be PC.
1513 if (Memory.BaseRegNum != ARM::PC)
1514 return false;
1515 // Immediate offset in range [-4095, 4095].
1516 if (!Memory.OffsetImm) return true;
1517 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1518 int64_t Val = CE->getValue();
1519 return (Val > -4096 && Val < 4096) ||
1520 (Val == std::numeric_limits<int32_t>::min());
1521 }
1522 return false;
1523 }
1524
1525 bool isAlignedMemory() const {
1526 return isMemNoOffset(true);
1527 }
1528
1529 bool isAlignedMemoryNone() const {
1530 return isMemNoOffset(false, 0);
1531 }
1532
1533 bool isDupAlignedMemoryNone() const {
1534 return isMemNoOffset(false, 0);
1535 }
1536
1537 bool isAlignedMemory16() const {
1538 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1539 return true;
1540 return isMemNoOffset(false, 0);
1541 }
1542
1543 bool isDupAlignedMemory16() const {
1544 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1545 return true;
1546 return isMemNoOffset(false, 0);
1547 }
1548
1549 bool isAlignedMemory32() const {
1550 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1551 return true;
1552 return isMemNoOffset(false, 0);
1553 }
1554
1555 bool isDupAlignedMemory32() const {
1556 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1557 return true;
1558 return isMemNoOffset(false, 0);
1559 }
1560
1561 bool isAlignedMemory64() const {
1562 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1563 return true;
1564 return isMemNoOffset(false, 0);
1565 }
1566
1567 bool isDupAlignedMemory64() const {
1568 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1569 return true;
1570 return isMemNoOffset(false, 0);
1571 }
1572
1573 bool isAlignedMemory64or128() const {
1574 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1575 return true;
1576 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1577 return true;
1578 return isMemNoOffset(false, 0);
1579 }
1580
1581 bool isDupAlignedMemory64or128() const {
1582 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1583 return true;
1584 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1585 return true;
1586 return isMemNoOffset(false, 0);
1587 }
1588
1589 bool isAlignedMemory64or128or256() const {
1590 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1591 return true;
1592 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1593 return true;
1594 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1595 return true;
1596 return isMemNoOffset(false, 0);
1597 }
1598
1599 bool isAddrMode2() const {
1600 if (!isGPRMem() || Memory.Alignment != 0) return false;
1601 // Check for register offset.
1602 if (Memory.OffsetRegNum) return true;
1603 // Immediate offset in range [-4095, 4095].
1604 if (!Memory.OffsetImm) return true;
1605 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1606 int64_t Val = CE->getValue();
1607 return Val > -4096 && Val < 4096;
1608 }
1609 return false;
1610 }
1611
1612 bool isAM2OffsetImm() const {
1613 if (!isImm()) return false;
1614 // Immediate offset in range [-4095, 4095].
1615 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1616 if (!CE) return false;
1617 int64_t Val = CE->getValue();
1618 return (Val == std::numeric_limits<int32_t>::min()) ||
1619 (Val > -4096 && Val < 4096);
1620 }
1621
1622 bool isAddrMode3() const {
1623 // If we have an immediate that's not a constant, treat it as a label
1624 // reference needing a fixup. If it is a constant, it's something else
1625 // and we reject it.
1626 if (isImm() && !isa<MCConstantExpr>(getImm()))
1627 return true;
1628 if (!isGPRMem() || Memory.Alignment != 0) return false;
1629 // No shifts are legal for AM3.
1630 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1631 // Check for register offset.
1632 if (Memory.OffsetRegNum) return true;
1633 // Immediate offset in range [-255, 255].
1634 if (!Memory.OffsetImm) return true;
1635 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1636 int64_t Val = CE->getValue();
1637 // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1638 // we have to check for this too.
1639 return (Val > -256 && Val < 256) ||
1640 Val == std::numeric_limits<int32_t>::min();
1641 }
1642 return false;
1643 }
1644
1645 bool isAM3Offset() const {
1646 if (isPostIdxReg())
1647 return true;
1648 if (!isImm())
1649 return false;
1650 // Immediate offset in range [-255, 255].
1651 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1652 if (!CE) return false;
1653 int64_t Val = CE->getValue();
1654 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1655 return (Val > -256 && Val < 256) ||
1656 Val == std::numeric_limits<int32_t>::min();
1657 }
1658
1659 bool isAddrMode5() const {
1660 // If we have an immediate that's not a constant, treat it as a label
1661 // reference needing a fixup. If it is a constant, it's something else
1662 // and we reject it.
1663 if (isImm() && !isa<MCConstantExpr>(getImm()))
1664 return true;
1665 if (!isGPRMem() || Memory.Alignment != 0) return false;
1666 // Check for register offset.
1667 if (Memory.OffsetRegNum) return false;
1668 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1669 if (!Memory.OffsetImm) return true;
1670 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1671 int64_t Val = CE->getValue();
1672 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1673 Val == std::numeric_limits<int32_t>::min();
1674 }
1675 return false;
1676 }
1677
1678 bool isAddrMode5FP16() const {
1679 // If we have an immediate that's not a constant, treat it as a label
1680 // reference needing a fixup. If it is a constant, it's something else
1681 // and we reject it.
1682 if (isImm() && !isa<MCConstantExpr>(getImm()))
1683 return true;
1684 if (!isGPRMem() || Memory.Alignment != 0) return false;
1685 // Check for register offset.
1686 if (Memory.OffsetRegNum) return false;
1687 // Immediate offset in range [-510, 510] and a multiple of 2.
1688 if (!Memory.OffsetImm) return true;
1689 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1690 int64_t Val = CE->getValue();
1691 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1692 Val == std::numeric_limits<int32_t>::min();
1693 }
1694 return false;
1695 }
1696
1697 bool isMemTBB() const {
1698 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1699 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1700 return false;
1701 return true;
1702 }
1703
1704 bool isMemTBH() const {
1705 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1706 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1707 Memory.Alignment != 0 )
1708 return false;
1709 return true;
1710 }
1711
1712 bool isMemRegOffset() const {
1713 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1714 return false;
1715 return true;
1716 }
1717
1718 bool isT2MemRegOffset() const {
1719 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1720 Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1721 return false;
1722 // Only lsl #{0, 1, 2, 3} allowed.
1723 if (Memory.ShiftType == ARM_AM::no_shift)
1724 return true;
1725 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1726 return false;
1727 return true;
1728 }
1729
1730 bool isMemThumbRR() const {
1731 // Thumb reg+reg addressing is simple. Just two registers, a base and
1732 // an offset. No shifts, negations or any other complicating factors.
1733 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1734 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1735 return false;
1736 return isARMLowRegister(Memory.BaseRegNum) &&
1737 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1738 }
1739
1740 bool isMemThumbRIs4() const {
1741 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1742 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1743 return false;
1744 // Immediate offset, multiple of 4 in range [0, 124].
1745 if (!Memory.OffsetImm) return true;
1746 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1747 int64_t Val = CE->getValue();
1748 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1749 }
1750 return false;
1751 }
1752
1753 bool isMemThumbRIs2() const {
1754 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1755 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1756 return false;
1757 // Immediate offset, multiple of 4 in range [0, 62].
1758 if (!Memory.OffsetImm) return true;
1759 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1760 int64_t Val = CE->getValue();
1761 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1762 }
1763 return false;
1764 }
1765
1766 bool isMemThumbRIs1() const {
1767 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1768 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1769 return false;
1770 // Immediate offset in range [0, 31].
1771 if (!Memory.OffsetImm) return true;
1772 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1773 int64_t Val = CE->getValue();
1774 return Val >= 0 && Val <= 31;
1775 }
1776 return false;
1777 }
1778
1779 bool isMemThumbSPI() const {
1780 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1781 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1782 return false;
1783 // Immediate offset, multiple of 4 in range [0, 1020].
1784 if (!Memory.OffsetImm) return true;
1785 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1786 int64_t Val = CE->getValue();
1787 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1788 }
1789 return false;
1790 }
1791
1792 bool isMemImm8s4Offset() const {
1793 // If we have an immediate that's not a constant, treat it as a label
1794 // reference needing a fixup. If it is a constant, it's something else
1795 // and we reject it.
1796 if (isImm() && !isa<MCConstantExpr>(getImm()))
1797 return true;
1798 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1799 return false;
1800 // Immediate offset a multiple of 4 in range [-1020, 1020].
1801 if (!Memory.OffsetImm) return true;
1802 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1803 int64_t Val = CE->getValue();
1804 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1805 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1806 Val == std::numeric_limits<int32_t>::min();
1807 }
1808 return false;
1809 }
1810
1811 bool isMemImm7s4Offset() const {
1812 // If we have an immediate that's not a constant, treat it as a label
1813 // reference needing a fixup. If it is a constant, it's something else
1814 // and we reject it.
1815 if (isImm() && !isa<MCConstantExpr>(getImm()))
1816 return true;
1817 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1818 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1819 Memory.BaseRegNum))
1820 return false;
1821 // Immediate offset a multiple of 4 in range [-508, 508].
1822 if (!Memory.OffsetImm) return true;
1823 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1824 int64_t Val = CE->getValue();
1825 // Special case, #-0 is INT32_MIN.
1826 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN(-2147483647-1);
1827 }
1828 return false;
1829 }
1830
1831 bool isMemImm0_1020s4Offset() const {
1832 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1833 return false;
1834 // Immediate offset a multiple of 4 in range [0, 1020].
1835 if (!Memory.OffsetImm) return true;
1836 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1837 int64_t Val = CE->getValue();
1838 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1839 }
1840 return false;
1841 }
1842
1843 bool isMemImm8Offset() const {
1844 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1845 return false;
1846 // Base reg of PC isn't allowed for these encodings.
1847 if (Memory.BaseRegNum == ARM::PC) return false;
1848 // Immediate offset in range [-255, 255].
1849 if (!Memory.OffsetImm) return true;
1850 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1851 int64_t Val = CE->getValue();
1852 return (Val == std::numeric_limits<int32_t>::min()) ||
1853 (Val > -256 && Val < 256);
1854 }
1855 return false;
1856 }
1857
1858 template<unsigned Bits, unsigned RegClassID>
1859 bool isMemImm7ShiftedOffset() const {
1860 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1861 !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1862 return false;
1863
1864 // Expect an immediate offset equal to an element of the range
1865 // [-127, 127], shifted left by Bits.
1866
1867 if (!Memory.OffsetImm) return true;
1868 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1869 int64_t Val = CE->getValue();
1870
1871 // INT32_MIN is a special-case value (indicating the encoding with
1872 // zero offset and the subtract bit set)
1873 if (Val == INT32_MIN(-2147483647-1))
1874 return true;
1875
1876 unsigned Divisor = 1U << Bits;
1877
1878 // Check that the low bits are zero
1879 if (Val % Divisor != 0)
1880 return false;
1881
1882 // Check that the remaining offset is within range.
1883 Val /= Divisor;
1884 return (Val >= -127 && Val <= 127);
1885 }
1886 return false;
1887 }
1888
1889 template <int shift> bool isMemRegRQOffset() const {
1890 if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
1891 return false;
1892
1893 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1894 Memory.BaseRegNum))
1895 return false;
1896 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1897 Memory.OffsetRegNum))
1898 return false;
1899
1900 if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1901 return false;
1902
1903 if (shift > 0 &&
1904 (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1905 return false;
1906
1907 return true;
1908 }
1909
1910 template <int shift> bool isMemRegQOffset() const {
1911 if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1912 return false;
1913
1914 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1915 Memory.BaseRegNum))
1916 return false;
1917
1918 if (!Memory.OffsetImm)
1919 return true;
1920 static_assert(shift < 56,
1921 "Such that we dont shift by a value higher than 62");
1922 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1923 int64_t Val = CE->getValue();
1924
1925 // The value must be a multiple of (1 << shift)
1926 if ((Val & ((1U << shift) - 1)) != 0)
1927 return false;
1928
1929 // And be in the right range, depending on the amount that it is shifted
1930 // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
1931 // separately.
1932 int64_t Range = (1U << (7 + shift)) - 1;
1933 return (Val == INT32_MIN(-2147483647-1)) || (Val > -Range && Val < Range);
1934 }
1935 return false;
1936 }
1937
1938 bool isMemPosImm8Offset() const {
1939 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1940 return false;
1941 // Immediate offset in range [0, 255].
1942 if (!Memory.OffsetImm) return true;
1943 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1944 int64_t Val = CE->getValue();
1945 return Val >= 0 && Val < 256;
1946 }
1947 return false;
1948 }
1949
1950 bool isMemNegImm8Offset() const {
1951 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1952 return false;
1953 // Base reg of PC isn't allowed for these encodings.
1954 if (Memory.BaseRegNum == ARM::PC) return false;
1955 // Immediate offset in range [-255, -1].
1956 if (!Memory.OffsetImm) return false;
1957 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1958 int64_t Val = CE->getValue();
1959 return (Val == std::numeric_limits<int32_t>::min()) ||
1960 (Val > -256 && Val < 0);
1961 }
1962 return false;
1963 }
1964
1965 bool isMemUImm12Offset() const {
1966 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1967 return false;
1968 // Immediate offset in range [0, 4095].
1969 if (!Memory.OffsetImm) return true;
1970 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1971 int64_t Val = CE->getValue();
1972 return (Val >= 0 && Val < 4096);
1973 }
1974 return false;
1975 }
1976
1977 bool isMemImm12Offset() const {
1978 // If we have an immediate that's not a constant, treat it as a label
1979 // reference needing a fixup. If it is a constant, it's something else
1980 // and we reject it.
1981
1982 if (isImm() && !isa<MCConstantExpr>(getImm()))
1983 return true;
1984
1985 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1986 return false;
1987 // Immediate offset in range [-4095, 4095].
1988 if (!Memory.OffsetImm) return true;
1989 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1990 int64_t Val = CE->getValue();
1991 return (Val > -4096 && Val < 4096) ||
1992 (Val == std::numeric_limits<int32_t>::min());
1993 }
1994 // If we have an immediate that's not a constant, treat it as a
1995 // symbolic expression needing a fixup.
1996 return true;
1997 }
1998
1999 bool isConstPoolAsmImm() const {
2000 // Delay processing of Constant Pool Immediate, this will turn into
2001 // a constant. Match no other operand
2002 return (isConstantPoolImm());
2003 }
2004
2005 bool isPostIdxImm8() const {
2006 if (!isImm()) return false;
2007 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2008 if (!CE) return false;
2009 int64_t Val = CE->getValue();
2010 return (Val > -256 && Val < 256) ||
2011 (Val == std::numeric_limits<int32_t>::min());
2012 }
2013
2014 bool isPostIdxImm8s4() const {
2015 if (!isImm()) return false;
2016 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2017 if (!CE) return false;
2018 int64_t Val = CE->getValue();
2019 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2020 (Val == std::numeric_limits<int32_t>::min());
2021 }
2022
2023 bool isMSRMask() const { return Kind == k_MSRMask; }
2024 bool isBankedReg() const { return Kind == k_BankedReg; }
2025 bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2026
2027 // NEON operands.
2028 bool isSingleSpacedVectorList() const {
2029 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2030 }
2031
2032 bool isDoubleSpacedVectorList() const {
2033 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2034 }
2035
2036 bool isVecListOneD() const {
2037 if (!isSingleSpacedVectorList()) return false;
2038 return VectorList.Count == 1;
2039 }
2040
2041 bool isVecListTwoMQ() const {
2042 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2043 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2044 VectorList.RegNum);
2045 }
2046
2047 bool isVecListDPair() const {
2048 if (!isSingleSpacedVectorList()) return false;
2049 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2050 .contains(VectorList.RegNum));
2051 }
2052
2053 bool isVecListThreeD() const {
2054 if (!isSingleSpacedVectorList()) return false;
2055 return VectorList.Count == 3;
2056 }
2057
2058 bool isVecListFourD() const {
2059 if (!isSingleSpacedVectorList()) return false;
2060 return VectorList.Count == 4;
2061 }
2062
2063 bool isVecListDPairSpaced() const {
2064 if (Kind != k_VectorList) return false;
2065 if (isSingleSpacedVectorList()) return false;
2066 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2067 .contains(VectorList.RegNum));
2068 }
2069
2070 bool isVecListThreeQ() const {
2071 if (!isDoubleSpacedVectorList()) return false;
2072 return VectorList.Count == 3;
2073 }
2074
2075 bool isVecListFourQ() const {
2076 if (!isDoubleSpacedVectorList()) return false;
2077 return VectorList.Count == 4;
2078 }
2079
2080 bool isVecListFourMQ() const {
2081 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2082 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2083 VectorList.RegNum);
2084 }
2085
2086 bool isSingleSpacedVectorAllLanes() const {
2087 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2088 }
2089
2090 bool isDoubleSpacedVectorAllLanes() const {
2091 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2092 }
2093
2094 bool isVecListOneDAllLanes() const {
2095 if (!isSingleSpacedVectorAllLanes()) return false;
2096 return VectorList.Count == 1;
2097 }
2098
2099 bool isVecListDPairAllLanes() const {
2100 if (!isSingleSpacedVectorAllLanes()) return false;
2101 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2102 .contains(VectorList.RegNum));
2103 }
2104
2105 bool isVecListDPairSpacedAllLanes() const {
2106 if (!isDoubleSpacedVectorAllLanes()) return false;
2107 return VectorList.Count == 2;
2108 }
2109
2110 bool isVecListThreeDAllLanes() const {
2111 if (!isSingleSpacedVectorAllLanes()) return false;
2112 return VectorList.Count == 3;
2113 }
2114
2115 bool isVecListThreeQAllLanes() const {
2116 if (!isDoubleSpacedVectorAllLanes()) return false;
2117 return VectorList.Count == 3;
2118 }
2119
2120 bool isVecListFourDAllLanes() const {
2121 if (!isSingleSpacedVectorAllLanes()) return false;
2122 return VectorList.Count == 4;
2123 }
2124
2125 bool isVecListFourQAllLanes() const {
2126 if (!isDoubleSpacedVectorAllLanes()) return false;
2127 return VectorList.Count == 4;
2128 }
2129
2130 bool isSingleSpacedVectorIndexed() const {
2131 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2132 }
2133
2134 bool isDoubleSpacedVectorIndexed() const {
2135 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2136 }
2137
2138 bool isVecListOneDByteIndexed() const {
2139 if (!isSingleSpacedVectorIndexed()) return false;
2140 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2141 }
2142
2143 bool isVecListOneDHWordIndexed() const {
2144 if (!isSingleSpacedVectorIndexed()) return false;
2145 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2146 }
2147
2148 bool isVecListOneDWordIndexed() const {
2149 if (!isSingleSpacedVectorIndexed()) return false;
2150 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2151 }
2152
2153 bool isVecListTwoDByteIndexed() const {
2154 if (!isSingleSpacedVectorIndexed()) return false;
2155 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2156 }
2157
2158 bool isVecListTwoDHWordIndexed() const {
2159 if (!isSingleSpacedVectorIndexed()) return false;
2160 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2161 }
2162
2163 bool isVecListTwoQWordIndexed() const {
2164 if (!isDoubleSpacedVectorIndexed()) return false;
2165 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2166 }
2167
2168 bool isVecListTwoQHWordIndexed() const {
2169 if (!isDoubleSpacedVectorIndexed()) return false;
2170 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2171 }
2172
2173 bool isVecListTwoDWordIndexed() const {
2174 if (!isSingleSpacedVectorIndexed()) return false;
2175 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2176 }
2177
2178 bool isVecListThreeDByteIndexed() const {
2179 if (!isSingleSpacedVectorIndexed()) return false;
2180 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2181 }
2182
2183 bool isVecListThreeDHWordIndexed() const {
2184 if (!isSingleSpacedVectorIndexed()) return false;
2185 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2186 }
2187
2188 bool isVecListThreeQWordIndexed() const {
2189 if (!isDoubleSpacedVectorIndexed()) return false;
2190 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2191 }
2192
2193 bool isVecListThreeQHWordIndexed() const {
2194 if (!isDoubleSpacedVectorIndexed()) return false;
2195 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2196 }
2197
2198 bool isVecListThreeDWordIndexed() const {
2199 if (!isSingleSpacedVectorIndexed()) return false;
2200 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2201 }
2202
2203 bool isVecListFourDByteIndexed() const {
2204 if (!isSingleSpacedVectorIndexed()) return false;
2205 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2206 }
2207
2208 bool isVecListFourDHWordIndexed() const {
2209 if (!isSingleSpacedVectorIndexed()) return false;
2210 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2211 }
2212
2213 bool isVecListFourQWordIndexed() const {
2214 if (!isDoubleSpacedVectorIndexed()) return false;
2215 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2216 }
2217
2218 bool isVecListFourQHWordIndexed() const {
2219 if (!isDoubleSpacedVectorIndexed()) return false;
2220 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2221 }
2222
2223 bool isVecListFourDWordIndexed() const {
2224 if (!isSingleSpacedVectorIndexed()) return false;
2225 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2226 }
2227
2228 bool isVectorIndex() const { return Kind == k_VectorIndex; }
2229
2230 template <unsigned NumLanes>
2231 bool isVectorIndexInRange() const {
2232 if (Kind != k_VectorIndex) return false;
2233 return VectorIndex.Val < NumLanes;
2234 }
2235
2236 bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
2237 bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
2238 bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
2239 bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2240
2241 template<int PermittedValue, int OtherPermittedValue>
2242 bool isMVEPairVectorIndex() const {
2243 if (Kind != k_VectorIndex) return false;
2244 return VectorIndex.Val == PermittedValue ||
2245 VectorIndex.Val == OtherPermittedValue;
2246 }
2247
2248 bool isNEONi8splat() const {
2249 if (!isImm()) return false;
2250 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2251 // Must be a constant.
2252 if (!CE) return false;
2253 int64_t Value = CE->getValue();
2254 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2255 // value.
2256 return Value >= 0 && Value < 256;
2257 }
2258
2259 bool isNEONi16splat() const {
2260 if (isNEONByteReplicate(2))
2261 return false; // Leave that for bytes replication and forbid by default.
2262 if (!isImm())
2263 return false;
2264 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2265 // Must be a constant.
2266 if (!CE) return false;
2267 unsigned Value = CE->getValue();
2268 return ARM_AM::isNEONi16splat(Value);
2269 }
2270
2271 bool isNEONi16splatNot() const {
2272 if (!isImm())
2273 return false;
2274 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2275 // Must be a constant.
2276 if (!CE) return false;
2277 unsigned Value = CE->getValue();
2278 return ARM_AM::isNEONi16splat(~Value & 0xffff);
2279 }
2280
2281 bool isNEONi32splat() const {
2282 if (isNEONByteReplicate(4))
2283 return false; // Leave that for bytes replication and forbid by default.
2284 if (!isImm())
2285 return false;
2286 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2287 // Must be a constant.
2288 if (!CE) return false;
2289 unsigned Value = CE->getValue();
2290 return ARM_AM::isNEONi32splat(Value);
2291 }
2292
2293 bool isNEONi32splatNot() const {
2294 if (!isImm())
2295 return false;
2296 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2297 // Must be a constant.
2298 if (!CE) return false;
2299 unsigned Value = CE->getValue();
2300 return ARM_AM::isNEONi32splat(~Value);
2301 }
2302
// True iff Value is encodable as a NEON i32 VMOV/VMVN immediate: set
// bits confined to one byte (X000, 0X00, 00X0, 000X), or the 00Xf /
// 0Xff forms where the low byte(s) are all-ones.
static bool isValidNEONi32vmovImm(int64_t Value) {
  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
  return ((Value & 0xffffffffffffff00) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0) ||
         ((Value & 0xffffffffff00ffff) == 0) ||
         ((Value & 0xffffffff00ffffff) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0xff) ||
         ((Value & 0xffffffffff00ffff) == 0xffff);
}
2313
2314 bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2315 assert((Width == 8 || Width == 16 || Width == 32) &&(static_cast <bool> ((Width == 8 || Width == 16 || Width
== 32) && "Invalid element width") ? void (0) : __assert_fail
("(Width == 8 || Width == 16 || Width == 32) && \"Invalid element width\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2316, __extension__
__PRETTY_FUNCTION__))
2316 "Invalid element width")(static_cast <bool> ((Width == 8 || Width == 16 || Width
== 32) && "Invalid element width") ? void (0) : __assert_fail
("(Width == 8 || Width == 16 || Width == 32) && \"Invalid element width\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2316, __extension__
__PRETTY_FUNCTION__))
;
2317 assert(NumElems * Width <= 64 && "Invalid result width")(static_cast <bool> (NumElems * Width <= 64 &&
"Invalid result width") ? void (0) : __assert_fail ("NumElems * Width <= 64 && \"Invalid result width\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2317, __extension__
__PRETTY_FUNCTION__))
;
2318
2319 if (!isImm())
2320 return false;
2321 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2322 // Must be a constant.
2323 if (!CE)
2324 return false;
2325 int64_t Value = CE->getValue();
2326 if (!Value)
2327 return false; // Don't bother with zero.
2328 if (Inv)
2329 Value = ~Value;
2330
2331 uint64_t Mask = (1ull << Width) - 1;
2332 uint64_t Elem = Value & Mask;
2333 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2334 return false;
2335 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2336 return false;
2337
2338 for (unsigned i = 1; i < NumElems; ++i) {
2339 Value >>= Width;
2340 if ((Value & Mask) != Elem)
2341 return false;
2342 }
2343 return true;
2344 }
2345
2346 bool isNEONByteReplicate(unsigned NumBytes) const {
2347 return isNEONReplicate(8, NumBytes, false);
2348 }
2349
// Sanity-check the template arguments of the replicate predicates:
// source width 8/16/32, destination width 16/32/64, and FromW < ToW.
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
         "Invalid source width");
  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
         "Invalid destination width");
  assert(FromW < ToW && "ToW is not less than FromW");
}
2357
2358 template<unsigned FromW, unsigned ToW>
2359 bool isNEONmovReplicate() const {
2360 checkNeonReplicateArgs(FromW, ToW);
2361 if (ToW == 64 && isNEONi64splat())
2362 return false;
2363 return isNEONReplicate(FromW, ToW / FromW, false);
2364 }
2365
2366 template<unsigned FromW, unsigned ToW>
2367 bool isNEONinvReplicate() const {
2368 checkNeonReplicateArgs(FromW, ToW);
2369 return isNEONReplicate(FromW, ToW / FromW, true);
2370 }
2371
2372 bool isNEONi32vmov() const {
2373 if (isNEONByteReplicate(4))
2374 return false; // Let it to be classified as byte-replicate case.
2375 if (!isImm())
2376 return false;
2377 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2378 // Must be a constant.
2379 if (!CE)
2380 return false;
2381 return isValidNEONi32vmovImm(CE->getValue());
2382 }
2383
2384 bool isNEONi32vmovNeg() const {
2385 if (!isImm()) return false;
2386 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2387 // Must be a constant.
2388 if (!CE) return false;
2389 return isValidNEONi32vmovImm(~CE->getValue());
2390 }
2391
2392 bool isNEONi64splat() const {
2393 if (!isImm()) return false;
2394 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2395 // Must be a constant.
2396 if (!CE) return false;
2397 uint64_t Value = CE->getValue();
2398 // i64 value with each byte being either 0 or 0xff.
2399 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2400 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2401 return true;
2402 }
2403
2404 template<int64_t Angle, int64_t Remainder>
2405 bool isComplexRotation() const {
2406 if (!isImm()) return false;
2407
2408 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2409 if (!CE) return false;
2410 uint64_t Value = CE->getValue();
2411
2412 return (Value % Angle == Remainder && Value <= 270);
2413 }
2414
2415 bool isMVELongShift() const {
2416 if (!isImm()) return false;
2417 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2418 // Must be a constant.
2419 if (!CE) return false;
2420 uint64_t Value = CE->getValue();
2421 return Value >= 1 && Value <= 32;
2422 }
2423
2424 bool isMveSaturateOp() const {
2425 if (!isImm()) return false;
2426 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2427 if (!CE) return false;
2428 uint64_t Value = CE->getValue();
2429 return Value == 48 || Value == 64;
2430 }
2431
2432 bool isITCondCodeNoAL() const {
2433 if (!isITCondCode()) return false;
2434 ARMCC::CondCodes CC = getCondCode();
2435 return CC != ARMCC::AL;
2436 }
2437
2438 bool isITCondCodeRestrictedI() const {
2439 if (!isITCondCode())
2440 return false;
2441 ARMCC::CondCodes CC = getCondCode();
2442 return CC == ARMCC::EQ || CC == ARMCC::NE;
2443 }
2444
2445 bool isITCondCodeRestrictedS() const {
2446 if (!isITCondCode())
2447 return false;
2448 ARMCC::CondCodes CC = getCondCode();
2449 return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2450 CC == ARMCC::GE;
2451 }
2452
2453 bool isITCondCodeRestrictedU() const {
2454 if (!isITCondCode())
2455 return false;
2456 ARMCC::CondCodes CC = getCondCode();
2457 return CC == ARMCC::HS || CC == ARMCC::HI;
2458 }
2459
2460 bool isITCondCodeRestrictedFP() const {
2461 if (!isITCondCode())
2462 return false;
2463 ARMCC::CondCodes CC = getCondCode();
2464 return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2465 CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2466 }
2467
2468 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2469 // Add as immediates when possible. Null MCExpr = 0.
2470 if (!Expr)
2471 Inst.addOperand(MCOperand::createImm(0));
2472 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2473 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2474 else
2475 Inst.addOperand(MCOperand::createExpr(Expr));
2476 }
2477
2478 void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2479 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2479, __extension__
__PRETTY_FUNCTION__))
;
2480 addExpr(Inst, getImm());
2481 }
2482
2483 void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2484 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2484, __extension__
__PRETTY_FUNCTION__))
;
2485 addExpr(Inst, getImm());
2486 }
2487
2488 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2489 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2489, __extension__
__PRETTY_FUNCTION__))
;
2490 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2491 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2492 Inst.addOperand(MCOperand::createReg(RegNum));
2493 }
2494
2495 void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2496 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2496, __extension__
__PRETTY_FUNCTION__))
;
2497 Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2498 unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2499 Inst.addOperand(MCOperand::createReg(RegNum));
2500 Inst.addOperand(MCOperand::createReg(0));
2501 }
2502
2503 void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2504 assert(N == 4 && "Invalid number of operands!")(static_cast <bool> (N == 4 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 4 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2504, __extension__
__PRETTY_FUNCTION__))
;
2505 addVPTPredNOperands(Inst, N-1);
2506 unsigned RegNum;
2507 if (getVPTPred() == ARMVCC::None) {
2508 RegNum = 0;
2509 } else {
2510 unsigned NextOpIndex = Inst.getNumOperands();
2511 const MCInstrDesc &MCID =
2512 ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
2513 int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2514 assert(TiedOp >= 0 &&(static_cast <bool> (TiedOp >= 0 && "Inactive register in vpred_r is not tied to an output!"
) ? void (0) : __assert_fail ("TiedOp >= 0 && \"Inactive register in vpred_r is not tied to an output!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2515, __extension__
__PRETTY_FUNCTION__))
2515 "Inactive register in vpred_r is not tied to an output!")(static_cast <bool> (TiedOp >= 0 && "Inactive register in vpred_r is not tied to an output!"
) ? void (0) : __assert_fail ("TiedOp >= 0 && \"Inactive register in vpred_r is not tied to an output!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2515, __extension__
__PRETTY_FUNCTION__))
;
2516 RegNum = Inst.getOperand(TiedOp).getReg();
2517 }
2518 Inst.addOperand(MCOperand::createReg(RegNum));
2519 }
2520
2521 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2522 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2522, __extension__
__PRETTY_FUNCTION__))
;
2523 Inst.addOperand(MCOperand::createImm(getCoproc()));
2524 }
2525
2526 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2527 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2527, __extension__
__PRETTY_FUNCTION__))
;
2528 Inst.addOperand(MCOperand::createImm(getCoproc()));
2529 }
2530
2531 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2532 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2532, __extension__
__PRETTY_FUNCTION__))
;
2533 Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2534 }
2535
2536 void addITMaskOperands(MCInst &Inst, unsigned N) const {
2537 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2537, __extension__
__PRETTY_FUNCTION__))
;
2538 Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2539 }
2540
2541 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2542 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2542, __extension__
__PRETTY_FUNCTION__))
;
2543 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2544 }
2545
2546 void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2547 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2547, __extension__
__PRETTY_FUNCTION__))
;
2548 Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2549 }
2550
2551 void addCCOutOperands(MCInst &Inst, unsigned N) const {
2552 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2552, __extension__
__PRETTY_FUNCTION__))
;
2553 Inst.addOperand(MCOperand::createReg(getReg()));
2554 }
2555
2556 void addRegOperands(MCInst &Inst, unsigned N) const {
2557 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2557, __extension__
__PRETTY_FUNCTION__))
;
2558 Inst.addOperand(MCOperand::createReg(getReg()));
2559 }
2560
2561 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2562 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2562, __extension__
__PRETTY_FUNCTION__))
;
2563 assert(isRegShiftedReg() &&(static_cast <bool> (isRegShiftedReg() && "addRegShiftedRegOperands() on non-RegShiftedReg!"
) ? void (0) : __assert_fail ("isRegShiftedReg() && \"addRegShiftedRegOperands() on non-RegShiftedReg!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2564, __extension__
__PRETTY_FUNCTION__))
2564 "addRegShiftedRegOperands() on non-RegShiftedReg!")(static_cast <bool> (isRegShiftedReg() && "addRegShiftedRegOperands() on non-RegShiftedReg!"
) ? void (0) : __assert_fail ("isRegShiftedReg() && \"addRegShiftedRegOperands() on non-RegShiftedReg!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2564, __extension__
__PRETTY_FUNCTION__))
;
2565 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2566 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2567 Inst.addOperand(MCOperand::createImm(
2568 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2569 }
2570
2571 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2572 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2572, __extension__
__PRETTY_FUNCTION__))
;
2573 assert(isRegShiftedImm() &&(static_cast <bool> (isRegShiftedImm() && "addRegShiftedImmOperands() on non-RegShiftedImm!"
) ? void (0) : __assert_fail ("isRegShiftedImm() && \"addRegShiftedImmOperands() on non-RegShiftedImm!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2574, __extension__
__PRETTY_FUNCTION__))
2574 "addRegShiftedImmOperands() on non-RegShiftedImm!")(static_cast <bool> (isRegShiftedImm() && "addRegShiftedImmOperands() on non-RegShiftedImm!"
) ? void (0) : __assert_fail ("isRegShiftedImm() && \"addRegShiftedImmOperands() on non-RegShiftedImm!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2574, __extension__
__PRETTY_FUNCTION__))
;
2575 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2576 // Shift of #32 is encoded as 0 where permitted
2577 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2578 Inst.addOperand(MCOperand::createImm(
2579 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2580 }
2581
2582 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2583 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2583, __extension__
__PRETTY_FUNCTION__))
;
2584 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2585 ShifterImm.Imm));
2586 }
2587
2588 void addRegListOperands(MCInst &Inst, unsigned N) const {
2589 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2589, __extension__
__PRETTY_FUNCTION__))
;
2590 const SmallVectorImpl<unsigned> &RegList = getRegList();
2591 for (unsigned Reg : RegList)
2592 Inst.addOperand(MCOperand::createReg(Reg));
2593 }
2594
2595 void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2596 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2596, __extension__
__PRETTY_FUNCTION__))
;
2597 const SmallVectorImpl<unsigned> &RegList = getRegList();
2598 for (unsigned Reg : RegList)
2599 Inst.addOperand(MCOperand::createReg(Reg));
2600 }
2601
2602 void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2603 addRegListOperands(Inst, N);
2604 }
2605
2606 void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2607 addRegListOperands(Inst, N);
2608 }
2609
2610 void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2611 addRegListOperands(Inst, N);
2612 }
2613
2614 void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2615 addRegListOperands(Inst, N);
2616 }
2617
2618 void addRotImmOperands(MCInst &Inst, unsigned N) const {
2619 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2619, __extension__
__PRETTY_FUNCTION__))
;
2620 // Encoded as val>>3. The printer handles display as 8, 16, 24.
2621 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2622 }
2623
2624 void addModImmOperands(MCInst &Inst, unsigned N) const {
2625 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2625, __extension__
__PRETTY_FUNCTION__))
;
2626
2627 // Support for fixups (MCFixup)
2628 if (isImm())
2629 return addImmOperands(Inst, N);
2630
2631 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2632 }
2633
2634 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2635 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2635, __extension__
__PRETTY_FUNCTION__))
;
2636 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2637 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2638 Inst.addOperand(MCOperand::createImm(Enc));
2639 }
2640
2641 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2642 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2642, __extension__
__PRETTY_FUNCTION__))
;
2643 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2644 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2645 Inst.addOperand(MCOperand::createImm(Enc));
2646 }
2647
2648 void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2649 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2649, __extension__
__PRETTY_FUNCTION__))
;
2650 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2651 uint32_t Val = -CE->getValue();
2652 Inst.addOperand(MCOperand::createImm(Val));
2653 }
2654
2655 void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2656 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2656, __extension__
__PRETTY_FUNCTION__))
;
2657 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2658 uint32_t Val = -CE->getValue();
2659 Inst.addOperand(MCOperand::createImm(Val));
2660 }
2661
2662 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2663 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2663, __extension__
__PRETTY_FUNCTION__))
;
2664 // Munge the lsb/width into a bitfield mask.
2665 unsigned lsb = Bitfield.LSB;
2666 unsigned width = Bitfield.Width;
2667 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2668 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2669 (32 - (lsb + width)));
2670 Inst.addOperand(MCOperand::createImm(Mask));
2671 }
2672
2673 void addImmOperands(MCInst &Inst, unsigned N) const {
2674 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2674, __extension__
__PRETTY_FUNCTION__))
;
2675 addExpr(Inst, getImm());
2676 }
2677
2678 void addFBits16Operands(MCInst &Inst, unsigned N) const {
2679 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2679, __extension__
__PRETTY_FUNCTION__))
;
2680 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2681 Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2682 }
2683
2684 void addFBits32Operands(MCInst &Inst, unsigned N) const {
2685 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2685, __extension__
__PRETTY_FUNCTION__))
;
2686 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2687 Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2688 }
2689
2690 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2691 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2691, __extension__
__PRETTY_FUNCTION__))
;
2692 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2693 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2694 Inst.addOperand(MCOperand::createImm(Val));
2695 }
2696
2697 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2698 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2698, __extension__
__PRETTY_FUNCTION__))
;
2699 // FIXME: We really want to scale the value here, but the LDRD/STRD
2700 // instruction don't encode operands that way yet.
2701 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2702 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2703 }
2704
2705 void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2706 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2706, __extension__
__PRETTY_FUNCTION__))
;
2707 // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2708 // instruction don't encode operands that way yet.
2709 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2710 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2711 }
2712
2713 void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2714 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2714, __extension__
__PRETTY_FUNCTION__))
;
2715 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2716 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2717 }
2718
2719 void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2720 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2720, __extension__
__PRETTY_FUNCTION__))
;
2721 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2722 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2723 }
2724
2725 void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2726 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2726, __extension__
__PRETTY_FUNCTION__))
;
2727 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2728 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2729 }
2730
2731 void addImm7Operands(MCInst &Inst, unsigned N) const {
2732 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2732, __extension__
__PRETTY_FUNCTION__))
;
2733 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2734 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2735 }
2736
2737 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2738 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2738, __extension__
__PRETTY_FUNCTION__))
;
2739 // The immediate is scaled by four in the encoding and is stored
2740 // in the MCInst as such. Lop off the low two bits here.
2741 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2742 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2743 }
2744
2745 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2746 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2746, __extension__
__PRETTY_FUNCTION__))
;
2747 // The immediate is scaled by four in the encoding and is stored
2748 // in the MCInst as such. Lop off the low two bits here.
2749 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2750 Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2751 }
2752
2753 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2754 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2754, __extension__
__PRETTY_FUNCTION__))
;
2755 // The immediate is scaled by four in the encoding and is stored
2756 // in the MCInst as such. Lop off the low two bits here.
2757 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2758 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2759 }
2760
2761 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2762 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2762, __extension__
__PRETTY_FUNCTION__))
;
2763 // The constant encodes as the immediate-1, and we store in the instruction
2764 // the bits as encoded, so subtract off one here.
2765 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2766 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2767 }
2768
2769 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2770 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2770, __extension__
__PRETTY_FUNCTION__))
;
2771 // The constant encodes as the immediate-1, and we store in the instruction
2772 // the bits as encoded, so subtract off one here.
2773 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2774 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2775 }
2776
2777 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2778 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2778, __extension__
__PRETTY_FUNCTION__))
;
2779 // The constant encodes as the immediate, except for 32, which encodes as
2780 // zero.
2781 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2782 unsigned Imm = CE->getValue();
2783 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2784 }
2785
2786 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2787 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2787, __extension__
__PRETTY_FUNCTION__))
;
2788 // An ASR value of 32 encodes as 0, so that's how we want to add it to
2789 // the instruction as well.
2790 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2791 int Val = CE->getValue();
2792 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2793 }
2794
2795 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2796 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2796, __extension__
__PRETTY_FUNCTION__))
;
2797 // The operand is actually a t2_so_imm, but we have its bitwise
2798 // negation in the assembly source, so twiddle it here.
2799 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2800 Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2801 }
2802
2803 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2804 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2804, __extension__
__PRETTY_FUNCTION__))
;
2805 // The operand is actually a t2_so_imm, but we have its
2806 // negation in the assembly source, so twiddle it here.
2807 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2808 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2809 }
2810
2811 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2812 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2812, __extension__
__PRETTY_FUNCTION__))
;
2813 // The operand is actually an imm0_4095, but we have its
2814 // negation in the assembly source, so twiddle it here.
2815 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2816 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2817 }
2818
2819 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2820 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2821 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2822 return;
2823 }
2824 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2825 Inst.addOperand(MCOperand::createExpr(SR));
2826 }
2827
2828 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2829 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2829, __extension__
__PRETTY_FUNCTION__))
;
2830 if (isImm()) {
2831 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2832 if (CE) {
2833 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2834 return;
2835 }
2836 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2837 Inst.addOperand(MCOperand::createExpr(SR));
2838 return;
2839 }
2840
2841 assert(isGPRMem() && "Unknown value type!")(static_cast <bool> (isGPRMem() && "Unknown value type!"
) ? void (0) : __assert_fail ("isGPRMem() && \"Unknown value type!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2841, __extension__
__PRETTY_FUNCTION__))
;
2842 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!")(static_cast <bool> (isa<MCConstantExpr>(Memory.OffsetImm
) && "Unknown value type!") ? void (0) : __assert_fail
("isa<MCConstantExpr>(Memory.OffsetImm) && \"Unknown value type!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2842, __extension__
__PRETTY_FUNCTION__))
;
2843 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2844 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2845 else
2846 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2847 }
2848
2849 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2850 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2850, __extension__
__PRETTY_FUNCTION__))
;
2851 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2852 }
2853
2854 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2855 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2855, __extension__
__PRETTY_FUNCTION__))
;
2856 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2857 }
2858
2859 void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2860 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2860, __extension__
__PRETTY_FUNCTION__))
;
2861 Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2862 }
2863
2864 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2865 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2865, __extension__
__PRETTY_FUNCTION__))
;
2866 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2867 }
2868
2869 void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2870 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2870, __extension__
__PRETTY_FUNCTION__))
;
2871 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2872 }
2873
2874 void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2875 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2875, __extension__
__PRETTY_FUNCTION__))
;
2876 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2877 }
2878
2879 void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2880 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2880, __extension__
__PRETTY_FUNCTION__))
;
2881 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2882 }
2883
2884 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2885 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2885, __extension__
__PRETTY_FUNCTION__))
;
2886 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2887 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2888 else
2889 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2890 }
2891
2892 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2893 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2893, __extension__
__PRETTY_FUNCTION__))
;
2894 assert(isImm() && "Not an immediate!")(static_cast <bool> (isImm() && "Not an immediate!"
) ? void (0) : __assert_fail ("isImm() && \"Not an immediate!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2894, __extension__
__PRETTY_FUNCTION__))
;
2895
2896 // If we have an immediate that's not a constant, treat it as a label
2897 // reference needing a fixup.
2898 if (!isa<MCConstantExpr>(getImm())) {
2899 Inst.addOperand(MCOperand::createExpr(getImm()));
2900 return;
2901 }
2902
2903 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2904 int Val = CE->getValue();
2905 Inst.addOperand(MCOperand::createImm(Val));
2906 }
2907
2908 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2909 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2909, __extension__
__PRETTY_FUNCTION__))
;
2910 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2911 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2912 }
2913
2914 void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2915 addAlignedMemoryOperands(Inst, N);
2916 }
2917
2918 void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2919 addAlignedMemoryOperands(Inst, N);
2920 }
2921
2922 void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2923 addAlignedMemoryOperands(Inst, N);
2924 }
2925
2926 void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2927 addAlignedMemoryOperands(Inst, N);
2928 }
2929
2930 void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2931 addAlignedMemoryOperands(Inst, N);
2932 }
2933
2934 void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2935 addAlignedMemoryOperands(Inst, N);
2936 }
2937
2938 void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2939 addAlignedMemoryOperands(Inst, N);
2940 }
2941
2942 void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2943 addAlignedMemoryOperands(Inst, N);
2944 }
2945
2946 void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2947 addAlignedMemoryOperands(Inst, N);
2948 }
2949
2950 void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2951 addAlignedMemoryOperands(Inst, N);
2952 }
2953
2954 void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2955 addAlignedMemoryOperands(Inst, N);
2956 }
2957
2958 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2959 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2959, __extension__
__PRETTY_FUNCTION__))
;
2960 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2961 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2962 if (!Memory.OffsetRegNum) {
2963 if (!Memory.OffsetImm)
2964 Inst.addOperand(MCOperand::createImm(0));
2965 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2966 int32_t Val = CE->getValue();
2967 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2968 // Special case for #-0
2969 if (Val == std::numeric_limits<int32_t>::min())
2970 Val = 0;
2971 if (Val < 0)
2972 Val = -Val;
2973 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2974 Inst.addOperand(MCOperand::createImm(Val));
2975 } else
2976 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2977 } else {
2978 // For register offset, we encode the shift type and negation flag
2979 // here.
2980 int32_t Val =
2981 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2982 Memory.ShiftImm, Memory.ShiftType);
2983 Inst.addOperand(MCOperand::createImm(Val));
2984 }
2985 }
2986
2987 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2988 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2988, __extension__
__PRETTY_FUNCTION__))
;
2989 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2990 assert(CE && "non-constant AM2OffsetImm operand!")(static_cast <bool> (CE && "non-constant AM2OffsetImm operand!"
) ? void (0) : __assert_fail ("CE && \"non-constant AM2OffsetImm operand!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 2990, __extension__
__PRETTY_FUNCTION__))
;
2991 int32_t Val = CE->getValue();
2992 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2993 // Special case for #-0
2994 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2995 if (Val < 0) Val = -Val;
2996 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2997 Inst.addOperand(MCOperand::createReg(0));
2998 Inst.addOperand(MCOperand::createImm(Val));
2999 }
3000
3001 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
3002 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3002, __extension__
__PRETTY_FUNCTION__))
;
3003 // If we have an immediate that's not a constant, treat it as a label
3004 // reference needing a fixup. If it is a constant, it's something else
3005 // and we reject it.
3006 if (isImm()) {
3007 Inst.addOperand(MCOperand::createExpr(getImm()));
3008 Inst.addOperand(MCOperand::createReg(0));
3009 Inst.addOperand(MCOperand::createImm(0));
3010 return;
3011 }
3012
3013 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3014 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3015 if (!Memory.OffsetRegNum) {
3016 if (!Memory.OffsetImm)
3017 Inst.addOperand(MCOperand::createImm(0));
3018 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3019 int32_t Val = CE->getValue();
3020 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3021 // Special case for #-0
3022 if (Val == std::numeric_limits<int32_t>::min())
3023 Val = 0;
3024 if (Val < 0)
3025 Val = -Val;
3026 Val = ARM_AM::getAM3Opc(AddSub, Val);
3027 Inst.addOperand(MCOperand::createImm(Val));
3028 } else
3029 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3030 } else {
3031 // For register offset, we encode the shift type and negation flag
3032 // here.
3033 int32_t Val =
3034 ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3035 Inst.addOperand(MCOperand::createImm(Val));
3036 }
3037 }
3038
3039 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3040 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3040, __extension__
__PRETTY_FUNCTION__))
;
3041 if (Kind == k_PostIndexRegister) {
3042 int32_t Val =
3043 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3044 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3045 Inst.addOperand(MCOperand::createImm(Val));
3046 return;
3047 }
3048
3049 // Constant offset.
3050 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3051 int32_t Val = CE->getValue();
3052 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3053 // Special case for #-0
3054 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3055 if (Val < 0) Val = -Val;
3056 Val = ARM_AM::getAM3Opc(AddSub, Val);
3057 Inst.addOperand(MCOperand::createReg(0));
3058 Inst.addOperand(MCOperand::createImm(Val));
3059 }
3060
3061 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3062 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3062, __extension__
__PRETTY_FUNCTION__))
;
3063 // If we have an immediate that's not a constant, treat it as a label
3064 // reference needing a fixup. If it is a constant, it's something else
3065 // and we reject it.
3066 if (isImm()) {
3067 Inst.addOperand(MCOperand::createExpr(getImm()));
3068 Inst.addOperand(MCOperand::createImm(0));
3069 return;
3070 }
3071
3072 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3073 if (!Memory.OffsetImm)
3074 Inst.addOperand(MCOperand::createImm(0));
3075 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3076 // The lower two bits are always zero and as such are not encoded.
3077 int32_t Val = CE->getValue() / 4;
3078 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3079 // Special case for #-0
3080 if (Val == std::numeric_limits<int32_t>::min())
3081 Val = 0;
3082 if (Val < 0)
3083 Val = -Val;
3084 Val = ARM_AM::getAM5Opc(AddSub, Val);
3085 Inst.addOperand(MCOperand::createImm(Val));
3086 } else
3087 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3088 }
3089
3090 void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3091 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3091, __extension__
__PRETTY_FUNCTION__))
;
3092 // If we have an immediate that's not a constant, treat it as a label
3093 // reference needing a fixup. If it is a constant, it's something else
3094 // and we reject it.
3095 if (isImm()) {
3096 Inst.addOperand(MCOperand::createExpr(getImm()));
3097 Inst.addOperand(MCOperand::createImm(0));
3098 return;
3099 }
3100
3101 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3102 // The lower bit is always zero and as such is not encoded.
3103 if (!Memory.OffsetImm)
3104 Inst.addOperand(MCOperand::createImm(0));
3105 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3106 int32_t Val = CE->getValue() / 2;
3107 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3108 // Special case for #-0
3109 if (Val == std::numeric_limits<int32_t>::min())
3110 Val = 0;
3111 if (Val < 0)
3112 Val = -Val;
3113 Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3114 Inst.addOperand(MCOperand::createImm(Val));
3115 } else
3116 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3117 }
3118
3119 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3120 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3120, __extension__
__PRETTY_FUNCTION__))
;
3121 // If we have an immediate that's not a constant, treat it as a label
3122 // reference needing a fixup. If it is a constant, it's something else
3123 // and we reject it.
3124 if (isImm()) {
3125 Inst.addOperand(MCOperand::createExpr(getImm()));
3126 Inst.addOperand(MCOperand::createImm(0));
3127 return;
3128 }
3129
3130 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3131 addExpr(Inst, Memory.OffsetImm);
3132 }
3133
3134 void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3135 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3135, __extension__
__PRETTY_FUNCTION__))
;
3136 // If we have an immediate that's not a constant, treat it as a label
3137 // reference needing a fixup. If it is a constant, it's something else
3138 // and we reject it.
3139 if (isImm()) {
3140 Inst.addOperand(MCOperand::createExpr(getImm()));
3141 Inst.addOperand(MCOperand::createImm(0));
3142 return;
3143 }
3144
3145 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3146 addExpr(Inst, Memory.OffsetImm);
3147 }
3148
3149 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3150 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3150, __extension__
__PRETTY_FUNCTION__))
;
3151 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3152 if (!Memory.OffsetImm)
3153 Inst.addOperand(MCOperand::createImm(0));
3154 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3155 // The lower two bits are always zero and as such are not encoded.
3156 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3157 else
3158 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3159 }
3160
3161 void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3162 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3162, __extension__
__PRETTY_FUNCTION__))
;
3163 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3164 addExpr(Inst, Memory.OffsetImm);
3165 }
3166
3167 void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3168 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3168, __extension__
__PRETTY_FUNCTION__))
;
3169 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3170 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3171 }
3172
3173 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3174 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3174, __extension__
__PRETTY_FUNCTION__))
;
3175 // If this is an immediate, it's a label reference.
3176 if (isImm()) {
3177 addExpr(Inst, getImm());
3178 Inst.addOperand(MCOperand::createImm(0));
3179 return;
3180 }
3181
3182 // Otherwise, it's a normal memory reg+offset.
3183 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3184 addExpr(Inst, Memory.OffsetImm);
3185 }
3186
3187 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3188 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3188, __extension__
__PRETTY_FUNCTION__))
;
3189 // If this is an immediate, it's a label reference.
3190 if (isImm()) {
3191 addExpr(Inst, getImm());
3192 Inst.addOperand(MCOperand::createImm(0));
3193 return;
3194 }
3195
3196 // Otherwise, it's a normal memory reg+offset.
3197 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3198 addExpr(Inst, Memory.OffsetImm);
3199 }
3200
3201 void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3202 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3202, __extension__
__PRETTY_FUNCTION__))
;
3203 // This is container for the immediate that we will create the constant
3204 // pool from
3205 addExpr(Inst, getConstantPoolImm());
3206 }
3207
3208 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3209 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3209, __extension__
__PRETTY_FUNCTION__))
;
3210 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3211 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3212 }
3213
3214 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3215 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3215, __extension__
__PRETTY_FUNCTION__))
;
3216 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3217 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3218 }
3219
3220 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3221 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3221, __extension__
__PRETTY_FUNCTION__))
;
3222 unsigned Val =
3223 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3224 Memory.ShiftImm, Memory.ShiftType);
3225 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3226 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3227 Inst.addOperand(MCOperand::createImm(Val));
3228 }
3229
3230 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3231 assert(N == 3 && "Invalid number of operands!")(static_cast <bool> (N == 3 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 3 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3231, __extension__
__PRETTY_FUNCTION__))
;
3232 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3233 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3234 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3235 }
3236
3237 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3238 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3238, __extension__
__PRETTY_FUNCTION__))
;
3239 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3240 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3241 }
3242
3243 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3244 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3244, __extension__
__PRETTY_FUNCTION__))
;
3245 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3246 if (!Memory.OffsetImm)
3247 Inst.addOperand(MCOperand::createImm(0));
3248 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3249 // The lower two bits are always zero and as such are not encoded.
3250 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3251 else
3252 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3253 }
3254
3255 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3256 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3256, __extension__
__PRETTY_FUNCTION__))
;
3257 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3258 if (!Memory.OffsetImm)
3259 Inst.addOperand(MCOperand::createImm(0));
3260 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3261 Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3262 else
3263 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3264 }
3265
3266 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3267 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3267, __extension__
__PRETTY_FUNCTION__))
;
3268 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3269 addExpr(Inst, Memory.OffsetImm);
3270 }
3271
3272 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3273 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3273, __extension__
__PRETTY_FUNCTION__))
;
3274 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3275 if (!Memory.OffsetImm)
3276 Inst.addOperand(MCOperand::createImm(0));
3277 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3278 // The lower two bits are always zero and as such are not encoded.
3279 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3280 else
3281 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3282 }
3283
3284 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3285 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3285, __extension__
__PRETTY_FUNCTION__))
;
3286 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3287 assert(CE && "non-constant post-idx-imm8 operand!")(static_cast <bool> (CE && "non-constant post-idx-imm8 operand!"
) ? void (0) : __assert_fail ("CE && \"non-constant post-idx-imm8 operand!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3287, __extension__
__PRETTY_FUNCTION__))
;
3288 int Imm = CE->getValue();
3289 bool isAdd = Imm >= 0;
3290 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3291 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3292 Inst.addOperand(MCOperand::createImm(Imm));
3293 }
3294
3295 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3296 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3296, __extension__
__PRETTY_FUNCTION__))
;
3297 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3298 assert(CE && "non-constant post-idx-imm8s4 operand!")(static_cast <bool> (CE && "non-constant post-idx-imm8s4 operand!"
) ? void (0) : __assert_fail ("CE && \"non-constant post-idx-imm8s4 operand!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3298, __extension__
__PRETTY_FUNCTION__))
;
3299 int Imm = CE->getValue();
3300 bool isAdd = Imm >= 0;
3301 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3302 // Immediate is scaled by 4.
3303 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3304 Inst.addOperand(MCOperand::createImm(Imm));
3305 }
3306
3307 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3308 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3308, __extension__
__PRETTY_FUNCTION__))
;
3309 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3310 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3311 }
3312
3313 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3314 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3314, __extension__
__PRETTY_FUNCTION__))
;
3315 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3316 // The sign, shift type, and shift amount are encoded in a single operand
3317 // using the AM2 encoding helpers.
3318 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3319 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3320 PostIdxReg.ShiftTy);
3321 Inst.addOperand(MCOperand::createImm(Imm));
3322 }
3323
3324 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3325 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3325, __extension__
__PRETTY_FUNCTION__))
;
3326 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3327 Inst.addOperand(MCOperand::createImm(CE->getValue()));
3328 }
3329
3330 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3331 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3331, __extension__
__PRETTY_FUNCTION__))
;
3332 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3333 }
3334
3335 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3336 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3336, __extension__
__PRETTY_FUNCTION__))
;
3337 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3338 }
3339
3340 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3341 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3341, __extension__
__PRETTY_FUNCTION__))
;
3342 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3343 }
3344
3345 void addVecListOperands(MCInst &Inst, unsigned N) const {
3346 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3346, __extension__
__PRETTY_FUNCTION__))
;
3347 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3348 }
3349
3350 void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3351 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3351, __extension__
__PRETTY_FUNCTION__))
;
3352
3353 // When we come here, the VectorList field will identify a range
3354 // of q-registers by its base register and length, and it will
3355 // have already been error-checked to be the expected length of
3356 // range and contain only q-regs in the range q0-q7. So we can
3357 // count on the base register being in the range q0-q6 (for 2
3358 // regs) or q0-q4 (for 4)
3359 //
3360 // The MVE instructions taking a register range of this kind will
3361 // need an operand in the MQQPR or MQQQQPR class, representing the
3362 // entire range as a unit. So we must translate into that class,
3363 // by finding the index of the base register in the MQPR reg
3364 // class, and returning the super-register at the corresponding
3365 // index in the target class.
3366
3367 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3368 const MCRegisterClass *RC_out =
3369 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3370 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3371
3372 unsigned I, E = RC_out->getNumRegs();
3373 for (I = 0; I < E; I++)
3374 if (RC_in->getRegister(I) == VectorList.RegNum)
3375 break;
3376 assert(I < E && "Invalid vector list start register!")(static_cast <bool> (I < E && "Invalid vector list start register!"
) ? void (0) : __assert_fail ("I < E && \"Invalid vector list start register!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3376, __extension__
__PRETTY_FUNCTION__))
;
3377
3378 Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3379 }
3380
3381 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3382 assert(N == 2 && "Invalid number of operands!")(static_cast <bool> (N == 2 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 2 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3382, __extension__
__PRETTY_FUNCTION__))
;
3383 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3384 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3385 }
3386
3387 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3388 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3388, __extension__
__PRETTY_FUNCTION__))
;
3389 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3390 }
3391
3392 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3393 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3393, __extension__
__PRETTY_FUNCTION__))
;
3394 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3395 }
3396
3397 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3398 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3398, __extension__
__PRETTY_FUNCTION__))
;
3399 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3400 }
3401
3402 void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3403 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3403, __extension__
__PRETTY_FUNCTION__))
;
3404 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3405 }
3406
3407 void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3408 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3408, __extension__
__PRETTY_FUNCTION__))
;
3409 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3410 }
3411
3412 void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3413 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3413, __extension__
__PRETTY_FUNCTION__))
;
3414 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3415 }
3416
3417 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3418 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3418, __extension__
__PRETTY_FUNCTION__))
;
3419 // The immediate encodes the type of constant as well as the value.
3420 // Mask in that this is an i8 splat.
3421 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3422 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3423 }
3424
3425 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3426 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3426, __extension__
__PRETTY_FUNCTION__))
;
3427 // The immediate encodes the type of constant as well as the value.
3428 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3429 unsigned Value = CE->getValue();
3430 Value = ARM_AM::encodeNEONi16splat(Value);
3431 Inst.addOperand(MCOperand::createImm(Value));
3432 }
3433
3434 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3435 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3435, __extension__
__PRETTY_FUNCTION__))
;
3436 // The immediate encodes the type of constant as well as the value.
3437 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3438 unsigned Value = CE->getValue();
3439 Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3440 Inst.addOperand(MCOperand::createImm(Value));
3441 }
3442
3443 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3444 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3444, __extension__
__PRETTY_FUNCTION__))
;
3445 // The immediate encodes the type of constant as well as the value.
3446 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3447 unsigned Value = CE->getValue();
3448 Value = ARM_AM::encodeNEONi32splat(Value);
3449 Inst.addOperand(MCOperand::createImm(Value));
3450 }
3451
3452 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3453 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3453, __extension__
__PRETTY_FUNCTION__))
;
3454 // The immediate encodes the type of constant as well as the value.
3455 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3456 unsigned Value = CE->getValue();
3457 Value = ARM_AM::encodeNEONi32splat(~Value);
3458 Inst.addOperand(MCOperand::createImm(Value));
3459 }
3460
3461 void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3462 // The immediate encodes the type of constant as well as the value.
3463 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3464 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3467, __extension__
__PRETTY_FUNCTION__))
3465 Inst.getOpcode() == ARM::VMOVv16i8) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3467, __extension__
__PRETTY_FUNCTION__))
3466 "All instructions that wants to replicate non-zero byte "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3467, __extension__
__PRETTY_FUNCTION__))
3467 "always must be replaced with VMOVv8i8 or VMOVv16i8.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv8i8
|| Inst.getOpcode() == ARM::VMOVv16i8) && "All instructions that wants to replicate non-zero byte "
"always must be replaced with VMOVv8i8 or VMOVv16i8.") ? void
(0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && \"All instructions that wants to replicate non-zero byte \" \"always must be replaced with VMOVv8i8 or VMOVv16i8.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3467, __extension__
__PRETTY_FUNCTION__))
;
3468 unsigned Value = CE->getValue();
3469 if (Inv)
3470 Value = ~Value;
3471 unsigned B = Value & 0xff;
3472 B |= 0xe00; // cmode = 0b1110
3473 Inst.addOperand(MCOperand::createImm(B));
3474 }
3475
3476 void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3477 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3477, __extension__
__PRETTY_FUNCTION__))
;
3478 addNEONi8ReplicateOperands(Inst, true);
3479 }
3480
static unsigned encodeNeonVMOVImmediate(unsigned Value) {
  // Fold a 32-bit constant into the NEON VMOV cmode+imm8 form, picking the
  // encoding variant by which byte of the word carries the payload.
  if (Value > 0xffffff)
    return (Value >> 24) | 0x600;
  if (Value > 0xffff)
    return (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
  if (Value >= 256)
    return (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
  return Value;
}
3490
3491 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3492 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3492, __extension__
__PRETTY_FUNCTION__))
;
3493 // The immediate encodes the type of constant as well as the value.
3494 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3495 unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3496 Inst.addOperand(MCOperand::createImm(Value));
3497 }
3498
3499 void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3500 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3500, __extension__
__PRETTY_FUNCTION__))
;
3501 addNEONi8ReplicateOperands(Inst, false);
3502 }
3503
3504 void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3505 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3505, __extension__
__PRETTY_FUNCTION__))
;
3506 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3507 assert((Inst.getOpcode() == ARM::VMOVv4i16 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16
|| Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() ==
ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void (
0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3512, __extension__
__PRETTY_FUNCTION__))
3508 Inst.getOpcode() == ARM::VMOVv8i16 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16
|| Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() ==
ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void (
0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3512, __extension__
__PRETTY_FUNCTION__))
3509 Inst.getOpcode() == ARM::VMVNv4i16 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16
|| Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() ==
ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void (
0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3512, __extension__
__PRETTY_FUNCTION__))
3510 Inst.getOpcode() == ARM::VMVNv8i16) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16
|| Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() ==
ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void (
0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3512, __extension__
__PRETTY_FUNCTION__))
3511 "All instructions that want to replicate non-zero half-word "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16
|| Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() ==
ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void (
0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3512, __extension__
__PRETTY_FUNCTION__))
3512 "always must be replaced with V{MOV,MVN}v{4,8}i16.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv4i16
|| Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() ==
ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) &&
"All instructions that want to replicate non-zero half-word "
"always must be replaced with V{MOV,MVN}v{4,8}i16.") ? void (
0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv4i16 || Inst.getOpcode() == ARM::VMOVv8i16 || Inst.getOpcode() == ARM::VMVNv4i16 || Inst.getOpcode() == ARM::VMVNv8i16) && \"All instructions that want to replicate non-zero half-word \" \"always must be replaced with V{MOV,MVN}v{4,8}i16.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3512, __extension__
__PRETTY_FUNCTION__))
;
3513 uint64_t Value = CE->getValue();
3514 unsigned Elem = Value & 0xffff;
3515 if (Elem >= 256)
3516 Elem = (Elem >> 8) | 0x200;
3517 Inst.addOperand(MCOperand::createImm(Elem));
3518 }
3519
3520 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3521 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3521, __extension__
__PRETTY_FUNCTION__))
;
3522 // The immediate encodes the type of constant as well as the value.
3523 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3524 unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3525 Inst.addOperand(MCOperand::createImm(Value));
3526 }
3527
3528 void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3529 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3529, __extension__
__PRETTY_FUNCTION__))
;
3530 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3531 assert((Inst.getOpcode() == ARM::VMOVv2i32 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32
|| Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() ==
ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32."
) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3536, __extension__
__PRETTY_FUNCTION__))
3532 Inst.getOpcode() == ARM::VMOVv4i32 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32
|| Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() ==
ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32."
) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3536, __extension__
__PRETTY_FUNCTION__))
3533 Inst.getOpcode() == ARM::VMVNv2i32 ||(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32
|| Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() ==
ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32."
) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3536, __extension__
__PRETTY_FUNCTION__))
3534 Inst.getOpcode() == ARM::VMVNv4i32) &&(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32
|| Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() ==
ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32."
) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3536, __extension__
__PRETTY_FUNCTION__))
3535 "All instructions that want to replicate non-zero word "(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32
|| Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() ==
ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32."
) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3536, __extension__
__PRETTY_FUNCTION__))
3536 "always must be replaced with V{MOV,MVN}v{2,4}i32.")(static_cast <bool> ((Inst.getOpcode() == ARM::VMOVv2i32
|| Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() ==
ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) &&
"All instructions that want to replicate non-zero word " "always must be replaced with V{MOV,MVN}v{2,4}i32."
) ? void (0) : __assert_fail ("(Inst.getOpcode() == ARM::VMOVv2i32 || Inst.getOpcode() == ARM::VMOVv4i32 || Inst.getOpcode() == ARM::VMVNv2i32 || Inst.getOpcode() == ARM::VMVNv4i32) && \"All instructions that want to replicate non-zero word \" \"always must be replaced with V{MOV,MVN}v{2,4}i32.\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3536, __extension__
__PRETTY_FUNCTION__))
;
3537 uint64_t Value = CE->getValue();
3538 unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3539 Inst.addOperand(MCOperand::createImm(Elem));
3540 }
3541
3542 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3543 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3543, __extension__
__PRETTY_FUNCTION__))
;
3544 // The immediate encodes the type of constant as well as the value.
3545 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3546 uint64_t Value = CE->getValue();
3547 unsigned Imm = 0;
3548 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3549 Imm |= (Value & 1) << i;
3550 }
3551 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3552 }
3553
3554 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3555 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3555, __extension__
__PRETTY_FUNCTION__))
;
3556 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3557 Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3558 }
3559
3560 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3561 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3561, __extension__
__PRETTY_FUNCTION__))
;
3562 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3563 Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3564 }
3565
3566 void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3567 assert(N == 1 && "Invalid number of operands!")(static_cast <bool> (N == 1 && "Invalid number of operands!"
) ? void (0) : __assert_fail ("N == 1 && \"Invalid number of operands!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3567, __extension__
__PRETTY_FUNCTION__))
;
3568 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3569 unsigned Imm = CE->getValue();
3570 assert((Imm == 48 || Imm == 64) && "Invalid saturate operand")(static_cast <bool> ((Imm == 48 || Imm == 64) &&
"Invalid saturate operand") ? void (0) : __assert_fail ("(Imm == 48 || Imm == 64) && \"Invalid saturate operand\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3570, __extension__
__PRETTY_FUNCTION__))
;
3571 Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3572 }
3573
3574 void print(raw_ostream &OS) const override;
3575
3576 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3577 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3578 Op->ITMask.Mask = Mask;
3579 Op->StartLoc = S;
3580 Op->EndLoc = S;
3581 return Op;
3582 }
3583
3584 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3585 SMLoc S) {
3586 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3587 Op->CC.Val = CC;
3588 Op->StartLoc = S;
3589 Op->EndLoc = S;
3590 return Op;
3591 }
3592
3593 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3594 SMLoc S) {
3595 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3596 Op->VCC.Val = CC;
3597 Op->StartLoc = S;
3598 Op->EndLoc = S;
3599 return Op;
3600 }
3601
3602 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3603 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3604 Op->Cop.Val = CopVal;
3605 Op->StartLoc = S;
3606 Op->EndLoc = S;
3607 return Op;
3608 }
3609
3610 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3611 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3612 Op->Cop.Val = CopVal;
3613 Op->StartLoc = S;
3614 Op->EndLoc = S;
3615 return Op;
3616 }
3617
3618 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3619 SMLoc E) {
3620 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3621 Op->Cop.Val = Val;
3622 Op->StartLoc = S;
3623 Op->EndLoc = E;
3624 return Op;
3625 }
3626
3627 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3628 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3629 Op->Reg.RegNum = RegNum;
3630 Op->StartLoc = S;
3631 Op->EndLoc = S;
3632 return Op;
3633 }
3634
3635 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3636 auto Op = std::make_unique<ARMOperand>(k_Token);
3637 Op->Tok.Data = Str.data();
3638 Op->Tok.Length = Str.size();
3639 Op->StartLoc = S;
3640 Op->EndLoc = S;
3641 return Op;
3642 }
3643
3644 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3645 SMLoc E) {
3646 auto Op = std::make_unique<ARMOperand>(k_Register);
3647 Op->Reg.RegNum = RegNum;
3648 Op->StartLoc = S;
3649 Op->EndLoc = E;
3650 return Op;
3651 }
3652
3653 static std::unique_ptr<ARMOperand>
3654 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3655 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3656 SMLoc E) {
3657 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3658 Op->RegShiftedReg.ShiftTy = ShTy;
3659 Op->RegShiftedReg.SrcReg = SrcReg;
3660 Op->RegShiftedReg.ShiftReg = ShiftReg;
3661 Op->RegShiftedReg.ShiftImm = ShiftImm;
3662 Op->StartLoc = S;
3663 Op->EndLoc = E;
3664 return Op;
3665 }
3666
3667 static std::unique_ptr<ARMOperand>
3668 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3669 unsigned ShiftImm, SMLoc S, SMLoc E) {
3670 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3671 Op->RegShiftedImm.ShiftTy = ShTy;
3672 Op->RegShiftedImm.SrcReg = SrcReg;
3673 Op->RegShiftedImm.ShiftImm = ShiftImm;
3674 Op->StartLoc = S;
3675 Op->EndLoc = E;
3676 return Op;
3677 }
3678
3679 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3680 SMLoc S, SMLoc E) {
3681 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3682 Op->ShifterImm.isASR = isASR;
3683 Op->ShifterImm.Imm = Imm;
3684 Op->StartLoc = S;
3685 Op->EndLoc = E;
3686 return Op;
3687 }
3688
3689 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3690 SMLoc E) {
3691 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3692 Op->RotImm.Imm = Imm;
3693 Op->StartLoc = S;
3694 Op->EndLoc = E;
3695 return Op;
3696 }
3697
3698 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3699 SMLoc S, SMLoc E) {
3700 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3701 Op->ModImm.Bits = Bits;
3702 Op->ModImm.Rot = Rot;
3703 Op->StartLoc = S;
3704 Op->EndLoc = E;
3705 return Op;
3706 }
3707
3708 static std::unique_ptr<ARMOperand>
3709 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3710 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3711 Op->Imm.Val = Val;
3712 Op->StartLoc = S;
3713 Op->EndLoc = E;
3714 return Op;
3715 }
3716
3717 static std::unique_ptr<ARMOperand>
3718 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3719 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3720 Op->Bitfield.LSB = LSB;
3721 Op->Bitfield.Width = Width;
3722 Op->StartLoc = S;
3723 Op->EndLoc = E;
3724 return Op;
3725 }
3726
3727 static std::unique_ptr<ARMOperand>
3728 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3729 SMLoc StartLoc, SMLoc EndLoc) {
3730 assert(Regs.size() > 0 && "RegList contains no registers?")(static_cast <bool> (Regs.size() > 0 && "RegList contains no registers?"
) ? void (0) : __assert_fail ("Regs.size() > 0 && \"RegList contains no registers?\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3730, __extension__
__PRETTY_FUNCTION__))
;
3731 KindTy Kind = k_RegisterList;
3732
3733 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3734 Regs.front().second)) {
3735 if (Regs.back().second == ARM::VPR)
3736 Kind = k_FPDRegisterListWithVPR;
3737 else
3738 Kind = k_DPRRegisterList;
3739 } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3740 Regs.front().second)) {
3741 if (Regs.back().second == ARM::VPR)
3742 Kind = k_FPSRegisterListWithVPR;
3743 else
3744 Kind = k_SPRRegisterList;
3745 }
3746
3747 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3748 Kind = k_RegisterListWithAPSR;
3749
3750 assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding")(static_cast <bool> (llvm::is_sorted(Regs) && "Register list must be sorted by encoding"
) ? void (0) : __assert_fail ("llvm::is_sorted(Regs) && \"Register list must be sorted by encoding\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3750, __extension__
__PRETTY_FUNCTION__))
;
3751
3752 auto Op = std::make_unique<ARMOperand>(Kind);
3753 for (const auto &P : Regs)
3754 Op->Registers.push_back(P.second);
3755
3756 Op->StartLoc = StartLoc;
3757 Op->EndLoc = EndLoc;
3758 return Op;
3759 }
3760
3761 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3762 unsigned Count,
3763 bool isDoubleSpaced,
3764 SMLoc S, SMLoc E) {
3765 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3766 Op->VectorList.RegNum = RegNum;
3767 Op->VectorList.Count = Count;
3768 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3769 Op->StartLoc = S;
3770 Op->EndLoc = E;
3771 return Op;
3772 }
3773
3774 static std::unique_ptr<ARMOperand>
3775 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3776 SMLoc S, SMLoc E) {
3777 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3778 Op->VectorList.RegNum = RegNum;
3779 Op->VectorList.Count = Count;
3780 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3781 Op->StartLoc = S;
3782 Op->EndLoc = E;
3783 return Op;
3784 }
3785
3786 static std::unique_ptr<ARMOperand>
3787 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3788 bool isDoubleSpaced, SMLoc S, SMLoc E) {
3789 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3790 Op->VectorList.RegNum = RegNum;
3791 Op->VectorList.Count = Count;
3792 Op->VectorList.LaneIndex = Index;
3793 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3794 Op->StartLoc = S;
3795 Op->EndLoc = E;
3796 return Op;
3797 }
3798
3799 static std::unique_ptr<ARMOperand>
3800 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3801 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3802 Op->VectorIndex.Val = Idx;
3803 Op->StartLoc = S;
3804 Op->EndLoc = E;
3805 return Op;
3806 }
3807
3808 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3809 SMLoc E) {
3810 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3811 Op->Imm.Val = Val;
3812 Op->StartLoc = S;
3813 Op->EndLoc = E;
3814 return Op;
3815 }
3816
3817 static std::unique_ptr<ARMOperand>
3818 CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3819 ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3820 bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3821 auto Op = std::make_unique<ARMOperand>(k_Memory);
3822 Op->Memory.BaseRegNum = BaseRegNum;
3823 Op->Memory.OffsetImm = OffsetImm;
3824 Op->Memory.OffsetRegNum = OffsetRegNum;
3825 Op->Memory.ShiftType = ShiftType;
3826 Op->Memory.ShiftImm = ShiftImm;
3827 Op->Memory.Alignment = Alignment;
3828 Op->Memory.isNegative = isNegative;
3829 Op->StartLoc = S;
3830 Op->EndLoc = E;
3831 Op->AlignmentLoc = AlignmentLoc;
3832 return Op;
3833 }
3834
3835 static std::unique_ptr<ARMOperand>
3836 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3837 unsigned ShiftImm, SMLoc S, SMLoc E) {
3838 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3839 Op->PostIdxReg.RegNum = RegNum;
3840 Op->PostIdxReg.isAdd = isAdd;
3841 Op->PostIdxReg.ShiftTy = ShiftTy;
3842 Op->PostIdxReg.ShiftImm = ShiftImm;
3843 Op->StartLoc = S;
3844 Op->EndLoc = E;
3845 return Op;
3846 }
3847
3848 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3849 SMLoc S) {
3850 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3851 Op->MBOpt.Val = Opt;
3852 Op->StartLoc = S;
3853 Op->EndLoc = S;
3854 return Op;
3855 }
3856
3857 static std::unique_ptr<ARMOperand>
3858 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3859 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3860 Op->ISBOpt.Val = Opt;
3861 Op->StartLoc = S;
3862 Op->EndLoc = S;
3863 return Op;
3864 }
3865
3866 static std::unique_ptr<ARMOperand>
3867 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3868 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3869 Op->TSBOpt.Val = Opt;
3870 Op->StartLoc = S;
3871 Op->EndLoc = S;
3872 return Op;
3873 }
3874
3875 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3876 SMLoc S) {
3877 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3878 Op->IFlags.Val = IFlags;
3879 Op->StartLoc = S;
3880 Op->EndLoc = S;
3881 return Op;
3882 }
3883
3884 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3885 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3886 Op->MMask.Val = MMask;
3887 Op->StartLoc = S;
3888 Op->EndLoc = S;
3889 return Op;
3890 }
3891
3892 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3893 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3894 Op->BankedReg.Val = Reg;
3895 Op->StartLoc = S;
3896 Op->EndLoc = S;
3897 return Op;
3898 }
3899};
3900
3901} // end anonymous namespace.
3902
3903void ARMOperand::print(raw_ostream &OS) const {
3904 auto RegName = [](MCRegister Reg) {
3905 if (Reg)
3906 return ARMInstPrinter::getRegisterName(Reg);
3907 else
3908 return "noreg";
3909 };
3910
3911 switch (Kind) {
3912 case k_CondCode:
3913 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3914 break;
3915 case k_VPTPred:
3916 OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3917 break;
3918 case k_CCOut:
3919 OS << "<ccout " << RegName(getReg()) << ">";
3920 break;
3921 case k_ITCondMask: {
3922 static const char *const MaskStr[] = {
3923 "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3924 "(tt)", "(ttet)", "(tte)", "(ttee)",
3925 "(t)", "(tett)", "(tet)", "(tete)",
3926 "(te)", "(teet)", "(tee)", "(teee)",
3927 };
3928 assert((ITMask.Mask & 0xf) == ITMask.Mask)(static_cast <bool> ((ITMask.Mask & 0xf) == ITMask.
Mask) ? void (0) : __assert_fail ("(ITMask.Mask & 0xf) == ITMask.Mask"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 3928, __extension__
__PRETTY_FUNCTION__))
;
3929 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3930 break;
3931 }
3932 case k_CoprocNum:
3933 OS << "<coprocessor number: " << getCoproc() << ">";
3934 break;
3935 case k_CoprocReg:
3936 OS << "<coprocessor register: " << getCoproc() << ">";
3937 break;
3938 case k_CoprocOption:
3939 OS << "<coprocessor option: " << CoprocOption.Val << ">";
3940 break;
3941 case k_MSRMask:
3942 OS << "<mask: " << getMSRMask() << ">";
3943 break;
3944 case k_BankedReg:
3945 OS << "<banked reg: " << getBankedReg() << ">";
3946 break;
3947 case k_Immediate:
3948 OS << *getImm();
3949 break;
3950 case k_MemBarrierOpt:
3951 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3952 break;
3953 case k_InstSyncBarrierOpt:
3954 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3955 break;
3956 case k_TraceSyncBarrierOpt:
3957 OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3958 break;
3959 case k_Memory:
3960 OS << "<memory";
3961 if (Memory.BaseRegNum)
3962 OS << " base:" << RegName(Memory.BaseRegNum);
3963 if (Memory.OffsetImm)
3964 OS << " offset-imm:" << *Memory.OffsetImm;
3965 if (Memory.OffsetRegNum)
3966 OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3967 << RegName(Memory.OffsetRegNum);
3968 if (Memory.ShiftType != ARM_AM::no_shift) {
3969 OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3970 OS << " shift-imm:" << Memory.ShiftImm;
3971 }
3972 if (Memory.Alignment)
3973 OS << " alignment:" << Memory.Alignment;
3974 OS << ">";
3975 break;
3976 case k_PostIndexRegister:
3977 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3978 << RegName(PostIdxReg.RegNum);
3979 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3980 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3981 << PostIdxReg.ShiftImm;
3982 OS << ">";
3983 break;
3984 case k_ProcIFlags: {
3985 OS << "<ARM_PROC::";
3986 unsigned IFlags = getProcIFlags();
3987 for (int i=2; i >= 0; --i)
3988 if (IFlags & (1 << i))
3989 OS << ARM_PROC::IFlagsToString(1 << i);
3990 OS << ">";
3991 break;
3992 }
3993 case k_Register:
3994 OS << "<register " << RegName(getReg()) << ">";
3995 break;
3996 case k_ShifterImmediate:
3997 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3998 << " #" << ShifterImm.Imm << ">";
3999 break;
4000 case k_ShiftedRegister:
4001 OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
4002 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
4003 << RegName(RegShiftedReg.ShiftReg) << ">";
4004 break;
4005 case k_ShiftedImmediate:
4006 OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
4007 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
4008 << RegShiftedImm.ShiftImm << ">";
4009 break;
4010 case k_RotateImmediate:
4011 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
4012 break;
4013 case k_ModifiedImmediate:
4014 OS << "<mod_imm #" << ModImm.Bits << ", #"
4015 << ModImm.Rot << ")>";
4016 break;
4017 case k_ConstantPoolImmediate:
4018 OS << "<constant_pool_imm #" << *getConstantPoolImm();
4019 break;
4020 case k_BitfieldDescriptor:
4021 OS << "<bitfield " << "lsb: " << Bitfield.LSB
4022 << ", width: " << Bitfield.Width << ">";
4023 break;
4024 case k_RegisterList:
4025 case k_RegisterListWithAPSR:
4026 case k_DPRRegisterList:
4027 case k_SPRRegisterList:
4028 case k_FPSRegisterListWithVPR:
4029 case k_FPDRegisterListWithVPR: {
4030 OS << "<register_list ";
4031
4032 const SmallVectorImpl<unsigned> &RegList = getRegList();
4033 for (SmallVectorImpl<unsigned>::const_iterator
4034 I = RegList.begin(), E = RegList.end(); I != E; ) {
4035 OS << RegName(*I);
4036 if (++I < E) OS << ", ";
4037 }
4038
4039 OS << ">";
4040 break;
4041 }
4042 case k_VectorList:
4043 OS << "<vector_list " << VectorList.Count << " * "
4044 << RegName(VectorList.RegNum) << ">";
4045 break;
4046 case k_VectorListAllLanes:
4047 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4048 << RegName(VectorList.RegNum) << ">";
4049 break;
4050 case k_VectorListIndexed:
4051 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4052 << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4053 break;
4054 case k_Token:
4055 OS << "'" << getToken() << "'";
4056 break;
4057 case k_VectorIndex:
4058 OS << "<vectorindex " << getVectorIndex() << ">";
4059 break;
4060 }
4061}
4062
4063/// @name Auto-generated Match Functions
4064/// {
4065
4066static unsigned MatchRegisterName(StringRef Name);
4067
4068/// }
4069
4070bool ARMAsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
4071 SMLoc &EndLoc) {
4072 const AsmToken &Tok = getParser().getTok();
4073 StartLoc = Tok.getLoc();
4074 EndLoc = Tok.getEndLoc();
4075 RegNo = tryParseRegister();
4076
4077 return (RegNo == (unsigned)-1);
4078}
4079
4080OperandMatchResultTy ARMAsmParser::tryParseRegister(MCRegister &RegNo,
4081 SMLoc &StartLoc,
4082 SMLoc &EndLoc) {
4083 if (parseRegister(RegNo, StartLoc, EndLoc))
4084 return MatchOperand_NoMatch;
4085 return MatchOperand_Success;
4086}
4087
4088/// Try to parse a register name. The token must be an Identifier when called,
4089/// and if it is a register name the token is eaten and the register number is
4090/// returned. Otherwise return -1.
4091int ARMAsmParser::tryParseRegister() {
4092 MCAsmParser &Parser = getParser();
4093 const AsmToken &Tok = Parser.getTok();
4094 if (Tok.isNot(AsmToken::Identifier)) return -1;
4095
4096 std::string lowerCase = Tok.getString().lower();
4097 unsigned RegNum = MatchRegisterName(lowerCase);
4098 if (!RegNum) {
4099 RegNum = StringSwitch<unsigned>(lowerCase)
4100 .Case("r13", ARM::SP)
4101 .Case("r14", ARM::LR)
4102 .Case("r15", ARM::PC)
4103 .Case("ip", ARM::R12)
4104 // Additional register name aliases for 'gas' compatibility.
4105 .Case("a1", ARM::R0)
4106 .Case("a2", ARM::R1)
4107 .Case("a3", ARM::R2)
4108 .Case("a4", ARM::R3)
4109 .Case("v1", ARM::R4)
4110 .Case("v2", ARM::R5)
4111 .Case("v3", ARM::R6)
4112 .Case("v4", ARM::R7)
4113 .Case("v5", ARM::R8)
4114 .Case("v6", ARM::R9)
4115 .Case("v7", ARM::R10)
4116 .Case("v8", ARM::R11)
4117 .Case("sb", ARM::R9)
4118 .Case("sl", ARM::R10)
4119 .Case("fp", ARM::R11)
4120 .Default(0);
4121 }
4122 if (!RegNum) {
4123 // Check for aliases registered via .req. Canonicalize to lower case.
4124 // That's more consistent since register names are case insensitive, and
4125 // it's how the original entry was passed in from MC/MCParser/AsmParser.
4126 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4127 // If no match, return failure.
4128 if (Entry == RegisterReqs.end())
4129 return -1;
4130 Parser.Lex(); // Eat identifier token.
4131 return Entry->getValue();
4132 }
4133
4134 // Some FPUs only have 16 D registers, so D16-D31 are invalid
4135 if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
4136 return -1;
4137
4138 Parser.Lex(); // Eat identifier token.
4139
4140 return RegNum;
4141}
4142
4143// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4144// If a recoverable error occurs, return 1. If an irrecoverable error
4145// occurs, return -1. An irrecoverable error is one where tokens have been
4146// consumed in the process of trying to parse the shifter (i.e., when it is
4147// indeed a shifter operand, but malformed).
4148int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4149 MCAsmParser &Parser = getParser();
4150 SMLoc S = Parser.getTok().getLoc();
4151 const AsmToken &Tok = Parser.getTok();
4152 if (Tok.isNot(AsmToken::Identifier))
4153 return -1;
4154
4155 std::string lowerCase = Tok.getString().lower();
4156 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
4157 .Case("asl", ARM_AM::lsl)
4158 .Case("lsl", ARM_AM::lsl)
4159 .Case("lsr", ARM_AM::lsr)
4160 .Case("asr", ARM_AM::asr)
4161 .Case("ror", ARM_AM::ror)
4162 .Case("rrx", ARM_AM::rrx)
4163 .Default(ARM_AM::no_shift);
4164
4165 if (ShiftTy == ARM_AM::no_shift)
4166 return 1;
4167
4168 Parser.Lex(); // Eat the operator.
4169
4170 // The source register for the shift has already been added to the
4171 // operand list, so we need to pop it off and combine it into the shifted
4172 // register operand instead.
4173 std::unique_ptr<ARMOperand> PrevOp(
4174 (ARMOperand *)Operands.pop_back_val().release());
4175 if (!PrevOp->isReg())
4176 return Error(PrevOp->getStartLoc(), "shift must be of a register");
4177 int SrcReg = PrevOp->getReg();
4178
4179 SMLoc EndLoc;
4180 int64_t Imm = 0;
4181 int ShiftReg = 0;
4182 if (ShiftTy == ARM_AM::rrx) {
4183 // RRX Doesn't have an explicit shift amount. The encoder expects
4184 // the shift register to be the same as the source register. Seems odd,
4185 // but OK.
4186 ShiftReg = SrcReg;
4187 } else {
4188 // Figure out if this is shifted by a constant or a register (for non-RRX).
4189 if (Parser.getTok().is(AsmToken::Hash) ||
4190 Parser.getTok().is(AsmToken::Dollar)) {
4191 Parser.Lex(); // Eat hash.
4192 SMLoc ImmLoc = Parser.getTok().getLoc();
4193 const MCExpr *ShiftExpr = nullptr;
4194 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4195 Error(ImmLoc, "invalid immediate shift value");
4196 return -1;
4197 }
4198 // The expression must be evaluatable as an immediate.
4199 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4200 if (!CE) {
4201 Error(ImmLoc, "invalid immediate shift value");
4202 return -1;
4203 }
4204 // Range check the immediate.
4205 // lsl, ror: 0 <= imm <= 31
4206 // lsr, asr: 0 <= imm <= 32
4207 Imm = CE->getValue();
4208 if (Imm < 0 ||
4209 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4210 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4211 Error(ImmLoc, "immediate shift value out of range");
4212 return -1;
4213 }
4214 // shift by zero is a nop. Always send it through as lsl.
4215 // ('as' compatibility)
4216 if (Imm == 0)
4217 ShiftTy = ARM_AM::lsl;
4218 } else if (Parser.getTok().is(AsmToken::Identifier)) {
4219 SMLoc L = Parser.getTok().getLoc();
4220 EndLoc = Parser.getTok().getEndLoc();
4221 ShiftReg = tryParseRegister();
4222 if (ShiftReg == -1) {
4223 Error(L, "expected immediate or register in shift operand");
4224 return -1;
4225 }
4226 } else {
4227 Error(Parser.getTok().getLoc(),
4228 "expected immediate or register in shift operand");
4229 return -1;
4230 }
4231 }
4232
4233 if (ShiftReg && ShiftTy != ARM_AM::rrx)
4234 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4235 ShiftReg, Imm,
4236 S, EndLoc));
4237 else
4238 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4239 S, EndLoc));
4240
4241 return 0;
4242}
4243
4244/// Try to parse a register name. The token must be an Identifier when called.
4245/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4246/// if there is a "writeback". 'true' if it's not a register.
4247///
4248/// TODO this is likely to change to allow different register types and or to
4249/// parse for a specific register type.
4250bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4251 MCAsmParser &Parser = getParser();
4252 SMLoc RegStartLoc = Parser.getTok().getLoc();
4253 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4254 int RegNo = tryParseRegister();
4255 if (RegNo == -1)
4256 return true;
4257
4258 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4259
4260 const AsmToken &ExclaimTok = Parser.getTok();
4261 if (ExclaimTok.is(AsmToken::Exclaim)) {
4262 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4263 ExclaimTok.getLoc()));
4264 Parser.Lex(); // Eat exclaim token
4265 return false;
4266 }
4267
4268 // Also check for an index operand. This is only legal for vector registers,
4269 // but that'll get caught OK in operand matching, so we don't need to
4270 // explicitly filter everything else out here.
4271 if (Parser.getTok().is(AsmToken::LBrac)) {
4272 SMLoc SIdx = Parser.getTok().getLoc();
4273 Parser.Lex(); // Eat left bracket token.
4274
4275 const MCExpr *ImmVal;
4276 if (getParser().parseExpression(ImmVal))
4277 return true;
4278 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4279 if (!MCE)
4280 return TokError("immediate value expected for vector index");
4281
4282 if (Parser.getTok().isNot(AsmToken::RBrac))
4283 return Error(Parser.getTok().getLoc(), "']' expected");
4284
4285 SMLoc E = Parser.getTok().getEndLoc();
4286 Parser.Lex(); // Eat right bracket token.
4287
4288 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4289 SIdx, E,
4290 getContext()));
4291 }
4292
4293 return false;
4294}
4295
4296/// MatchCoprocessorOperandName - Try to parse an coprocessor related
4297/// instruction with a symbolic operand name.
4298/// We accept "crN" syntax for GAS compatibility.
4299/// <operand-name> ::= <prefix><number>
4300/// If CoprocOp is 'c', then:
4301/// <prefix> ::= c | cr
4302/// If CoprocOp is 'p', then :
4303/// <prefix> ::= p
4304/// <number> ::= integer in range [0, 15]
4305static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4306 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4307 // but efficient.
4308 if (Name.size() < 2 || Name[0] != CoprocOp)
4309 return -1;
4310 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4311
4312 switch (Name.size()) {
4313 default: return -1;
4314 case 1:
4315 switch (Name[0]) {
4316 default: return -1;
4317 case '0': return 0;
4318 case '1': return 1;
4319 case '2': return 2;
4320 case '3': return 3;
4321 case '4': return 4;
4322 case '5': return 5;
4323 case '6': return 6;
4324 case '7': return 7;
4325 case '8': return 8;
4326 case '9': return 9;
4327 }
4328 case 2:
4329 if (Name[0] != '1')
4330 return -1;
4331 switch (Name[1]) {
4332 default: return -1;
4333 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4334 // However, old cores (v5/v6) did use them in that way.
4335 case '0': return 10;
4336 case '1': return 11;
4337 case '2': return 12;
4338 case '3': return 13;
4339 case '4': return 14;
4340 case '5': return 15;
4341 }
4342 }
4343}
4344
4345/// parseITCondCode - Try to parse a condition code for an IT instruction.
4346OperandMatchResultTy
4347ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4348 MCAsmParser &Parser = getParser();
4349 SMLoc S = Parser.getTok().getLoc();
4350 const AsmToken &Tok = Parser.getTok();
4351 if (!Tok.is(AsmToken::Identifier))
4352 return MatchOperand_NoMatch;
4353 unsigned CC = ARMCondCodeFromString(Tok.getString());
4354 if (CC == ~0U)
4355 return MatchOperand_NoMatch;
4356 Parser.Lex(); // Eat the token.
4357
4358 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4359
4360 return MatchOperand_Success;
4361}
4362
4363/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4364/// token must be an Identifier when called, and if it is a coprocessor
4365/// number, the token is eaten and the operand is added to the operand list.
4366OperandMatchResultTy
4367ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4368 MCAsmParser &Parser = getParser();
4369 SMLoc S = Parser.getTok().getLoc();
4370 const AsmToken &Tok = Parser.getTok();
4371 if (Tok.isNot(AsmToken::Identifier))
4372 return MatchOperand_NoMatch;
4373
4374 int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4375 if (Num == -1)
4376 return MatchOperand_NoMatch;
4377 if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4378 return MatchOperand_NoMatch;
4379
4380 Parser.Lex(); // Eat identifier token.
4381 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4382 return MatchOperand_Success;
4383}
4384
4385/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4386/// token must be an Identifier when called, and if it is a coprocessor
4387/// number, the token is eaten and the operand is added to the operand list.
4388OperandMatchResultTy
4389ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4390 MCAsmParser &Parser = getParser();
4391 SMLoc S = Parser.getTok().getLoc();
4392 const AsmToken &Tok = Parser.getTok();
4393 if (Tok.isNot(AsmToken::Identifier))
4394 return MatchOperand_NoMatch;
4395
4396 int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4397 if (Reg == -1)
4398 return MatchOperand_NoMatch;
4399
4400 Parser.Lex(); // Eat identifier token.
4401 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4402 return MatchOperand_Success;
4403}
4404
4405/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4406/// coproc_option : '{' imm0_255 '}'
4407OperandMatchResultTy
4408ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4409 MCAsmParser &Parser = getParser();
4410 SMLoc S = Parser.getTok().getLoc();
4411
4412 // If this isn't a '{', this isn't a coprocessor immediate operand.
4413 if (Parser.getTok().isNot(AsmToken::LCurly))
4414 return MatchOperand_NoMatch;
4415 Parser.Lex(); // Eat the '{'
4416
4417 const MCExpr *Expr;
4418 SMLoc Loc = Parser.getTok().getLoc();
4419 if (getParser().parseExpression(Expr)) {
4420 Error(Loc, "illegal expression");
4421 return MatchOperand_ParseFail;
4422 }
4423 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4424 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
4425 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
4426 return MatchOperand_ParseFail;
4427 }
4428 int Val = CE->getValue();
4429
4430 // Check for and consume the closing '}'
4431 if (Parser.getTok().isNot(AsmToken::RCurly))
4432 return MatchOperand_ParseFail;
4433 SMLoc E = Parser.getTok().getEndLoc();
4434 Parser.Lex(); // Eat the '}'
4435
4436 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4437 return MatchOperand_Success;
4438}
4439
4440// For register list parsing, we need to map from raw GPR register numbering
4441// to the enumeration values. The enumeration values aren't sorted by
4442// register number due to our using "sp", "lr" and "pc" as canonical names.
4443static unsigned getNextRegister(unsigned Reg) {
4444 // If this is a GPR, we need to do it manually, otherwise we can rely
4445 // on the sort ordering of the enumeration since the other reg-classes
4446 // are sane.
4447 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4448 return Reg + 1;
4449 switch(Reg) {
4450 default: llvm_unreachable("Invalid GPR number!")::llvm::llvm_unreachable_internal("Invalid GPR number!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 4450)
;
4451 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4452 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4453 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4454 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4455 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4456 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4457 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4458 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4459 }
4460}
4461
4462// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4463// success, or false, if duplicate encoding found.
4464static bool
4465insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4466 unsigned Enc, unsigned Reg) {
4467 Regs.emplace_back(Enc, Reg);
4468 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4469 if (J->first == Enc) {
4470 Regs.erase(J.base());
4471 return false;
4472 }
4473 if (J->first < Enc)
4474 break;
4475 std::swap(*I, *J);
4476 }
4477 return true;
4478}
4479
4480/// Parse a register list.
4481bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4482 bool AllowRAAC) {
4483 MCAsmParser &Parser = getParser();
4484 if (Parser.getTok().isNot(AsmToken::LCurly))
4485 return TokError("Token is not a Left Curly Brace");
4486 SMLoc S = Parser.getTok().getLoc();
4487 Parser.Lex(); // Eat '{' token.
4488 SMLoc RegLoc = Parser.getTok().getLoc();
4489
4490 // Check the first register in the list to see what register class
4491 // this is a list of.
4492 int Reg = tryParseRegister();
4493 if (Reg == -1)
4494 return Error(RegLoc, "register expected");
4495 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4496 return Error(RegLoc, "pseudo-register not allowed");
4497 // The reglist instructions have at most 16 registers, so reserve
4498 // space for that many.
4499 int EReg = 0;
4500 SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4501
4502 // Allow Q regs and just interpret them as the two D sub-registers.
4503 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4504 Reg = getDRegFromQReg(Reg);
4505 EReg = MRI->getEncodingValue(Reg);
4506 Registers.emplace_back(EReg, Reg);
4507 ++Reg;
4508 }
4509 const MCRegisterClass *RC;
4510 if (Reg == ARM::RA_AUTH_CODE ||
4511 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4512 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4513 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4514 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4515 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4516 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4517 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4518 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4519 else
4520 return Error(RegLoc, "invalid register in register list");
4521
4522 // Store the register.
4523 EReg = MRI->getEncodingValue(Reg);
4524 Registers.emplace_back(EReg, Reg);
4525
4526 // This starts immediately after the first register token in the list,
4527 // so we can see either a comma or a minus (range separator) as a legal
4528 // next token.
4529 while (Parser.getTok().is(AsmToken::Comma) ||
4530 Parser.getTok().is(AsmToken::Minus)) {
4531 if (Parser.getTok().is(AsmToken::Minus)) {
4532 if (Reg == ARM::RA_AUTH_CODE)
4533 return Error(RegLoc, "pseudo-register not allowed");
4534 Parser.Lex(); // Eat the minus.
4535 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4536 int EndReg = tryParseRegister();
4537 if (EndReg == -1)
4538 return Error(AfterMinusLoc, "register expected");
4539 if (EndReg == ARM::RA_AUTH_CODE)
4540 return Error(AfterMinusLoc, "pseudo-register not allowed");
4541 // Allow Q regs and just interpret them as the two D sub-registers.
4542 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4543 EndReg = getDRegFromQReg(EndReg) + 1;
4544 // If the register is the same as the start reg, there's nothing
4545 // more to do.
4546 if (Reg == EndReg)
4547 continue;
4548 // The register must be in the same register class as the first.
4549 if (!RC->contains(Reg))
4550 return Error(AfterMinusLoc, "invalid register in register list");
4551 // Ranges must go from low to high.
4552 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4553 return Error(AfterMinusLoc, "bad range in register list");
4554
4555 // Add all the registers in the range to the register list.
4556 while (Reg != EndReg) {
4557 Reg = getNextRegister(Reg);
4558 EReg = MRI->getEncodingValue(Reg);
4559 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4560 Warning(AfterMinusLoc, StringRef("duplicated register (") +
4561 ARMInstPrinter::getRegisterName(Reg) +
4562 ") in register list");
4563 }
4564 }
4565 continue;
4566 }
4567 Parser.Lex(); // Eat the comma.
4568 RegLoc = Parser.getTok().getLoc();
4569 int OldReg = Reg;
4570 const AsmToken RegTok = Parser.getTok();
4571 Reg = tryParseRegister();
4572 if (Reg == -1)
4573 return Error(RegLoc, "register expected");
4574 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4575 return Error(RegLoc, "pseudo-register not allowed");
4576 // Allow Q regs and just interpret them as the two D sub-registers.
4577 bool isQReg = false;
4578 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4579 Reg = getDRegFromQReg(Reg);
4580 isQReg = true;
4581 }
4582 if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4583 RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4584 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4585 // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4586 // subset of GPRRegClassId except it contains APSR as well.
4587 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4588 }
4589 if (Reg == ARM::VPR &&
4590 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4591 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4592 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4593 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4594 EReg = MRI->getEncodingValue(Reg);
4595 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4596 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4597 ") in register list");
4598 }
4599 continue;
4600 }
4601 // The register must be in the same register class as the first.
4602 if ((Reg == ARM::RA_AUTH_CODE &&
4603 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4604 (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4605 return Error(RegLoc, "invalid register in register list");
4606 // In most cases, the list must be monotonically increasing. An
4607 // exception is CLRM, which is order-independent anyway, so
4608 // there's no potential for confusion if you write clrm {r2,r1}
4609 // instead of clrm {r1,r2}.
4610 if (EnforceOrder &&
4611 MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4612 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4613 Warning(RegLoc, "register list not in ascending order");
4614 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4615 return Error(RegLoc, "register list not in ascending order");
4616 }
4617 // VFP register lists must also be contiguous.
4618 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4619 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4620 Reg != OldReg + 1)
4621 return Error(RegLoc, "non-contiguous register range");
4622 EReg = MRI->getEncodingValue(Reg);
4623 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4624 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4625 ") in register list");
4626 }
4627 if (isQReg) {
4628 EReg = MRI->getEncodingValue(++Reg);
4629 Registers.emplace_back(EReg, Reg);
4630 }
4631 }
4632
4633 if (Parser.getTok().isNot(AsmToken::RCurly))
4634 return Error(Parser.getTok().getLoc(), "'}' expected");
4635 SMLoc E = Parser.getTok().getEndLoc();
4636 Parser.Lex(); // Eat '}' token.
4637
4638 // Push the register list operand.
4639 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4640
4641 // The ARM system instruction variants for LDM/STM have a '^' token here.
4642 if (Parser.getTok().is(AsmToken::Caret)) {
4643 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4644 Parser.Lex(); // Eat '^' token.
4645 }
4646
4647 return false;
4648}
4649
// Helper function to parse the lane index for vector lists.
//
// Recognizes three forms following a vector register:
//   (no '[' at all) -> LaneKind = NoLanes (no tokens consumed)
//   "[]"            -> LaneKind = AllLanes
//   "[<imm>]"       -> LaneKind = IndexedLane, Index = <imm> (0-7)
// Returns MatchOperand_Success for any of the above, or
// MatchOperand_ParseFail for a malformed lane specifier. EndLoc is updated
// to just past the closing ']' when one is consumed.
OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      EndLoc = Parser.getTok().getEndLoc();
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assemble puts one in, and it's friendly to accept that.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat '#' or '$'.

    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().parseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    EndLoc = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    // NOTE(review): the ']' has already been eaten here, so this diagnostic
    // points at the token after the lane specifier rather than the index.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[': the register carries no lane specifier at all.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}
4701
// parse a vector register list
//
// Handles NEON/MVE vector register lists such as {d0, d1}, {d0-d3},
// {q0, q1} and lane-indexed forms like {d0[2], d1[2]}, plus the gas
// extension of a bare D or Q register standing for a one- or two-entry
// list. All elements must agree in lane syntax ([], [n], or none) and in
// register spacing (single vs. double).
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    // A bare Q register is treated as the pair of D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0; // 0 = not yet known, 1 = single-spaced, 2 = double-spaced.
  unsigned FirstReg = Reg;

  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
    Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
    return MatchOperand_ParseFail;
  }
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // Consume the remaining ", Rn" and "-Rn" elements of the list.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if ((hasMVE() &&
           !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
          (!hasMVE() &&
           !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }

    if (hasMVE()) {
      if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
        Error(RegLoc, "vector register in range Q0-Q7 expected");
        return MatchOperand_ParseFail;
      }
      Spacing = 1;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
  case AllLanes: {
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2 && !hasMVE()) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
                   ARMOperand::CreateVectorListAllLanes);
    Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
    break;
  }
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
4958
4959/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4960OperandMatchResultTy
4961ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4962 MCAsmParser &Parser = getParser();
4963 SMLoc S = Parser.getTok().getLoc();
4964 const AsmToken &Tok = Parser.getTok();
4965 unsigned Opt;
4966
4967 if (Tok.is(AsmToken::Identifier)) {
4968 StringRef OptStr = Tok.getString();
4969
4970 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4971 .Case("sy", ARM_MB::SY)
4972 .Case("st", ARM_MB::ST)
4973 .Case("ld", ARM_MB::LD)
4974 .Case("sh", ARM_MB::ISH)
4975 .Case("ish", ARM_MB::ISH)
4976 .Case("shst", ARM_MB::ISHST)
4977 .Case("ishst", ARM_MB::ISHST)
4978 .Case("ishld", ARM_MB::ISHLD)
4979 .Case("nsh", ARM_MB::NSH)
4980 .Case("un", ARM_MB::NSH)
4981 .Case("nshst", ARM_MB::NSHST)
4982 .Case("nshld", ARM_MB::NSHLD)
4983 .Case("unst", ARM_MB::NSHST)
4984 .Case("osh", ARM_MB::OSH)
4985 .Case("oshst", ARM_MB::OSHST)
4986 .Case("oshld", ARM_MB::OSHLD)
4987 .Default(~0U);
4988
4989 // ishld, oshld, nshld and ld are only available from ARMv8.
4990 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4991 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4992 Opt = ~0U;
4993
4994 if (Opt == ~0U)
4995 return MatchOperand_NoMatch;
4996
4997 Parser.Lex(); // Eat identifier token.
4998 } else if (Tok.is(AsmToken::Hash) ||
4999 Tok.is(AsmToken::Dollar) ||
5000 Tok.is(AsmToken::Integer)) {
5001 if (Parser.getTok().isNot(AsmToken::Integer))
5002 Parser.Lex(); // Eat '#' or '$'.
5003 SMLoc Loc = Parser.getTok().getLoc();
5004
5005 const MCExpr *MemBarrierID;
5006 if (getParser().parseExpression(MemBarrierID)) {
5007 Error(Loc, "illegal expression");
5008 return MatchOperand_ParseFail;
5009 }
5010
5011 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
5012 if (!CE) {
5013 Error(Loc, "constant expression expected");
5014 return MatchOperand_ParseFail;
5015 }
5016
5017 int Val = CE->getValue();
5018 if (Val & ~0xf) {
5019 Error(Loc, "immediate value out of range");
5020 return MatchOperand_ParseFail;
5021 }
5022
5023 Opt = ARM_MB::RESERVED_0 + Val;
5024 } else
5025 return MatchOperand_ParseFail;
5026
5027 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
5028 return MatchOperand_Success;
5029}
5030
5031OperandMatchResultTy
5032ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5033 MCAsmParser &Parser = getParser();
5034 SMLoc S = Parser.getTok().getLoc();
5035 const AsmToken &Tok = Parser.getTok();
5036
5037 if (Tok.isNot(AsmToken::Identifier))
5038 return MatchOperand_NoMatch;
5039
5040 if (!Tok.getString().equals_insensitive("csync"))
5041 return MatchOperand_NoMatch;
5042
5043 Parser.Lex(); // Eat identifier token.
5044
5045 Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
5046 return MatchOperand_Success;
5047}
5048
5049/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
5050OperandMatchResultTy
5051ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
5052 MCAsmParser &Parser = getParser();
5053 SMLoc S = Parser.getTok().getLoc();
5054 const AsmToken &Tok = Parser.getTok();
5055 unsigned Opt;
5056
5057 if (Tok.is(AsmToken::Identifier)) {
5058 StringRef OptStr = Tok.getString();
5059
5060 if (OptStr.equals_insensitive("sy"))
5061 Opt = ARM_ISB::SY;
5062 else
5063 return MatchOperand_NoMatch;
5064
5065 Parser.Lex(); // Eat identifier token.
5066 } else if (Tok.is(AsmToken::Hash) ||
5067 Tok.is(AsmToken::Dollar) ||
5068 Tok.is(AsmToken::Integer)) {
5069 if (Parser.getTok().isNot(AsmToken::Integer))
5070 Parser.Lex(); // Eat '#' or '$'.
5071 SMLoc Loc = Parser.getTok().getLoc();
5072
5073 const MCExpr *ISBarrierID;
5074 if (getParser().parseExpression(ISBarrierID)) {
5075 Error(Loc, "illegal expression");
5076 return MatchOperand_ParseFail;
5077 }
5078
5079 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5080 if (!CE) {
5081 Error(Loc, "constant expression expected");
5082 return MatchOperand_ParseFail;
5083 }
5084
5085 int Val = CE->getValue();
5086 if (Val & ~0xf) {
5087 Error(Loc, "immediate value out of range");
5088 return MatchOperand_ParseFail;
5089 }
5090
5091 Opt = ARM_ISB::RESERVED_0 + Val;
5092 } else
5093 return MatchOperand_ParseFail;
5094
5095 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5096 (ARM_ISB::InstSyncBOpt)Opt, S));
5097 return MatchOperand_Success;
5098}
5099
5100
5101/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5102OperandMatchResultTy
5103ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5104 MCAsmParser &Parser = getParser();
5105 SMLoc S = Parser.getTok().getLoc();
5106 const AsmToken &Tok = Parser.getTok();
5107 if (!Tok.is(AsmToken::Identifier))
5108 return MatchOperand_NoMatch;
5109 StringRef IFlagsStr = Tok.getString();
5110
5111 // An iflags string of "none" is interpreted to mean that none of the AIF
5112 // bits are set. Not a terribly useful instruction, but a valid encoding.
5113 unsigned IFlags = 0;
5114 if (IFlagsStr != "none") {
5115 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5116 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5117 .Case("a", ARM_PROC::A)
5118 .Case("i", ARM_PROC::I)
5119 .Case("f", ARM_PROC::F)
5120 .Default(~0U);
5121
5122 // If some specific iflag is already set, it means that some letter is
5123 // present more than once, this is not acceptable.
5124 if (Flag == ~0U || (IFlags & Flag))
5125 return MatchOperand_NoMatch;
5126
5127 IFlags |= Flag;
5128 }
5129 }
5130
5131 Parser.Lex(); // Eat identifier token.
5132 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5133 return MatchOperand_Success;
5134}
5135
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
///
/// Accepted forms:
///   - a bare integer in [0, 255] (used directly as the mask value)
///   - on M-class targets, a named system register (e.g. "apsr", "basepri")
///   - otherwise, APSR/CPSR/SPSR with optional "_<flags>" suffix; the
///     resulting value packs the flag mask in bits 3-0 and "is SPSR" in
///     bit 4.
OperandMatchResultTy
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A bare integer is taken as the raw mask value, range-checked to a byte.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return MatchOperand_NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  // M-class: look the name up in the generated system-register table and
  // check it is available with the current feature set.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return MatchOperand_NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR accepts only these fixed flag groups (note: matched
    // case-sensitively, unlike the register name itself).
    FlagsVal = StringSwitch<unsigned>(Flags)
             .Case("nzcvq",  0x8) // same as CPSR_f
             .Case("g",      0x4) // same as CPSR_s
             .Case("nzcvqg", 0xc) // same as CPSR_fs
             .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each letter c/x/s/f contributes one mask bit; duplicates are rejected.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
5232
5233/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5234/// use in the MRS/MSR instructions added to support virtualization.
5235OperandMatchResultTy
5236ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5237 MCAsmParser &Parser = getParser();
5238 SMLoc S = Parser.getTok().getLoc();
5239 const AsmToken &Tok = Parser.getTok();
5240 if (!Tok.is(AsmToken::Identifier))
5241 return MatchOperand_NoMatch;
5242 StringRef RegName = Tok.getString();
5243
5244 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5245 if (!TheReg)
5246 return MatchOperand_NoMatch;
5247 unsigned Encoding = TheReg->Encoding;
5248
5249 Parser.Lex(); // Eat identifier token.
5250 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5251 return MatchOperand_Success;
5252}
5253
5254OperandMatchResultTy
5255ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
5256 int High) {
5257 MCAsmParser &Parser = getParser();
5258 const AsmToken &Tok = Parser.getTok();
5259 if (Tok.isNot(AsmToken::Identifier)) {
5260 Error(Parser.getTok().getLoc(), Op + " operand expected.");
5261 return MatchOperand_ParseFail;
5262 }
5263 StringRef ShiftName = Tok.getString();
5264 std::string LowerOp = Op.lower();
5265 std::string UpperOp = Op.upper();
5266 if (ShiftName != LowerOp && ShiftName != UpperOp) {
5267 Error(Parser.getTok().getLoc(), Op + " operand expected.");
5268 return MatchOperand_ParseFail;
5269 }
5270 Parser.Lex(); // Eat shift type token.
5271
5272 // There must be a '#' and a shift amount.
5273 if (Parser.getTok().isNot(AsmToken::Hash) &&
5274 Parser.getTok().isNot(AsmToken::Dollar)) {
5275 Error(Parser.getTok().getLoc(), "'#' expected");
5276 return MatchOperand_ParseFail;
5277 }
5278 Parser.Lex(); // Eat hash token.
5279
5280 const MCExpr *ShiftAmount;
5281 SMLoc Loc = Parser.getTok().getLoc();
5282 SMLoc EndLoc;
5283 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5284 Error(Loc, "illegal expression");
5285 return MatchOperand_ParseFail;
5286 }
5287 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5288 if (!CE) {
5289 Error(Loc, "constant expression expected");
5290 return MatchOperand_ParseFail;
5291 }
5292 int Val = CE->getValue();
5293 if (Val < Low || Val > High) {
5294 Error(Loc, "immediate value out of range");
5295 return MatchOperand_ParseFail;
5296 }
5297
5298 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5299
5300 return MatchOperand_Success;
5301}
5302
5303OperandMatchResultTy
5304ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5305 MCAsmParser &Parser = getParser();
5306 const AsmToken &Tok = Parser.getTok();
5307 SMLoc S = Tok.getLoc();
5308 if (Tok.isNot(AsmToken::Identifier)) {
5309 Error(S, "'be' or 'le' operand expected");
5310 return MatchOperand_ParseFail;
5311 }
5312 int Val = StringSwitch<int>(Tok.getString().lower())
5313 .Case("be", 1)
5314 .Case("le", 0)
5315 .Default(-1);
5316 Parser.Lex(); // Eat the token.
5317
5318 if (Val == -1) {
5319 Error(S, "'be' or 'le' operand expected");
5320 return MatchOperand_ParseFail;
5321 }
5322 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5323 getContext()),
5324 S, Tok.getEndLoc()));
5325 return MatchOperand_Success;
5326}
5327
5328/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5329/// instructions. Legal values are:
5330/// lsl #n 'n' in [0,31]
5331/// asr #n 'n' in [1,32]
5332/// n == 32 encoded as n == 0.
5333OperandMatchResultTy
5334ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5335 MCAsmParser &Parser = getParser();
5336 const AsmToken &Tok = Parser.getTok();
5337 SMLoc S = Tok.getLoc();
5338 if (Tok.isNot(AsmToken::Identifier)) {
5339 Error(S, "shift operator 'asr' or 'lsl' expected");
5340 return MatchOperand_ParseFail;
5341 }
5342 StringRef ShiftName = Tok.getString();
5343 bool isASR;
5344 if (ShiftName == "lsl" || ShiftName == "LSL")
5345 isASR = false;
5346 else if (ShiftName == "asr" || ShiftName == "ASR")
5347 isASR = true;
5348 else {
5349 Error(S, "shift operator 'asr' or 'lsl' expected");
5350 return MatchOperand_ParseFail;
5351 }
5352 Parser.Lex(); // Eat the operator.
5353
5354 // A '#' and a shift amount.
5355 if (Parser.getTok().isNot(AsmToken::Hash) &&
5356 Parser.getTok().isNot(AsmToken::Dollar)) {
5357 Error(Parser.getTok().getLoc(), "'#' expected");
5358 return MatchOperand_ParseFail;
5359 }
5360 Parser.Lex(); // Eat hash token.
5361 SMLoc ExLoc = Parser.getTok().getLoc();
5362
5363 const MCExpr *ShiftAmount;
5364 SMLoc EndLoc;
5365 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5366 Error(ExLoc, "malformed shift expression");
5367 return MatchOperand_ParseFail;
5368 }
5369 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5370 if (!CE) {
5371 Error(ExLoc, "shift amount must be an immediate");
5372 return MatchOperand_ParseFail;
5373 }
5374
5375 int64_t Val = CE->getValue();
5376 if (isASR) {
5377 // Shift amount must be in [1,32]
5378 if (Val < 1 || Val > 32) {
5379 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5380 return MatchOperand_ParseFail;
5381 }
5382 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5383 if (isThumb() && Val == 32) {
5384 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5385 return MatchOperand_ParseFail;
5386 }
5387 if (Val == 32) Val = 0;
5388 } else {
5389 // Shift amount must be in [1,32]
5390 if (Val < 0 || Val > 31) {
5391 Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
5392 return MatchOperand_ParseFail;
5393 }
5394 }
5395
5396 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5397
5398 return MatchOperand_Success;
5399}
5400
5401/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5402/// of instructions. Legal values are:
5403/// ror #n 'n' in {0, 8, 16, 24}
5404OperandMatchResultTy
5405ARMAsmParser::parseRotImm(OperandVector &Operands) {
5406 MCAsmParser &Parser = getParser();
5407 const AsmToken &Tok = Parser.getTok();
5408 SMLoc S = Tok.getLoc();
5409 if (Tok.isNot(AsmToken::Identifier))
5410 return MatchOperand_NoMatch;
5411 StringRef ShiftName = Tok.getString();
5412 if (ShiftName != "ror" && ShiftName != "ROR")
5413 return MatchOperand_NoMatch;
5414 Parser.Lex(); // Eat the operator.
5415
5416 // A '#' and a rotate amount.
5417 if (Parser.getTok().isNot(AsmToken::Hash) &&
5418 Parser.getTok().isNot(AsmToken::Dollar)) {
5419 Error(Parser.getTok().getLoc(), "'#' expected");
5420 return MatchOperand_ParseFail;
5421 }
5422 Parser.Lex(); // Eat hash token.
5423 SMLoc ExLoc = Parser.getTok().getLoc();
5424
5425 const MCExpr *ShiftAmount;
5426 SMLoc EndLoc;
5427 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5428 Error(ExLoc, "malformed rotate expression");
5429 return MatchOperand_ParseFail;
5430 }
5431 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5432 if (!CE) {
5433 Error(ExLoc, "rotate amount must be an immediate");
5434 return MatchOperand_ParseFail;
5435 }
5436
5437 int64_t Val = CE->getValue();
5438 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
5439 // normally, zero is represented in asm by omitting the rotate operand
5440 // entirely.
5441 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
5442 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5443 return MatchOperand_ParseFail;
5444 }
5445
5446 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5447
5448 return MatchOperand_Success;
5449}
5450
5451OperandMatchResultTy
5452ARMAsmParser::parseModImm(OperandVector &Operands) {
5453 MCAsmParser &Parser = getParser();
5454 MCAsmLexer &Lexer = getLexer();
5455 int64_t Imm1, Imm2;
5456
5457 SMLoc S = Parser.getTok().getLoc();
5458
5459 // 1) A mod_imm operand can appear in the place of a register name:
5460 // add r0, #mod_imm
5461 // add r0, r0, #mod_imm
5462 // to correctly handle the latter, we bail out as soon as we see an
5463 // identifier.
5464 //
5465 // 2) Similarly, we do not want to parse into complex operands:
5466 // mov r0, #mod_imm
5467 // mov r0, :lower16:(_foo)
5468 if (Parser.getTok().is(AsmToken::Identifier) ||
5469 Parser.getTok().is(AsmToken::Colon))
5470 return MatchOperand_NoMatch;
5471
5472 // Hash (dollar) is optional as per the ARMARM
5473 if (Parser.getTok().is(AsmToken::Hash) ||
5474 Parser.getTok().is(AsmToken::Dollar)) {
5475 // Avoid parsing into complex operands (#:)
5476 if (Lexer.peekTok().is(AsmToken::Colon))
5477 return MatchOperand_NoMatch;
5478
5479 // Eat the hash (dollar)
5480 Parser.Lex();
5481 }
5482
5483 SMLoc Sx1, Ex1;
5484 Sx1 = Parser.getTok().getLoc();
5485 const MCExpr *Imm1Exp;
5486 if (getParser().parseExpression(Imm1Exp, Ex1)) {
5487 Error(Sx1, "malformed expression");
5488 return MatchOperand_ParseFail;
5489 }
5490
5491 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5492
5493 if (CE) {
5494 // Immediate must fit within 32-bits
5495 Imm1 = CE->getValue();
5496 int Enc = ARM_AM::getSOImmVal(Imm1);
5497 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5498 // We have a match!
5499 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5500 (Enc & 0xF00) >> 7,
5501 Sx1, Ex1));
5502 return MatchOperand_Success;
5503 }
5504
5505 // We have parsed an immediate which is not for us, fallback to a plain
5506 // immediate. This can happen for instruction aliases. For an example,
5507 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5508 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5509 // instruction with a mod_imm operand. The alias is defined such that the
5510 // parser method is shared, that's why we have to do this here.
5511 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5512 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5513 return MatchOperand_Success;
5514 }
5515 } else {
5516 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5517 // MCFixup). Fallback to a plain immediate.
5518 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5519 return MatchOperand_Success;
5520 }
5521
5522 // From this point onward, we expect the input to be a (#bits, #rot) pair
5523 if (Parser.getTok().isNot(AsmToken::Comma)) {
5524 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
5525 return MatchOperand_ParseFail;
5526 }
5527
5528 if (Imm1 & ~0xFF) {
5529 Error(Sx1, "immediate operand must a number in the range [0, 255]");
5530 return MatchOperand_ParseFail;
5531 }
5532
5533 // Eat the comma
5534 Parser.Lex();
5535
5536 // Repeat for #rot
5537 SMLoc Sx2, Ex2;
5538 Sx2 = Parser.getTok().getLoc();
5539
5540 // Eat the optional hash (dollar)
5541 if (Parser.getTok().is(AsmToken::Hash) ||
5542 Parser.getTok().is(AsmToken::Dollar))
5543 Parser.Lex();
5544
5545 const MCExpr *Imm2Exp;
5546 if (getParser().parseExpression(Imm2Exp, Ex2)) {
5547 Error(Sx2, "malformed expression");
5548 return MatchOperand_ParseFail;
5549 }
5550
5551 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5552
5553 if (CE) {
5554 Imm2 = CE->getValue();
5555 if (!(Imm2 & ~0x1E)) {
5556 // We have a match!
5557 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5558 return MatchOperand_Success;
5559 }
5560 Error(Sx2, "immediate operand must an even number in the range [0, 30]");
5561 return MatchOperand_ParseFail;
5562 } else {
5563 Error(Sx2, "constant expression expected");
5564 return MatchOperand_ParseFail;
5565 }
5566}
5567
/// Parse the "#<lsb>, #<width>" operand pair of the bitfield instructions
/// (BFC/BFI/SBFX/UBFX) and push it as a single bitfield operand. The lsb
/// must be in [0,31] and the width in [1, 32-lsb].
OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}
5636
5637OperandMatchResultTy
5638ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5639 // Check for a post-index addressing register operand. Specifically:
5640 // postidx_reg := '+' register {, shift}
5641 // | '-' register {, shift}
5642 // | register {, shift}
5643
5644 // This method must return MatchOperand_NoMatch without consuming any tokens
5645 // in the case where there is no match, as other alternatives take other
5646 // parse methods.
5647 MCAsmParser &Parser = getParser();
5648 AsmToken Tok = Parser.getTok();
5649 SMLoc S = Tok.getLoc();
5650 bool haveEaten = false;
5651 bool isAdd = true;
5652 if (Tok.is(AsmToken::Plus)) {
5653 Parser.Lex(); // Eat the '+' token.
5654 haveEaten = true;
5655 } else if (Tok.is(AsmToken::Minus)) {
5656 Parser.Lex(); // Eat the '-' token.
5657 isAdd = false;
5658 haveEaten = true;
5659 }
5660
5661 SMLoc E = Parser.getTok().getEndLoc();
5662 int Reg = tryParseRegister();
5663 if (Reg == -1) {
5664 if (!haveEaten)
5665 return MatchOperand_NoMatch;
5666 Error(Parser.getTok().getLoc(), "register expected");
5667 return MatchOperand_ParseFail;
5668 }
5669
5670 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5671 unsigned ShiftImm = 0;
5672 if (Parser.getTok().is(AsmToken::Comma)) {
5673 Parser.Lex(); // Eat the ','.
5674 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5675 return MatchOperand_ParseFail;
5676
5677 // FIXME: Only approximates end...may include intervening whitespace.
5678 E = Parser.getTok().getLoc();
5679 }
5680
5681 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5682 ShiftImm, S, E));
5683
5684 return MatchOperand_Success;
5685}
5686
/// Parse an addressing-mode-3 offset operand:
///   am3offset := '+' register
///              | '-' register
///              | register
///              | # imm
///              | # + imm
///              | # - imm
OperandMatchResultTy
ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min(), so "#-0" (subtract-zero) can be
    // distinguished from "#0" (add-zero) later on.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }

  // Otherwise expect a (possibly signed) register.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // Nothing consumed yet: let another alternative try to match.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Tok.getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return MatchOperand_Success;
}
5757
5758/// Convert parsed operands to MCInst. Needed here because this instruction
5759/// only has two register operands, but multiplication is commutative so
5760/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
5761void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5762 const OperandVector &Operands) {
5763 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5764 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5765 // If we have a three-operand form, make sure to set Rn to be the operand
5766 // that isn't the same as Rd.
5767 unsigned RegOp = 4;
5768 if (Operands.size() == 6 &&
5769 ((ARMOperand &)*Operands[4]).getReg() ==
5770 ((ARMOperand &)*Operands[3]).getReg())
5771 RegOp = 5;
5772 ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
5773 Inst.addOperand(Inst.getOperand(0));
5774 ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5775}
5776
/// Convert parsed Thumb branch operands to an MCInst, selecting between the
/// conditional/unconditional opcodes (based on IT-block context and condition
/// code) and between narrow/wide encodings (based on target offset range).
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Locate the condition-code and immediate operands for this opcode.
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc: CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }

  // first decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
  // classify tB as either t2B or t1B based on range of immediate operand
  case ARM::tB: {
    ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
    // Widen to the Thumb2 encoding when the offset doesn't fit in 11 bits.
    if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
      Inst.setOpcode(ARM::t2B);
    break;
  }
  // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
  case ARM::tBcc: {
    ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
    // Widen to the Thumb2 encoding when the offset doesn't fit in 8 bits.
    if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  }
  }
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
5834
/// Build the MCInst for the MVE VMOV form that transfers two GPRs to/from two
/// lanes of a vector register. The second copy of Qd in the parsed operand
/// list is redundant and is skipped.
void ARMAsmParser::cvtMVEVMOVQtoDReg(
  MCInst &Inst, const OperandVector &Operands) {

  // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
  assert(Operands.size() == 8);

  ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
  ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
  ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
  // skip second copy of Qd in Operands[6]
  ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
  ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
}
5849
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
///
/// Handles the forms:
///   [Rn]            (with optional trailing '!')
///   [Rn:align]      (alignment specifier, with optional trailing '!')
///   [Rn, #imm]      (immediate offset)
///   [Rn, +/-Rm {, shift}]  (register offset, optionally shifted)
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Bare "[Rn]" form: base register, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Convert the alignment-in-bits notation into the byte count the operand
    // representation expects.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#' or '$', it's an immediate offset, else assume it's a
  // register offset. Be friendly and also accept a plain integer or expression
  // (without a leading hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::LParen) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar))
      Parser.Lex(); // Eat '#' or '$'
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset, *AdjustedOffset;
    if (getParser().parseExpression(Offset))
      return true;

    if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
      // If the constant was #-0, represent it as
      // std::numeric_limits<int32_t>::min() so subtract-zero stays distinct
      // from add-zero.
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                    getContext());
      // Don't worry about range checking the value here. That's handled by
      // the is*() predicates.
      AdjustedOffset = CE;
    } else
      AdjustedOffset = Offset;
    Operands.push_back(ARMOperand::CreateMem(
        BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
6037
/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// return true if it parses a shift otherwise it returns false.
/// On success \p St receives the shift kind and \p Amount the shift count.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  MCAsmParser &Parser = getParser();
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return Error(Loc, "illegal shift operator");
  StringRef ShiftName = Tok.getString();
  // "asl" is accepted as a synonym for "lsl" (legacy assembler spelling).
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else if (ShiftName == "uxtw" || ShiftName == "UXTW")
    St = ARM_AM::uxtw;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    // A shift of #0 is canonicalized to lsl #0, the encoding for "no shift".
    if (Imm == 0)
      St = ARM_AM::lsl;
    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
    if (Imm == 32)
      Imm = 0;
    Amount = Imm;
  }

  return false;
}
6103
/// parseFPImm - A floating point immediate expression operand.
OperandMatchResultTy
ARMAsmParser::parseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
  bool isVmovf = TyOp.isToken() &&
                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
                  TyOp.getToken() == ".f16");
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit (bit 31 of the
    // single-precision encoding).
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Expand the 8-bit encoded form into the actual float, then store its
    // bit pattern like the Real case above.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}
6185
/// Parse a arm instruction operand.  For now this parses the operand regardless
/// of the mnemonic.
///
/// Returns true on failure (a diagnostic has been emitted); on success the
/// parsed operand(s) have been appended to \p Operands.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label.  This
    // is true even if the label is a register name.  So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    [[fallthrough]];
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    // Register lists; writeback '!' is not allowed for CLRM-style lists.
    return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate
    // $ 42 -> immediate
    // $foo -> symbol name
    // $42 -> symbol name
    S = Parser.getTok().getLoc();

    // Favor the interpretation of $-prefixed operands as symbol names.
    // Cases where immediates are explicitly expected are handled by their
    // specific ParseMethod implementations.
    auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
    bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
                            (AdjacentToken.is(AsmToken::Identifier) ||
                             AdjacentToken.is(AsmToken::Integer));
    if (!ExpectIdentifier) {
      // Token is not part of identifier. Drop leading $ or # before parsing
      // expression.
      Parser.Lex();
    }

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      bool IsNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        // "#-0" is encoded with the int32 min sentinel to keep it distinct
        // from "#0".
        int32_t Val = CE->getValue();
        if (IsNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    [[fallthrough]];
  }
  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}
6337
6338bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6339 const MCExpr *Expr = nullptr;
6340 SMLoc L = getParser().getTok().getLoc();
6341 if (check(getParser().parseExpression(Expr), L, "expected expression"))
6342 return true;
6343 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6344 if (check(!Value, L, "expected constant expression"))
6345 return true;
6346 Out = Value->getValue();
6347 return false;
6348}
6349
// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
// :lower16: and :upper16:.
//
// On success RefKind receives the matched variant kind and both colons (and
// the prefix identifier) have been consumed; returns true on error.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  MCAsmParser &Parser = getParser();
  RefKind = ARMMCExpr::VK_ARM_None;

  // consume an optional '#' (GNU compatibility)
  if (getLexer().is(AsmToken::Hash))
    Parser.Lex();

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  // Bit masks describing which object-file formats support each prefix.
  enum {
    COFF = (1 << MCContext::IsCOFF),
    ELF = (1 << MCContext::IsELF),
    MACHO = (1 << MCContext::IsMachO),
    WASM = (1 << MCContext::IsWasm),
  };
  static const struct PrefixEntry {
    const char *Spelling;
    ARMMCExpr::VariantKind VariantKind;
    uint8_t SupportedFormats;
  } PrefixEntries[] = {
    { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
    { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
  };

  StringRef IDVal = Parser.getTok().getIdentifier();

  const auto &Prefix =
      llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
        return PE.Spelling == IDVal;
      });
  if (Prefix == std::end(PrefixEntries)) {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }

  // Reject prefixes that the current object-file format cannot represent.
  uint8_t CurrentFormat;
  switch (getContext().getObjectFileType()) {
  case MCContext::IsMachO:
    CurrentFormat = MACHO;
    break;
  case MCContext::IsELF:
    CurrentFormat = ELF;
    break;
  case MCContext::IsCOFF:
    CurrentFormat = COFF;
    break;
  case MCContext::IsWasm:
    CurrentFormat = WASM;
    break;
  case MCContext::IsGOFF:
  case MCContext::IsSPIRV:
  case MCContext::IsXCOFF:
  case MCContext::IsDXContainer:
    llvm_unreachable("unexpected object format");
    break;
  }

  if (~Prefix->SupportedFormats & CurrentFormat) {
    Error(Parser.getTok().getLoc(),
          "cannot represent relocation in the current file format");
    return true;
  }

  RefKind = Prefix->VariantKind;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'

  return false;
}
6434
/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// \param Mnemonic            the mnemonic as written, possibly including a
///                            condition-code, carry-set 's', VPT 't'/'e', imod
///                            or IT/VPT mask suffix glued on.
/// \param ExtraToken          the first '.'-separated suffix token, used only
///                            to decide VPT predicability.
/// \param [out] PredicationCode   ARMCC code split off the end (ARMCC::AL if none).
/// \param [out] VPTPredicationCode ARMVCC code split off the end (ARMVCC::None if none).
/// \param [out] CarrySetting      true if a trailing 's' flag-setting suffix was split off.
/// \param [out] ProcessorIMod     ARM_PROC::IE/ID for "cps" variants, 0 otherwise.
/// \param [out] ITMask            the raw 't'/'e' mask split off "it"/"vpt"/"vpst".
/// \returns the canonical mnemonic with all recognized suffixes removed.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      StringRef ExtraToken,
                                      unsigned &PredicationCode,
                                      unsigned &VPTPredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  // Defaults: no predication, no carry setting, no imod.
  PredicationCode = ARMCC::AL;
  VPTPredicationCode = ARMVCC::None;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  // These mnemonics merely *end in* letters that look like a condition code
  // (e.g. "teq" ends in "eq") or a carry-set 's'; return them unsplit.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
      Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
      Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
      Mnemonic == "bxns" || Mnemonic == "blxns" ||
      Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
      Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
      Mnemonic == "csel" || Mnemonic == "csinc" ||
      Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
      Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
      Mnemonic == "csetm" ||
      Mnemonic == "aut" || Mnemonic == "pac" || Mnemonic == "pacbti" ||
      Mnemonic == "bti")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // MVE mnemonics listed here end in letters ("e"/"t" etc.) that would
  // otherwise be misparsed as the tail of a condition code.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs" &&
      !(hasMVE() &&
        (Mnemonic == "vmine" ||
         Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
         Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
         Mnemonic == "vmvne" || Mnemonic == "vorne" ||
         Mnemonic == "vnege" || Mnemonic == "vnegt" ||
         Mnemonic == "vmule" || Mnemonic == "vmult" ||
         Mnemonic == "vrintne" ||
         Mnemonic == "vcmult" || Mnemonic == "vcmule" ||
         Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
         Mnemonic.startswith("vq")))) {
    // Try the last two characters as an ARM condition code.
    unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
        Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
        Mnemonic == "bxns" || Mnemonic == "blxns" || Mnemonic == "vfmas" ||
        Mnemonic == "vmlas" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // Split off a trailing MVE vector-predication 't'/'e', except for the
  // listed mnemonics whose final 't' is part of the name (e.g. "vmovlt"
  // means "move top half", not a predicated "vmovl").
  if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
      Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
      Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
      Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
      Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
      Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
      Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
    unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
      VPTPredicationCode = CC;
    }
    return Mnemonic;
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  // Likewise "vpst"/"vpt" carry their then/else mask glued onto the mnemonic.
  // Note: "vpst" must be checked before "vpt" since it shares the prefix.
  if (Mnemonic.startswith("vpst")) {
    ITMask = Mnemonic.slice(4, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 4);
  }
  else if (Mnemonic.startswith("vpt")) {
    ITMask = Mnemonic.slice(3, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 3);
  }

  return Mnemonic;
}
6573
/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
///
/// \param Mnemonic  the canonical mnemonic (suffixes already split off).
/// \param ExtraToken the first '.'-suffix token, forwarded to the VPT check.
/// \param FullInst  the full instruction string as written, used for the
///                  "vmull...p64" special case.
/// \param [out] CanAcceptCarrySet          true if an 's' suffix is ever legal.
/// \param [out] CanAcceptPredicationCode   true if a condition code is ever legal.
/// \param [out] CanAcceptVPTPredicationCode true if a VPT 't'/'e' is ever legal.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  // Flag-setting variants exist only for this fixed list (plus a few
  // ARM-mode-only multiplies/moves).
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
      Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
      (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
      Mnemonic == "vfmat" || Mnemonic == "vfmab" ||
      Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" ||
      Mnemonic == "pssbb" || Mnemonic == "vsmmla" ||
      Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
      Mnemonic == "vusdot" || Mnemonic == "vsudot" ||
      Mnemonic == "bfcsel" || Mnemonic == "wls" ||
      Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
      Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
      Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
      Mnemonic == "cset" || Mnemonic == "csetm" ||
      (hasCDE() && MS.isCDEInstr(Mnemonic) &&
       !MS.isITPredicableCDEInstr(Mnemonic)) ||
      Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst") ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
      Mnemonic == "bti" ||
      (hasMVE() &&
       (Mnemonic.startswith("vst2") || Mnemonic.startswith("vld2") ||
        Mnemonic.startswith("vst4") || Mnemonic.startswith("vld4") ||
        Mnemonic.startswith("wlstp") || Mnemonic.startswith("dlstp") ||
        Mnemonic.startswith("letp")))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
        Mnemonic != "tsb" &&
        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
  } else if (isThumbOne()) {
    // Thumb1: "movs" is never predicable; pre-v6M "nop" isn't either.
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
6655
// Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
//
// Operand layout expected here: [mnemonic, cc_out, cond, Rd, Rn, Rm/#imm]
// (six operands total); anything else is left untouched. If the first two
// register operands match (possibly after swapping a commutative third
// operand), the redundant register is erased from Operands.
void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                                 bool CarrySetting,
                                                 OperandVector &Operands) {
  if (Operands.size() != 6)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
  auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Only worth transforming if SP or PC is involved somewhere.
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a two-operand Thumb encoding.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For communtative operations, we might be able to transform if we swap
  // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(Op4, Op5);
    Operands.erase(Operands.begin() + 3);
  }
}
6741
// Decide whether the defaulted (non-flag-setting) cc_out operand that was
// speculatively added for this mnemonic must be removed so that the matcher
// can select an encoding that has no cc_out slot (e.g. MOVW, Thumb2 ADDW).
// Returns true if the cc_out operand should be dropped from Operands.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                                          OperandVector &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
      return false;
    // Check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
        (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
         static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
       (Operands.size() == 6 &&
        static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
    // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
    return (!(isThumbTwo() &&
              (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
               static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
  }
  // Fixme: Should join all the thumb+thumb2 (add|sub) in a single if case
  // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
  // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[4]).isImm()) {
    const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
    if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
      return false; // add.w / sub.w
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
      const int64_t Value = CE->getValue();
      // Thumb1 imm8 sub / add
      if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
          isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
        return false;
      return true; // Thumb2 T4 addw / subw
    }
  }
  return false;
}
6888
6889bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6890 OperandVector &Operands) {
6891 // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
6892 unsigned RegIdx = 3;
6893 if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
6894 Mnemonic == "vrintr") &&
6895 (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6896 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6897 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6898 (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6899 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6900 RegIdx = 4;
6901
6902 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6903 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6904 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6905 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6906 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6907 return true;
6908 }
6909 return false;
6910}
6911
6912bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6913 OperandVector &Operands) {
6914 if (!hasMVE() || Operands.size() < 3)
6915 return true;
6916
6917 if (Mnemonic.startswith("vld2") || Mnemonic.startswith("vld4") ||
6918 Mnemonic.startswith("vst2") || Mnemonic.startswith("vst4"))
6919 return true;
6920
6921 if (Mnemonic.startswith("vctp") || Mnemonic.startswith("vpnot"))
6922 return false;
6923
6924 if (Mnemonic.startswith("vmov") &&
6925 !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
6926 Mnemonic.startswith("vmovx"))) {
6927 for (auto &Operand : Operands) {
6928 if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6929 ((*Operand).isReg() &&
6930 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6931 (*Operand).getReg()) ||
6932 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6933 (*Operand).getReg())))) {
6934 return true;
6935 }
6936 }
6937 return false;
6938 } else {
6939 for (auto &Operand : Operands) {
6940 // We check the larger class QPR instead of just the legal class
6941 // MQPR, to more accurately report errors when using Q registers
6942 // outside of the allowed range.
6943 if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6944 (Operand->isReg() &&
6945 (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6946 Operand->getReg()))))
6947 return false;
6948 }
6949 return true;
6950 }
6951}
6952
6953static bool isDataTypeToken(StringRef Tok) {
6954 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6955 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6956 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6957 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6958 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6959 Tok == ".f" || Tok == ".d";
6960}
6961
6962// FIXME: This bit should probably be handled via an explicit match class
6963// in the .td files that matches the suffix instead of having it be
6964// a literal string token the way it is now.
6965static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6966 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
6967}
6968
6969static void applyMnemonicAliases(StringRef &Mnemonic,
6970 const FeatureBitset &Features,
6971 unsigned VariantID);
6972
6973// The GNU assembler has aliases of ldrd and strd with the second register
6974// omitted. We don't have a way to do that in tablegen, so fix it up here.
6975//
6976// We have to be careful to not emit an invalid Rt2 here, because the rest of
6977// the assembly parser could then generate confusing diagnostics refering to
6978// it. If we do find anything that prevents us from doing the transformation we
6979// bail out, and let the assembly parser report an error on the instruction as
6980// it is written.
6981void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6982 OperandVector &Operands) {
6983 if (Mnemonic != "ldrd" && Mnemonic != "strd")
6984 return;
6985 if (Operands.size() < 4)
6986 return;
6987
6988 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6989 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6990
6991 if (!Op2.isReg())
6992 return;
6993 if (!Op3.isGPRMem())
6994 return;
6995
6996 const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6997 if (!GPR.contains(Op2.getReg()))
6998 return;
6999
7000 unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
7001 if (!isThumb() && (RtEncoding & 1)) {
7002 // In ARM mode, the registers must be from an aligned pair, this
7003 // restriction does not apply in Thumb mode.
7004 return;
7005 }
7006 if (Op2.getReg() == ARM::PC)
7007 return;
7008 unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
7009 if (!PairedReg || PairedReg == ARM::PC ||
7010 (PairedReg == ARM::SP && !hasV8Ops()))
7011 return;
7012
7013 Operands.insert(
7014 Operands.begin() + 3,
7015 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
7016}
7017
7018// Dual-register instruction have the following syntax:
7019// <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
7020// This function tries to remove <Rdest+1> and replace <Rdest> with a pair
7021// operand. If the conversion fails an error is diagnosed, and the function
7022// returns true.
7023bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
7024 OperandVector &Operands) {
7025 assert(MS.isCDEDualRegInstr(Mnemonic))(static_cast <bool> (MS.isCDEDualRegInstr(Mnemonic)) ? void
(0) : __assert_fail ("MS.isCDEDualRegInstr(Mnemonic)", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 7025, __extension__ __PRETTY_FUNCTION__))
;
7026 bool isPredicable =
7027 Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
7028 size_t NumPredOps = isPredicable ? 1 : 0;
7029
7030 if (Operands.size() <= 3 + NumPredOps)
7031 return false;
7032
7033 StringRef Op2Diag(
7034 "operand must be an even-numbered register in the range [r0, r10]");
7035
7036 const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
7037 if (!Op2.isReg())
7038 return Error(Op2.getStartLoc(), Op2Diag);
7039
7040 unsigned RNext;
7041 unsigned RPair;
7042 switch (Op2.getReg()) {
7043 default:
7044 return Error(Op2.getStartLoc(), Op2Diag);
7045 case ARM::R0:
7046 RNext = ARM::R1;
7047 RPair = ARM::R0_R1;
7048 break;
7049 case ARM::R2:
7050 RNext = ARM::R3;
7051 RPair = ARM::R2_R3;
7052 break;
7053 case ARM::R4:
7054 RNext = ARM::R5;
7055 RPair = ARM::R4_R5;
7056 break;
7057 case ARM::R6:
7058 RNext = ARM::R7;
7059 RPair = ARM::R6_R7;
7060 break;
7061 case ARM::R8:
7062 RNext = ARM::R9;
7063 RPair = ARM::R8_R9;
7064 break;
7065 case ARM::R10:
7066 RNext = ARM::R11;
7067 RPair = ARM::R10_R11;
7068 break;
7069 }
7070
7071 const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
7072 if (!Op3.isReg() || Op3.getReg() != RNext)
7073 return Error(Op3.getStartLoc(), "operand must be a consecutive register");
7074
7075 Operands.erase(Operands.begin() + 3 + NumPredOps);
7076 Operands[2 + NumPredOps] =
7077 ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
7078 return false;
7079}
7080
7081/// Parse an arm instruction mnemonic followed by its operands.
7082bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
7083 SMLoc NameLoc, OperandVector &Operands) {
7084 MCAsmParser &Parser = getParser();
7085
7086 // Apply mnemonic aliases before doing anything else, as the destination
7087 // mnemonic may include suffices and we want to handle them normally.
7088 // The generic tblgen'erated code does this later, at the start of
7089 // MatchInstructionImpl(), but that's too late for aliases that include
7090 // any sort of suffix.
7091 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7092 unsigned AssemblerDialect = getParser().getAssemblerDialect();
7093 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
7094
7095 // First check for the ARM-specific .req directive.
7096 if (Parser.getTok().is(AsmToken::Identifier) &&
7097 Parser.getTok().getIdentifier().lower() == ".req") {
7098 parseDirectiveReq(Name, NameLoc);
7099 // We always return 'error' for this, as we're done with this
7100 // statement and don't need to match the 'instruction."
7101 return true;
7102 }
7103
7104 // Create the leading tokens for the mnemonic, split by '.' characters.
7105 size_t Start = 0, Next = Name.find('.');
7106 StringRef Mnemonic = Name.slice(Start, Next);
7107 StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
7108
7109 // Split out the predication code and carry setting flag from the mnemonic.
7110 unsigned PredicationCode;
7111 unsigned VPTPredicationCode;
7112 unsigned ProcessorIMod;
7113 bool CarrySetting;
7114 StringRef ITMask;
7115 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7116 CarrySetting, ProcessorIMod, ITMask);
7117
7118 // In Thumb1, only the branch (B) instruction can be predicated.
7119 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
7120 return Error(NameLoc, "conditional execution not supported in Thumb1");
7121 }
7122
7123 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
7124
7125 // Handle the mask for IT and VPT instructions. In ARMOperand and
7126 // MCOperand, this is stored in a format independent of the
7127 // condition code: the lowest set bit indicates the end of the
7128 // encoding, and above that, a 1 bit indicates 'else', and an 0
7129 // indicates 'then'. E.g.
7130 // IT -> 1000
7131 // ITx -> x100 (ITT -> 0100, ITE -> 1100)
7132 // ITxy -> xy10 (e.g. ITET -> 1010)
7133 // ITxyz -> xyz1 (e.g. ITEET -> 1101)
7134 // Note: See the ARM::PredBlockMask enum in
7135 // /lib/Target/ARM/Utils/ARMBaseInfo.h
7136 if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
7137 Mnemonic.startswith("vpst")) {
7138 SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
7139 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7140 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7141 if (ITMask.size() > 3) {
7142 if (Mnemonic == "it")
7143 return Error(Loc, "too many conditions on IT instruction");
7144 return Error(Loc, "too many conditions on VPT instruction");
7145 }
7146 unsigned Mask = 8;
7147 for (char Pos : llvm::reverse(ITMask)) {
7148 if (Pos != 't' && Pos != 'e') {
7149 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7150 }
7151 Mask >>= 1;
7152 if (Pos == 'e')
7153 Mask |= 8;
7154 }
7155 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7156 }
7157
7158 // FIXME: This is all a pretty gross hack. We should automatically handle
7159 // optional operands like this via tblgen.
7160
7161 // Next, add the CCOut and ConditionCode operands, if needed.
7162 //
7163 // For mnemonics which can ever incorporate a carry setting bit or predication
7164 // code, our matching model involves us always generating CCOut and
7165 // ConditionCode operands to match the mnemonic "as written" and then we let
7166 // the matcher deal with finding the right instruction or generating an
7167 // appropriate error.
7168 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7169 getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7170 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7171
7172 // If we had a carry-set on an instruction that can't do that, issue an
7173 // error.
7174 if (!CanAcceptCarrySet && CarrySetting) {
7175 return Error(NameLoc, "instruction '" + Mnemonic +
7176 "' can not set flags, but 's' suffix specified");
7177 }
7178 // If we had a predication code on an instruction that can't do that, issue an
7179 // error.
7180 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7181 return Error(NameLoc, "instruction '" + Mnemonic +
7182 "' is not predicable, but condition code specified");
7183 }
7184
7185 // If we had a VPT predication code on an instruction that can't do that, issue an
7186 // error.
7187 if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7188 return Error(NameLoc, "instruction '" + Mnemonic +
7189 "' is not VPT predicable, but VPT code T/E is specified");
7190 }
7191
7192 // Add the carry setting operand, if necessary.
7193 if (CanAcceptCarrySet) {
7194 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7195 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7196 Loc));
7197 }
7198
7199 // Add the predication code operand, if necessary.
7200 if (CanAcceptPredicationCode) {
7201 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7202 CarrySetting);
7203 Operands.push_back(ARMOperand::CreateCondCode(
7204 ARMCC::CondCodes(PredicationCode), Loc));
7205 }
7206
7207 // Add the VPT predication code operand, if necessary.
7208 // FIXME: We don't add them for the instructions filtered below as these can
7209 // have custom operands which need special parsing. This parsing requires
7210 // the operand to be in the same place in the OperandVector as their
7211 // definition in tblgen. Since these instructions may also have the
7212 // scalar predication operand we do not add the vector one and leave until
7213 // now to fix it up.
7214 if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
7215 !Mnemonic.startswith("vcmp") &&
7216 !(Mnemonic.startswith("vcvt") && Mnemonic != "vcvta" &&
7217 Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7218 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7219 CarrySetting);
7220 Operands.push_back(ARMOperand::CreateVPTPred(
7221 ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7222 }
7223
7224 // Add the processor imod operand, if necessary.
7225 if (ProcessorIMod) {
7226 Operands.push_back(ARMOperand::CreateImm(
7227 MCConstantExpr::create(ProcessorIMod, getContext()),
7228 NameLoc, NameLoc));
7229 } else if (Mnemonic == "cps" && isMClass()) {
7230 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7231 }
7232
7233 // Add the remaining tokens in the mnemonic.
7234 while (Next != StringRef::npos) {
7235 Start = Next;
7236 Next = Name.find('.', Start + 1);
7237 ExtraToken = Name.slice(Start, Next);
7238
7239 // Some NEON instructions have an optional datatype suffix that is
7240 // completely ignored. Check for that.
7241 if (isDataTypeToken(ExtraToken) &&
7242 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7243 continue;
7244
7245     // For ARM mode generate an error if the .n qualifier is used.
7246 if (ExtraToken == ".n" && !isThumb()) {
7247 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7248 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7249 "arm mode");
7250 }
7251
7252 // The .n qualifier is always discarded as that is what the tables
7253 // and matcher expect. In ARM mode the .w qualifier has no effect,
7254 // so discard it to avoid errors that can be caused by the matcher.
7255 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7256 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7257 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7258 }
7259 }
7260
7261 // Read the remaining operands.
7262 if (getLexer().isNot(AsmToken::EndOfStatement)) {
7263 // Read the first operand.
7264 if (parseOperand(Operands, Mnemonic)) {
7265 return true;
7266 }
7267
7268 while (parseOptionalToken(AsmToken::Comma)) {
7269 // Parse and remember the operand.
7270 if (parseOperand(Operands, Mnemonic)) {
7271 return true;
7272 }
7273 }
7274 }
7275
7276 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7277 return true;
7278
7279 tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
7280
7281 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7282 // Dual-register instructions use even-odd register pairs as their
7283 // destination operand, in assembly such pair is spelled as two
7284 // consecutive registers, without any special syntax. ConvertDualRegOperand
7285 // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3.
7286 // It returns true, if an error message has been emitted. If the function
7287 // returns false, the function either succeeded or an error (e.g. missing
7288 // operand) will be diagnosed elsewhere.
7289 if (MS.isCDEDualRegInstr(Mnemonic)) {
7290 bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
7291 if (GotError)
7292 return GotError;
7293 }
7294 }
7295
7296 // Some instructions, mostly Thumb, have forms for the same mnemonic that
7297 // do and don't have a cc_out optional-def operand. With some spot-checks
7298 // of the operand list, we can figure out which variant we're trying to
7299 // parse and adjust accordingly before actually matching. We shouldn't ever
7300 // try to remove a cc_out operand that was explicitly set on the
7301 // mnemonic, of course (CarrySetting == true). Reason number #317 the
7302 // table driven matcher doesn't fit well with the ARM instruction set.
7303 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
7304 Operands.erase(Operands.begin() + 1);
7305
7306 // Some instructions have the same mnemonic, but don't always
7307 // have a predicate. Distinguish them here and delete the
7308 // appropriate predicate if needed. This could be either the scalar
7309 // predication code or the vector predication code.
7310 if (PredicationCode == ARMCC::AL &&
7311 shouldOmitPredicateOperand(Mnemonic, Operands))
7312 Operands.erase(Operands.begin() + 1);
7313
7314
7315 if (hasMVE()) {
7316 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
7317 Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7318 // Very nasty hack to deal with the vector predicated variant of vmovlt
7319 // the scalar predicated vmov with condition 'lt'. We can not tell them
7320 // apart until we have parsed their operands.
7321 Operands.erase(Operands.begin() + 1);
7322 Operands.erase(Operands.begin());
7323 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7324 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7325 Mnemonic.size() - 1 + CarrySetting);
7326 Operands.insert(Operands.begin(),
7327 ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7328 Operands.insert(Operands.begin(),
7329 ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7330 } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7331 !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7332 // Another nasty hack to deal with the ambiguity between vcvt with scalar
7333 // predication 'ne' and vcvtn with vector predication 'e'. As above we
7334 // can only distinguish between the two after we have parsed their
7335 // operands.
7336 Operands.erase(Operands.begin() + 1);
7337 Operands.erase(Operands.begin());
7338 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7339 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7340 Mnemonic.size() - 1 + CarrySetting);
7341 Operands.insert(Operands.begin(),
7342 ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7343 Operands.insert(Operands.begin(),
7344 ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7345 } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7346 !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7347 // Another hack, this time to distinguish between scalar predicated vmul
7348 // with 'lt' predication code and the vector instruction vmullt with
7349 // vector predication code "none"
7350 Operands.erase(Operands.begin() + 1);
7351 Operands.erase(Operands.begin());
7352 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7353 Operands.insert(Operands.begin(),
7354 ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7355 }
7356 // For vmov and vcmp, as mentioned earlier, we did not add the vector
7357 // predication code, since these may contain operands that require
7358 // special parsing. So now we have to see if they require vector
7359 // predication and replace the scalar one with the vector predication
7360 // operand if that is the case.
7361 else if (Mnemonic == "vmov" || Mnemonic.startswith("vcmp") ||
7362 (Mnemonic.startswith("vcvt") && !Mnemonic.startswith("vcvta") &&
7363 !Mnemonic.startswith("vcvtn") && !Mnemonic.startswith("vcvtp") &&
7364 !Mnemonic.startswith("vcvtm"))) {
7365 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7366 // We could not split the vector predicate off vcvt because it might
7367 // have been the scalar vcvtt instruction. Now we know its a vector
7368 // instruction, we still need to check whether its the vector
7369 // predicated vcvt with 'Then' predication or the vector vcvtt. We can
7370 // distinguish the two based on the suffixes, if it is any of
7371 // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7372 if (Mnemonic.startswith("vcvtt") && Operands.size() >= 4) {
7373 auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
7374 auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
7375 if (!(Sz1.isToken() && Sz1.getToken().startswith(".f") &&
7376 Sz2.isToken() && Sz2.getToken().startswith(".f"))) {
7377 Operands.erase(Operands.begin());
7378 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7379 VPTPredicationCode = ARMVCC::Then;
7380
7381 Mnemonic = Mnemonic.substr(0, 4);
7382 Operands.insert(Operands.begin(),
7383 ARMOperand::CreateToken(Mnemonic, MLoc));
7384 }
7385 }
7386 Operands.erase(Operands.begin() + 1);
7387 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7388 Mnemonic.size() + CarrySetting);
7389 Operands.insert(Operands.begin() + 1,
7390 ARMOperand::CreateVPTPred(
7391 ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7392 }
7393 } else if (CanAcceptVPTPredicationCode) {
7394 // For all other instructions, make sure only one of the two
7395 // predication operands is left behind, depending on whether we should
7396 // use the vector predication.
7397 if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7398 if (CanAcceptPredicationCode)
7399 Operands.erase(Operands.begin() + 2);
7400 else
7401 Operands.erase(Operands.begin() + 1);
7402 } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
7403 Operands.erase(Operands.begin() + 1);
7404 }
7405 }
7406 }
7407
7408 if (VPTPredicationCode != ARMVCC::None) {
7409 bool usedVPTPredicationCode = false;
7410 for (unsigned I = 1; I < Operands.size(); ++I)
7411 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7412 usedVPTPredicationCode = true;
7413 if (!usedVPTPredicationCode) {
7414 // If we have a VPT predication code and we haven't just turned it
7415 // into an operand, then it was a mistake for splitMnemonic to
7416 // separate it from the rest of the mnemonic in the first place,
7417 // and this may lead to wrong disassembly (e.g. scalar floating
7418 // point VCMPE is actually a different instruction from VCMP, so
7419 // we mustn't treat them the same). In that situation, glue it
7420 // back on.
7421 Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7422 Operands.erase(Operands.begin());
7423 Operands.insert(Operands.begin(),
7424 ARMOperand::CreateToken(Mnemonic, NameLoc));
7425 }
7426 }
7427
7428 // ARM mode 'blx' need special handling, as the register operand version
7429 // is predicable, but the label operand version is not. So, we can't rely
7430 // on the Mnemonic based checking to correctly figure out when to put
7431 // a k_CondCode operand in the list. If we're trying to match the label
7432 // version, remove the k_CondCode operand here.
7433 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
7434 static_cast<ARMOperand &>(*Operands[2]).isImm())
7435 Operands.erase(Operands.begin() + 1);
7436
7437 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7438 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
7439 // a single GPRPair reg operand is used in the .td file to replace the two
7440   // GPRs. However, when parsing from asm, the two GPRs cannot be
7441 // automatically
7442 // expressed as a GPRPair, so we have to manually merge them.
7443 // FIXME: We would really like to be able to tablegen'erate this.
7444 if (!isThumb() && Operands.size() > 4 &&
7445 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7446 Mnemonic == "stlexd")) {
7447 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7448 unsigned Idx = isLoad ? 2 : 3;
7449 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7450 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7451
7452 const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7453 // Adjust only if Op1 and Op2 are GPRs.
7454 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
7455 MRC.contains(Op2.getReg())) {
7456 unsigned Reg1 = Op1.getReg();
7457 unsigned Reg2 = Op2.getReg();
7458 unsigned Rt = MRI->getEncodingValue(Reg1);
7459 unsigned Rt2 = MRI->getEncodingValue(Reg2);
7460
7461 // Rt2 must be Rt + 1 and Rt must be even.
7462 if (Rt + 1 != Rt2 || (Rt & 1)) {
7463 return Error(Op2.getStartLoc(),
7464 isLoad ? "destination operands must be sequential"
7465 : "source operands must be sequential");
7466 }
7467 unsigned NewReg = MRI->getMatchingSuperReg(
7468 Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7469 Operands[Idx] =
7470 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7471 Operands.erase(Operands.begin() + Idx + 1);
7472 }
7473 }
7474
7475 // GNU Assembler extension (compatibility).
7476 fixupGNULDRDAlias(Mnemonic, Operands);
7477
7478 // FIXME: As said above, this is all a pretty gross hack. This instruction
7479 // does not fit with other "subs" and tblgen.
7480 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7481 // so the Mnemonic is the original name "subs" and delete the predicate
7482 // operand so it will match the table entry.
7483 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
7484 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
7485 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
7486 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
7487 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
7488 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
7489 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7490 Operands.erase(Operands.begin() + 1);
7491 }
7492 return false;
7493}
7494
7495// Validate context-sensitive operand constraints.
7496
7497// return 'true' if register list contains non-low GPR registers,
7498// 'false' otherwise. If Reg is in the register list or is HiReg, set
7499// 'containsReg' to true.
7500static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7501 unsigned Reg, unsigned HiReg,
7502 bool &containsReg) {
7503 containsReg = false;
7504 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7505 unsigned OpReg = Inst.getOperand(i).getReg();
7506 if (OpReg == Reg)
7507 containsReg = true;
7508 // Anything other than a low register isn't legal here.
7509 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7510 return true;
7511 }
7512 return false;
7513}
7514
7515 // Check if the specified register is in the register list of the inst,
7516// starting at the indicated operand number.
7517static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7518 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7519 unsigned OpReg = Inst.getOperand(i).getReg();
7520 if (OpReg == Reg)
7521 return true;
7522 }
7523 return false;
7524}
7525
7526// Return true if instruction has the interesting property of being
7527// allowed in IT blocks, but not being predicable.
7528static bool instIsBreakpoint(const MCInst &Inst) {
7529 return Inst.getOpcode() == ARM::tBKPT ||
7530 Inst.getOpcode() == ARM::BKPT ||
7531 Inst.getOpcode() == ARM::tHLT ||
7532 Inst.getOpcode() == ARM::HLT;
7533}
7534
7535bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7536 const OperandVector &Operands,
7537 unsigned ListNo, bool IsARPop) {
7538 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7539 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7540
7541 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7542 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
7543 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7544
7545 if (!IsARPop && ListContainsSP)
7546 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7547 "SP may not be in the register list");
7548 else if (ListContainsPC && ListContainsLR)
7549 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7550 "PC and LR may not be in the register list simultaneously");
7551 return false;
7552}
7553
7554bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7555 const OperandVector &Operands,
7556 unsigned ListNo) {
7557 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7558 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7559
7560 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7561 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7562
7563 if (ListContainsSP && ListContainsPC)
7564 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7565 "SP and PC may not be in the register list");
7566 else if (ListContainsSP)
7567 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7568 "SP may not be in the register list");
7569 else if (ListContainsPC)
7570 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7571 "PC may not be in the register list");
7572 return false;
7573}
7574
7575bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
7576 const OperandVector &Operands,
7577 bool Load, bool ARMMode, bool Writeback) {
7578 unsigned RtIndex = Load || !Writeback ? 0 : 1;
7579 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7580 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7581
7582 if (ARMMode) {
7583 // Rt can't be R14.
7584 if (Rt == 14)
7585 return Error(Operands[3]->getStartLoc(),
7586 "Rt can't be R14");
7587
7588 // Rt must be even-numbered.
7589 if ((Rt & 1) == 1)
7590 return Error(Operands[3]->getStartLoc(),
7591 "Rt must be even-numbered");
7592
7593 // Rt2 must be Rt + 1.
7594 if (Rt2 != Rt + 1) {
7595 if (Load)
7596 return Error(Operands[3]->getStartLoc(),
7597 "destination operands must be sequential");
7598 else
7599 return Error(Operands[3]->getStartLoc(),
7600 "source operands must be sequential");
7601 }
7602
7603 // FIXME: Diagnose m == 15
7604 // FIXME: Diagnose ldrd with m == t || m == t2.
7605 }
7606
7607 if (!ARMMode && Load) {
7608 if (Rt2 == Rt)
7609 return Error(Operands[3]->getStartLoc(),
7610 "destination operands can't be identical");
7611 }
7612
7613 if (Writeback) {
7614 unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7615
7616 if (Rn == Rt || Rn == Rt2) {
7617 if (Load)
7618 return Error(Operands[3]->getStartLoc(),
7619 "base register needs to be different from destination "
7620 "registers");
7621 else
7622 return Error(Operands[3]->getStartLoc(),
7623 "source register and base register can't be identical");
7624 }
7625
7626 // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7627 // (Except the immediate form of ldrd?)
7628 }
7629
7630 return false;
7631}
7632
7633static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7634 for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7635 if (ARM::isVpred(MCID.operands()[i].OperandType))
7636 return i;
7637 }
7638 return -1;
7639}
7640
7641static bool isVectorPredicable(const MCInstrDesc &MCID) {
7642 return findFirstVectorPredOperandIdx(MCID) != -1;
7643}
7644
7645// FIXME: We would really like to be able to tablegen'erate this.
7646bool ARMAsmParser::validateInstruction(MCInst &Inst,
7647 const OperandVector &Operands) {
7648 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7649 SMLoc Loc = Operands[0]->getStartLoc();
7650
7651 // Check the IT block state first.
7652 // NOTE: BKPT and HLT instructions have the interesting property of being
7653 // allowed in IT blocks, but not being predicable. They just always execute.
7654 if (inITBlock() && !instIsBreakpoint(Inst)) {
7655 // The instruction must be predicable.
7656 if (!MCID.isPredicable())
7657 return Error(Loc, "instructions in IT block must be predicable");
7658 ARMCC::CondCodes Cond = ARMCC::CondCodes(
7659 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7660 if (Cond != currentITCond()) {
7661 // Find the condition code Operand to get its SMLoc information.
7662 SMLoc CondLoc;
7663 for (unsigned I = 1; I < Operands.size(); ++I)
7664 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7665 CondLoc = Operands[I]->getStartLoc();
7666 return Error(CondLoc, "incorrect condition in IT block; got '" +
7667 StringRef(ARMCondCodeToString(Cond)) +
7668 "', but expected '" +
7669 ARMCondCodeToString(currentITCond()) + "'");
7670 }
7671 // Check for non-'al' condition codes outside of the IT block.
7672 } else if (isThumbTwo() && MCID.isPredicable() &&
7673 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7674 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7675 Inst.getOpcode() != ARM::t2Bcc &&
7676 Inst.getOpcode() != ARM::t2BFic) {
7677 return Error(Loc, "predicated instructions must be in IT block");
7678 } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7679 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7680 ARMCC::AL) {
7681 return Warning(Loc, "predicated instructions should be in IT block");
7682 } else if (!MCID.isPredicable()) {
7683 // Check the instruction doesn't have a predicate operand anyway
7684 // that it's not allowed to use. Sometimes this happens in order
7685 // to keep instructions the same shape even though one cannot
7686 // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7687 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7688 if (MCID.operands()[i].isPredicate()) {
7689 if (Inst.getOperand(i).getImm() != ARMCC::AL)
7690 return Error(Loc, "instruction is not predicable");
7691 break;
7692 }
7693 }
7694 }
7695
7696 // PC-setting instructions in an IT block, but not the last instruction of
7697 // the block, are UNPREDICTABLE.
7698 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7699 return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7700 }
7701
7702 if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7703 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7704 if (!isVectorPredicable(MCID))
7705 return Error(Loc, "instruction in VPT block must be predicable");
7706 unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7707 unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7708 if (Pred != VPTPred) {
7709 SMLoc PredLoc;
7710 for (unsigned I = 1; I < Operands.size(); ++I)
7711 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7712 PredLoc = Operands[I]->getStartLoc();
7713 return Error(PredLoc, "incorrect predication in VPT block; got '" +
7714 StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7715 "', but expected '" +
7716 ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7717 }
7718 }
7719 else if (isVectorPredicable(MCID) &&
7720 Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7721 ARMVCC::None)
7722 return Error(Loc, "VPT predicated instructions must be in VPT block");
7723
7724 const unsigned Opcode = Inst.getOpcode();
7725 switch (Opcode) {
7726 case ARM::t2IT: {
7727 // Encoding is unpredictable if it ever results in a notional 'NV'
7728 // predicate. Since we don't parse 'NV' directly this means an 'AL'
7729 // predicate with an "else" mask bit.
7730 unsigned Cond = Inst.getOperand(0).getImm();
7731 unsigned Mask = Inst.getOperand(1).getImm();
7732
7733 // Conditions only allowing a 't' are those with no set bit except
7734 // the lowest-order one that indicates the end of the sequence. In
7735 // other words, powers of 2.
7736 if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7737 return Error(Loc, "unpredictable IT predicate sequence");
7738 break;
7739 }
7740 case ARM::LDRD:
7741 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7742 /*Writeback*/false))
7743 return true;
7744 break;
7745 case ARM::LDRD_PRE:
7746 case ARM::LDRD_POST:
7747 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7748 /*Writeback*/true))
7749 return true;
7750 break;
7751 case ARM::t2LDRDi8:
7752 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7753 /*Writeback*/false))
7754 return true;
7755 break;
7756 case ARM::t2LDRD_PRE:
7757 case ARM::t2LDRD_POST:
7758 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7759 /*Writeback*/true))
7760 return true;
7761 break;
7762 case ARM::t2BXJ: {
7763 const unsigned RmReg = Inst.getOperand(0).getReg();
7764 // Rm = SP is no longer unpredictable in v8-A
7765 if (RmReg == ARM::SP && !hasV8Ops())
7766 return Error(Operands[2]->getStartLoc(),
7767 "r13 (SP) is an unpredictable operand to BXJ");
7768 return false;
7769 }
7770 case ARM::STRD:
7771 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7772 /*Writeback*/false))
7773 return true;
7774 break;
7775 case ARM::STRD_PRE:
7776 case ARM::STRD_POST:
7777 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7778 /*Writeback*/true))
7779 return true;
7780 break;
7781 case ARM::t2STRD_PRE:
7782 case ARM::t2STRD_POST:
7783 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7784 /*Writeback*/true))
7785 return true;
7786 break;
7787 case ARM::STR_PRE_IMM:
7788 case ARM::STR_PRE_REG:
7789 case ARM::t2STR_PRE:
7790 case ARM::STR_POST_IMM:
7791 case ARM::STR_POST_REG:
7792 case ARM::t2STR_POST:
7793 case ARM::STRH_PRE:
7794 case ARM::t2STRH_PRE:
7795 case ARM::STRH_POST:
7796 case ARM::t2STRH_POST:
7797 case ARM::STRB_PRE_IMM:
7798 case ARM::STRB_PRE_REG:
7799 case ARM::t2STRB_PRE:
7800 case ARM::STRB_POST_IMM:
7801 case ARM::STRB_POST_REG:
7802 case ARM::t2STRB_POST: {
7803 // Rt must be different from Rn.
7804 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7805 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7806
7807 if (Rt == Rn)
7808 return Error(Operands[3]->getStartLoc(),
7809 "source register and base register can't be identical");
7810 return false;
7811 }
7812 case ARM::t2LDR_PRE_imm:
7813 case ARM::t2LDR_POST_imm:
7814 case ARM::t2STR_PRE_imm:
7815 case ARM::t2STR_POST_imm: {
7816 // Rt must be different from Rn.
7817 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7818 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7819
7820 if (Rt == Rn)
7821 return Error(Operands[3]->getStartLoc(),
7822 "destination register and base register can't be identical");
7823 if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7824 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7825 int Imm = Inst.getOperand(2).getImm();
7826 if (Imm > 255 || Imm < -255)
7827 return Error(Operands[5]->getStartLoc(),
7828 "operand must be in range [-255, 255]");
7829 }
7830 if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7831 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7832 if (Inst.getOperand(0).getReg() == ARM::PC) {
7833 return Error(Operands[3]->getStartLoc(),
7834 "operand must be a register in range [r0, r14]");
7835 }
7836 }
7837 return false;
7838 }
7839
7840 case ARM::t2LDRB_OFFSET_imm:
7841 case ARM::t2LDRB_PRE_imm:
7842 case ARM::t2LDRB_POST_imm:
7843 case ARM::t2STRB_OFFSET_imm:
7844 case ARM::t2STRB_PRE_imm:
7845 case ARM::t2STRB_POST_imm: {
7846 if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7847 Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7848 Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7849 Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7850 int Imm = Inst.getOperand(2).getImm();
7851 if (Imm > 255 || Imm < -255)
7852 return Error(Operands[5]->getStartLoc(),
7853 "operand must be in range [-255, 255]");
7854 } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7855 Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7856 int Imm = Inst.getOperand(2).getImm();
7857 if (Imm > 0 || Imm < -255)
7858 return Error(Operands[5]->getStartLoc(),
7859 "operand must be in range [0, 255] with a negative sign");
7860 }
7861 if (Inst.getOperand(0).getReg() == ARM::PC) {
7862 return Error(Operands[3]->getStartLoc(),
7863 "if operand is PC, should call the LDRB (literal)");
7864 }
7865 return false;
7866 }
7867
7868 case ARM::t2LDRH_OFFSET_imm:
7869 case ARM::t2LDRH_PRE_imm:
7870 case ARM::t2LDRH_POST_imm:
7871 case ARM::t2STRH_OFFSET_imm:
7872 case ARM::t2STRH_PRE_imm:
7873 case ARM::t2STRH_POST_imm: {
7874 if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7875 Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7876 Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7877 Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7878 int Imm = Inst.getOperand(2).getImm();
7879 if (Imm > 255 || Imm < -255)
7880 return Error(Operands[5]->getStartLoc(),
7881 "operand must be in range [-255, 255]");
7882 } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7883 Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7884 int Imm = Inst.getOperand(2).getImm();
7885 if (Imm > 0 || Imm < -255)
7886 return Error(Operands[5]->getStartLoc(),
7887 "operand must be in range [0, 255] with a negative sign");
7888 }
7889 if (Inst.getOperand(0).getReg() == ARM::PC) {
7890 return Error(Operands[3]->getStartLoc(),
7891 "if operand is PC, should call the LDRH (literal)");
7892 }
7893 return false;
7894 }
7895
7896 case ARM::t2LDRSB_OFFSET_imm:
7897 case ARM::t2LDRSB_PRE_imm:
7898 case ARM::t2LDRSB_POST_imm: {
7899 if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7900 Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7901 int Imm = Inst.getOperand(2).getImm();
7902 if (Imm > 255 || Imm < -255)
7903 return Error(Operands[5]->getStartLoc(),
7904 "operand must be in range [-255, 255]");
7905 } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7906 int Imm = Inst.getOperand(2).getImm();
7907 if (Imm > 0 || Imm < -255)
7908 return Error(Operands[5]->getStartLoc(),
7909 "operand must be in range [0, 255] with a negative sign");
7910 }
7911 if (Inst.getOperand(0).getReg() == ARM::PC) {
7912 return Error(Operands[3]->getStartLoc(),
7913 "if operand is PC, should call the LDRH (literal)");
7914 }
7915 return false;
7916 }
7917
7918 case ARM::t2LDRSH_OFFSET_imm:
7919 case ARM::t2LDRSH_PRE_imm:
7920 case ARM::t2LDRSH_POST_imm: {
7921 if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7922 Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7923 int Imm = Inst.getOperand(2).getImm();
7924 if (Imm > 255 || Imm < -255)
7925 return Error(Operands[5]->getStartLoc(),
7926 "operand must be in range [-255, 255]");
7927 } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7928 int Imm = Inst.getOperand(2).getImm();
7929 if (Imm > 0 || Imm < -255)
7930 return Error(Operands[5]->getStartLoc(),
7931 "operand must be in range [0, 255] with a negative sign");
7932 }
7933 if (Inst.getOperand(0).getReg() == ARM::PC) {
7934 return Error(Operands[3]->getStartLoc(),
7935 "if operand is PC, should call the LDRH (literal)");
7936 }
7937 return false;
7938 }
7939
7940 case ARM::LDR_PRE_IMM:
7941 case ARM::LDR_PRE_REG:
7942 case ARM::t2LDR_PRE:
7943 case ARM::LDR_POST_IMM:
7944 case ARM::LDR_POST_REG:
7945 case ARM::t2LDR_POST:
7946 case ARM::LDRH_PRE:
7947 case ARM::t2LDRH_PRE:
7948 case ARM::LDRH_POST:
7949 case ARM::t2LDRH_POST:
7950 case ARM::LDRSH_PRE:
7951 case ARM::t2LDRSH_PRE:
7952 case ARM::LDRSH_POST:
7953 case ARM::t2LDRSH_POST:
7954 case ARM::LDRB_PRE_IMM:
7955 case ARM::LDRB_PRE_REG:
7956 case ARM::t2LDRB_PRE:
7957 case ARM::LDRB_POST_IMM:
7958 case ARM::LDRB_POST_REG:
7959 case ARM::t2LDRB_POST:
7960 case ARM::LDRSB_PRE:
7961 case ARM::t2LDRSB_PRE:
7962 case ARM::LDRSB_POST:
7963 case ARM::t2LDRSB_POST: {
7964 // Rt must be different from Rn.
7965 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7966 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7967
7968 if (Rt == Rn)
7969 return Error(Operands[3]->getStartLoc(),
7970 "destination register and base register can't be identical");
7971 return false;
7972 }
7973
7974 case ARM::MVE_VLDRBU8_rq:
7975 case ARM::MVE_VLDRBU16_rq:
7976 case ARM::MVE_VLDRBS16_rq:
7977 case ARM::MVE_VLDRBU32_rq:
7978 case ARM::MVE_VLDRBS32_rq:
7979 case ARM::MVE_VLDRHU16_rq:
7980 case ARM::MVE_VLDRHU16_rq_u:
7981 case ARM::MVE_VLDRHU32_rq:
7982 case ARM::MVE_VLDRHU32_rq_u:
7983 case ARM::MVE_VLDRHS32_rq:
7984 case ARM::MVE_VLDRHS32_rq_u:
7985 case ARM::MVE_VLDRWU32_rq:
7986 case ARM::MVE_VLDRWU32_rq_u:
7987 case ARM::MVE_VLDRDU64_rq:
7988 case ARM::MVE_VLDRDU64_rq_u:
7989 case ARM::MVE_VLDRWU32_qi:
7990 case ARM::MVE_VLDRWU32_qi_pre:
7991 case ARM::MVE_VLDRDU64_qi:
7992 case ARM::MVE_VLDRDU64_qi_pre: {
7993 // Qd must be different from Qm.
7994 unsigned QdIdx = 0, QmIdx = 2;
7995 bool QmIsPointer = false;
7996 switch (Opcode) {
7997 case ARM::MVE_VLDRWU32_qi:
7998 case ARM::MVE_VLDRDU64_qi:
7999 QmIdx = 1;
8000 QmIsPointer = true;
8001 break;
8002 case ARM::MVE_VLDRWU32_qi_pre:
8003 case ARM::MVE_VLDRDU64_qi_pre:
8004 QdIdx = 1;
8005 QmIsPointer = true;
8006 break;
8007 }
8008
8009 const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
8010 const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
8011
8012 if (Qd == Qm) {
8013 return Error(Operands[3]->getStartLoc(),
8014 Twine("destination vector register and vector ") +
8015 (QmIsPointer ? "pointer" : "offset") +
8016 " register can't be identical");
8017 }
8018 return false;
8019 }
8020
8021 case ARM::SBFX:
8022 case ARM::t2SBFX:
8023 case ARM::UBFX:
8024 case ARM::t2UBFX: {
8025 // Width must be in range [1, 32-lsb].
8026 unsigned LSB = Inst.getOperand(2).getImm();
8027 unsigned Widthm1 = Inst.getOperand(3).getImm();
8028 if (Widthm1 >= 32 - LSB)
8029 return Error(Operands[5]->getStartLoc(),
8030 "bitfield width must be in range [1,32-lsb]");
8031 return false;
8032 }
8033 // Notionally handles ARM::tLDMIA_UPD too.
8034 case ARM::tLDMIA: {
8035 // If we're parsing Thumb2, the .w variant is available and handles
8036 // most cases that are normally illegal for a Thumb1 LDM instruction.
8037 // We'll make the transformation in processInstruction() if necessary.
8038 //
8039 // Thumb LDM instructions are writeback iff the base register is not
8040 // in the register list.
8041 unsigned Rn = Inst.getOperand(0).getReg();
8042 bool HasWritebackToken =
8043 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8044 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8045 bool ListContainsBase;
8046 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
8047 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
8048 "registers must be in range r0-r7");
8049 // If we should have writeback, then there should be a '!' token.
8050 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8051 return Error(Operands[2]->getStartLoc(),
8052 "writeback operator '!' expected");
8053 // If we should not have writeback, there must not be a '!'. This is
8054 // true even for the 32-bit wide encodings.
8055 if (ListContainsBase && HasWritebackToken)
8056 return Error(Operands[3]->getStartLoc(),
8057 "writeback operator '!' not allowed when base register "
8058 "in register list");
8059
8060 if (validatetLDMRegList(Inst, Operands, 3))
8061 return true;
8062 break;
8063 }
8064 case ARM::LDMIA_UPD:
8065 case ARM::LDMDB_UPD:
8066 case ARM::LDMIB_UPD:
8067 case ARM::LDMDA_UPD:
8068 // ARM variants loading and updating the same register are only officially
8069 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
8070 if (!hasV7Ops())
8071 break;
8072 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8073 return Error(Operands.back()->getStartLoc(),
8074 "writeback register not allowed in register list");
8075 break;
8076 case ARM::t2LDMIA:
8077 case ARM::t2LDMDB:
8078 if (validatetLDMRegList(Inst, Operands, 3))
8079 return true;
8080 break;
8081 case ARM::t2STMIA:
8082 case ARM::t2STMDB:
8083 if (validatetSTMRegList(Inst, Operands, 3))
8084 return true;
8085 break;
8086 case ARM::t2LDMIA_UPD:
8087 case ARM::t2LDMDB_UPD:
8088 case ARM::t2STMIA_UPD:
8089 case ARM::t2STMDB_UPD:
8090 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8091 return Error(Operands.back()->getStartLoc(),
8092 "writeback register not allowed in register list");
8093
8094 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8095 if (validatetLDMRegList(Inst, Operands, 3))
8096 return true;
8097 } else {
8098 if (validatetSTMRegList(Inst, Operands, 3))
8099 return true;
8100 }
8101 break;
8102
8103 case ARM::sysLDMIA_UPD:
8104 case ARM::sysLDMDA_UPD:
8105 case ARM::sysLDMDB_UPD:
8106 case ARM::sysLDMIB_UPD:
8107 if (!listContainsReg(Inst, 3, ARM::PC))
8108 return Error(Operands[4]->getStartLoc(),
8109 "writeback register only allowed on system LDM "
8110 "if PC in register-list");
8111 break;
8112 case ARM::sysSTMIA_UPD:
8113 case ARM::sysSTMDA_UPD:
8114 case ARM::sysSTMDB_UPD:
8115 case ARM::sysSTMIB_UPD:
8116 return Error(Operands[2]->getStartLoc(),
8117 "system STM cannot have writeback register");
8118 case ARM::tMUL:
8119 // The second source operand must be the same register as the destination
8120 // operand.
8121 //
8122 // In this case, we must directly check the parsed operands because the
8123 // cvtThumbMultiply() function is written in such a way that it guarantees
8124 // this first statement is always true for the new Inst. Essentially, the
8125 // destination is unconditionally copied into the second source operand
8126 // without checking to see if it matches what we actually parsed.
8127 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
8128 ((ARMOperand &)*Operands[5]).getReg()) &&
8129 (((ARMOperand &)*Operands[3]).getReg() !=
8130 ((ARMOperand &)*Operands[4]).getReg())) {
8131 return Error(Operands[3]->getStartLoc(),
8132 "destination register must match source register");
8133 }
8134 break;
8135
8136 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8137 // so only issue a diagnostic for thumb1. The instructions will be
8138 // switched to the t2 encodings in processInstruction() if necessary.
8139 case ARM::tPOP: {
8140 bool ListContainsBase;
8141 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8142 !isThumbTwo())
8143 return Error(Operands[2]->getStartLoc(),
8144 "registers must be in range r0-r7 or pc");
8145 if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
8146 return true;
8147 break;
8148 }
8149 case ARM::tPUSH: {
8150 bool ListContainsBase;
8151 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8152 !isThumbTwo())
8153 return Error(Operands[2]->getStartLoc(),
8154 "registers must be in range r0-r7 or lr");
8155 if (validatetSTMRegList(Inst, Operands, 2))
8156 return true;
8157 break;
8158 }
8159 case ARM::tSTMIA_UPD: {
8160 bool ListContainsBase, InvalidLowList;
8161 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8162 0, ListContainsBase);
8163 if (InvalidLowList && !isThumbTwo())
8164 return Error(Operands[4]->getStartLoc(),
8165 "registers must be in range r0-r7");
8166
8167 // This would be converted to a 32-bit stm, but that's not valid if the
8168 // writeback register is in the list.
8169 if (InvalidLowList && ListContainsBase)
8170 return Error(Operands[4]->getStartLoc(),
8171 "writeback operator '!' not allowed when base register "
8172 "in register list");
8173
8174 if (validatetSTMRegList(Inst, Operands, 4))
8175 return true;
8176 break;
8177 }
8178 case ARM::tADDrSP:
8179 // If the non-SP source operand and the destination operand are not the
8180 // same, we need thumb2 (for the wide encoding), or we have an error.
8181 if (!isThumbTwo() &&
8182 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8183 return Error(Operands[4]->getStartLoc(),
8184 "source register must be the same as destination");
8185 }
8186 break;
8187
8188 case ARM::t2ADDrr:
8189 case ARM::t2ADDrs:
8190 case ARM::t2SUBrr:
8191 case ARM::t2SUBrs:
8192 if (Inst.getOperand(0).getReg() == ARM::SP &&
8193 Inst.getOperand(1).getReg() != ARM::SP)
8194 return Error(Operands[4]->getStartLoc(),
8195 "source register must be sp if destination is sp");
8196 break;
8197
8198 // Final range checking for Thumb unconditional branch instructions.
8199 case ARM::tB:
8200 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
8201 return Error(Operands[2]->getStartLoc(), "branch target out of range");
8202 break;
8203 case ARM::t2B: {
8204 int op = (Operands[2]->isImm()) ? 2 : 3;
8205 ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8206 // Delay the checks of symbolic expressions until they are resolved.
8207 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8208 !Operand.isSignedOffset<24, 1>())
8209 return Error(Operands[op]->getStartLoc(), "branch target out of range");
8210 break;
8211 }
8212 // Final range checking for Thumb conditional branch instructions.
8213 case ARM::tBcc:
8214 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
8215 return Error(Operands[2]->getStartLoc(), "branch target out of range");
8216 break;
8217 case ARM::t2Bcc: {
8218 int Op = (Operands[2]->isImm()) ? 2 : 3;
8219 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8220 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8221 break;
8222 }
8223 case ARM::tCBZ:
8224 case ARM::tCBNZ: {
8225 if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
8226 return Error(Operands[2]->getStartLoc(), "branch target out of range");
8227 break;
8228 }
8229 case ARM::MOVi16:
8230 case ARM::MOVTi16:
8231 case ARM::t2MOVi16:
8232 case ARM::t2MOVTi16:
8233 {
8234 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8235 // especially when we turn it into a movw and the expression <symbol> does
8236 // not have a :lower16: or :upper16 as part of the expression. We don't
8237 // want the behavior of silently truncating, which can be unexpected and
8238 // lead to bugs that are difficult to find since this is an easy mistake
8239 // to make.
8240 int i = (Operands[3]->isImm()) ? 3 : 4;
8241 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8242 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8243 if (CE) break;
8244 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8245 if (!E) break;
8246 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8247 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8248 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8249 return Error(
8250 Op.getStartLoc(),
8251 "immediate expression for mov requires :lower16: or :upper16");
8252 break;
8253 }
8254 case ARM::HINT:
8255 case ARM::t2HINT: {
8256 unsigned Imm8 = Inst.getOperand(0).getImm();
8257 unsigned Pred = Inst.getOperand(1).getImm();
8258 // ESB is not predicable (pred must be AL). Without the RAS extension, this
8259 // behaves as any other unallocated hint.
8260 if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8261 return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8262 "predicable, but condition "
8263 "code specified");
8264 if (Imm8 == 0x14 && Pred != ARMCC::AL)
8265 return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8266 "predicable, but condition "
8267 "code specified");
8268 break;
8269 }
8270 case ARM::t2BFi:
8271 case ARM::t2BFr:
8272 case ARM::t2BFLi:
8273 case ARM::t2BFLr: {
8274 if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
8275 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8276 return Error(Operands[2]->getStartLoc(),
8277 "branch location out of range or not a multiple of 2");
8278
8279 if (Opcode == ARM::t2BFi) {
8280 if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
8281 return Error(Operands[3]->getStartLoc(),
8282 "branch target out of range or not a multiple of 2");
8283 } else if (Opcode == ARM::t2BFLi) {
8284 if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
8285 return Error(Operands[3]->getStartLoc(),
8286 "branch target out of range or not a multiple of 2");
8287 }
8288 break;
8289 }
8290 case ARM::t2BFic: {
8291 if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
8292 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8293 return Error(Operands[1]->getStartLoc(),
8294 "branch location out of range or not a multiple of 2");
8295
8296 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
8297 return Error(Operands[2]->getStartLoc(),
8298 "branch target out of range or not a multiple of 2");
8299
8300 assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&(static_cast <bool> (Inst.getOperand(0).isImm() == Inst
.getOperand(2).isImm() && "branch location and else branch target should either both be "
"immediates or both labels") ? void (0) : __assert_fail ("Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() && \"branch location and else branch target should either both be \" \"immediates or both labels\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8302, __extension__
__PRETTY_FUNCTION__))
8301 "branch location and else branch target should either both be "(static_cast <bool> (Inst.getOperand(0).isImm() == Inst
.getOperand(2).isImm() && "branch location and else branch target should either both be "
"immediates or both labels") ? void (0) : __assert_fail ("Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() && \"branch location and else branch target should either both be \" \"immediates or both labels\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8302, __extension__
__PRETTY_FUNCTION__))
8302 "immediates or both labels")(static_cast <bool> (Inst.getOperand(0).isImm() == Inst
.getOperand(2).isImm() && "branch location and else branch target should either both be "
"immediates or both labels") ? void (0) : __assert_fail ("Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() && \"branch location and else branch target should either both be \" \"immediates or both labels\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8302, __extension__
__PRETTY_FUNCTION__))
;
8303
8304 if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8305 int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8306 if (Diff != 4 && Diff != 2)
8307 return Error(
8308 Operands[3]->getStartLoc(),
8309 "else branch target must be 2 or 4 greater than the branch location");
8310 }
8311 break;
8312 }
8313 case ARM::t2CLRM: {
8314 for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8315 if (Inst.getOperand(i).isReg() &&
8316 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8317 Inst.getOperand(i).getReg())) {
8318 return Error(Operands[2]->getStartLoc(),
8319 "invalid register in register list. Valid registers are "
8320 "r0-r12, lr/r14 and APSR.");
8321 }
8322 }
8323 break;
8324 }
8325 case ARM::DSB:
8326 case ARM::t2DSB: {
8327
8328 if (Inst.getNumOperands() < 2)
8329 break;
8330
8331 unsigned Option = Inst.getOperand(0).getImm();
8332 unsigned Pred = Inst.getOperand(1).getImm();
8333
8334 // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8335 if (Option == 0 && Pred != ARMCC::AL)
8336 return Error(Operands[1]->getStartLoc(),
8337 "instruction 'ssbb' is not predicable, but condition code "
8338 "specified");
8339 if (Option == 4 && Pred != ARMCC::AL)
8340 return Error(Operands[1]->getStartLoc(),
8341 "instruction 'pssbb' is not predicable, but condition code "
8342 "specified");
8343 break;
8344 }
8345 case ARM::VMOVRRS: {
8346 // Source registers must be sequential.
8347 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8348 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8349 if (Sm1 != Sm + 1)
8350 return Error(Operands[5]->getStartLoc(),
8351 "source operands must be sequential");
8352 break;
8353 }
8354 case ARM::VMOVSRR: {
8355 // Destination registers must be sequential.
8356 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8357 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8358 if (Sm1 != Sm + 1)
8359 return Error(Operands[3]->getStartLoc(),
8360 "destination operands must be sequential");
8361 break;
8362 }
8363 case ARM::VLDMDIA:
8364 case ARM::VSTMDIA: {
8365 ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
8366 auto &RegList = Op.getRegList();
8367 if (RegList.size() < 1 || RegList.size() > 16)
8368 return Error(Operands[3]->getStartLoc(),
8369 "list of registers must be at least 1 and at most 16");
8370 break;
8371 }
8372 case ARM::MVE_VQDMULLs32bh:
8373 case ARM::MVE_VQDMULLs32th:
8374 case ARM::MVE_VCMULf32:
8375 case ARM::MVE_VMULLBs32:
8376 case ARM::MVE_VMULLTs32:
8377 case ARM::MVE_VMULLBu32:
8378 case ARM::MVE_VMULLTu32: {
8379 if (Operands[3]->getReg() == Operands[4]->getReg()) {
8380 return Error (Operands[3]->getStartLoc(),
8381 "Qd register and Qn register can't be identical");
8382 }
8383 if (Operands[3]->getReg() == Operands[5]->getReg()) {
8384 return Error (Operands[3]->getStartLoc(),
8385 "Qd register and Qm register can't be identical");
8386 }
8387 break;
8388 }
8389 case ARM::MVE_VREV64_8:
8390 case ARM::MVE_VREV64_16:
8391 case ARM::MVE_VREV64_32:
8392 case ARM::MVE_VQDMULL_qr_s32bh:
8393 case ARM::MVE_VQDMULL_qr_s32th: {
8394 if (Operands[3]->getReg() == Operands[4]->getReg()) {
8395 return Error (Operands[3]->getStartLoc(),
8396 "Qd register and Qn register can't be identical");
8397 }
8398 break;
8399 }
8400 case ARM::MVE_VCADDi32:
8401 case ARM::MVE_VCADDf32:
8402 case ARM::MVE_VHCADDs32: {
8403 if (Operands[3]->getReg() == Operands[5]->getReg()) {
8404 return Error (Operands[3]->getStartLoc(),
8405 "Qd register and Qm register can't be identical");
8406 }
8407 break;
8408 }
8409 case ARM::MVE_VMOV_rr_q: {
8410 if (Operands[4]->getReg() != Operands[6]->getReg())
8411 return Error (Operands[4]->getStartLoc(), "Q-registers must be the same");
8412 if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
8413 static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
8414 return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8415 break;
8416 }
8417 case ARM::MVE_VMOV_q_rr: {
8418 if (Operands[2]->getReg() != Operands[4]->getReg())
8419 return Error (Operands[2]->getStartLoc(), "Q-registers must be the same");
8420 if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
8421 static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
8422 return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8423 break;
8424 }
8425 case ARM::UMAAL:
8426 case ARM::UMLAL:
8427 case ARM::UMULL:
8428 case ARM::t2UMAAL:
8429 case ARM::t2UMLAL:
8430 case ARM::t2UMULL:
8431 case ARM::SMLAL:
8432 case ARM::SMLALBB:
8433 case ARM::SMLALBT:
8434 case ARM::SMLALD:
8435 case ARM::SMLALDX:
8436 case ARM::SMLALTB:
8437 case ARM::SMLALTT:
8438 case ARM::SMLSLD:
8439 case ARM::SMLSLDX:
8440 case ARM::SMULL:
8441 case ARM::t2SMLAL:
8442 case ARM::t2SMLALBB:
8443 case ARM::t2SMLALBT:
8444 case ARM::t2SMLALD:
8445 case ARM::t2SMLALDX:
8446 case ARM::t2SMLALTB:
8447 case ARM::t2SMLALTT:
8448 case ARM::t2SMLSLD:
8449 case ARM::t2SMLSLDX:
8450 case ARM::t2SMULL: {
8451 unsigned RdHi = Inst.getOperand(0).getReg();
8452 unsigned RdLo = Inst.getOperand(1).getReg();
8453 if(RdHi == RdLo) {
8454 return Error(Loc,
8455 "unpredictable instruction, RdHi and RdLo must be different");
8456 }
8457 break;
8458 }
8459
8460 case ARM::CDE_CX1:
8461 case ARM::CDE_CX1A:
8462 case ARM::CDE_CX1D:
8463 case ARM::CDE_CX1DA:
8464 case ARM::CDE_CX2:
8465 case ARM::CDE_CX2A:
8466 case ARM::CDE_CX2D:
8467 case ARM::CDE_CX2DA:
8468 case ARM::CDE_CX3:
8469 case ARM::CDE_CX3A:
8470 case ARM::CDE_CX3D:
8471 case ARM::CDE_CX3DA:
8472 case ARM::CDE_VCX1_vec:
8473 case ARM::CDE_VCX1_fpsp:
8474 case ARM::CDE_VCX1_fpdp:
8475 case ARM::CDE_VCX1A_vec:
8476 case ARM::CDE_VCX1A_fpsp:
8477 case ARM::CDE_VCX1A_fpdp:
8478 case ARM::CDE_VCX2_vec:
8479 case ARM::CDE_VCX2_fpsp:
8480 case ARM::CDE_VCX2_fpdp:
8481 case ARM::CDE_VCX2A_vec:
8482 case ARM::CDE_VCX2A_fpsp:
8483 case ARM::CDE_VCX2A_fpdp:
8484 case ARM::CDE_VCX3_vec:
8485 case ARM::CDE_VCX3_fpsp:
8486 case ARM::CDE_VCX3_fpdp:
8487 case ARM::CDE_VCX3A_vec:
8488 case ARM::CDE_VCX3A_fpsp:
8489 case ARM::CDE_VCX3A_fpdp: {
8490 assert(Inst.getOperand(1).isImm() &&(static_cast <bool> (Inst.getOperand(1).isImm() &&
"CDE operand 1 must be a coprocessor ID") ? void (0) : __assert_fail
("Inst.getOperand(1).isImm() && \"CDE operand 1 must be a coprocessor ID\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8491, __extension__
__PRETTY_FUNCTION__))
8491 "CDE operand 1 must be a coprocessor ID")(static_cast <bool> (Inst.getOperand(1).isImm() &&
"CDE operand 1 must be a coprocessor ID") ? void (0) : __assert_fail
("Inst.getOperand(1).isImm() && \"CDE operand 1 must be a coprocessor ID\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8491, __extension__
__PRETTY_FUNCTION__))
;
8492 int64_t Coproc = Inst.getOperand(1).getImm();
8493 if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8494 return Error(Operands[1]->getStartLoc(),
8495 "coprocessor must be configured as CDE");
8496 else if (Coproc >= 8)
8497 return Error(Operands[1]->getStartLoc(),
8498 "coprocessor must be in the range [p0, p7]");
8499 break;
8500 }
8501
8502 case ARM::t2CDP:
8503 case ARM::t2CDP2:
8504 case ARM::t2LDC2L_OFFSET:
8505 case ARM::t2LDC2L_OPTION:
8506 case ARM::t2LDC2L_POST:
8507 case ARM::t2LDC2L_PRE:
8508 case ARM::t2LDC2_OFFSET:
8509 case ARM::t2LDC2_OPTION:
8510 case ARM::t2LDC2_POST:
8511 case ARM::t2LDC2_PRE:
8512 case ARM::t2LDCL_OFFSET:
8513 case ARM::t2LDCL_OPTION:
8514 case ARM::t2LDCL_POST:
8515 case ARM::t2LDCL_PRE:
8516 case ARM::t2LDC_OFFSET:
8517 case ARM::t2LDC_OPTION:
8518 case ARM::t2LDC_POST:
8519 case ARM::t2LDC_PRE:
8520 case ARM::t2MCR:
8521 case ARM::t2MCR2:
8522 case ARM::t2MCRR:
8523 case ARM::t2MCRR2:
8524 case ARM::t2MRC:
8525 case ARM::t2MRC2:
8526 case ARM::t2MRRC:
8527 case ARM::t2MRRC2:
8528 case ARM::t2STC2L_OFFSET:
8529 case ARM::t2STC2L_OPTION:
8530 case ARM::t2STC2L_POST:
8531 case ARM::t2STC2L_PRE:
8532 case ARM::t2STC2_OFFSET:
8533 case ARM::t2STC2_OPTION:
8534 case ARM::t2STC2_POST:
8535 case ARM::t2STC2_PRE:
8536 case ARM::t2STCL_OFFSET:
8537 case ARM::t2STCL_OPTION:
8538 case ARM::t2STCL_POST:
8539 case ARM::t2STCL_PRE:
8540 case ARM::t2STC_OFFSET:
8541 case ARM::t2STC_OPTION:
8542 case ARM::t2STC_POST:
8543 case ARM::t2STC_PRE: {
8544 unsigned Opcode = Inst.getOpcode();
8545 // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8546 // CopInd is the index of the coprocessor operand.
8547 size_t CopInd = 0;
8548 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8549 CopInd = 2;
8550 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8551 CopInd = 1;
8552 assert(Inst.getOperand(CopInd).isImm() &&(static_cast <bool> (Inst.getOperand(CopInd).isImm() &&
"Operand must be a coprocessor ID") ? void (0) : __assert_fail
("Inst.getOperand(CopInd).isImm() && \"Operand must be a coprocessor ID\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8553, __extension__
__PRETTY_FUNCTION__))
8553 "Operand must be a coprocessor ID")(static_cast <bool> (Inst.getOperand(CopInd).isImm() &&
"Operand must be a coprocessor ID") ? void (0) : __assert_fail
("Inst.getOperand(CopInd).isImm() && \"Operand must be a coprocessor ID\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 8553, __extension__
__PRETTY_FUNCTION__))
;
8554 int64_t Coproc = Inst.getOperand(CopInd).getImm();
8555 // Operands[2] is the coprocessor operand at syntactic level
8556 if (ARM::isCDECoproc(Coproc, *STI))
8557 return Error(Operands[2]->getStartLoc(),
8558 "coprocessor must be configured as GCP");
8559 break;
8560 }
8561 }
8562
8563 return false;
8564}
8565
// Translate an assembler pseudo-opcode for a NEON VST (vector store single /
// store-lane) instruction into the real instruction opcode selected by the
// element size, and report the register-list stride through 'Spacing':
// the 'd' (double-spaced-register-absent) forms set Spacing = 1, the 'q'
// forms set Spacing = 2, as the table below shows case by case.
// The "_UPD" results are the writeback (fixed- or register-post-increment)
// variants; the non-writeback pseudo-opcodes map to plain opcodes.
8566 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
8567 switch(Opc) {
8568 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 8568)
;
8569 // VST1LN
8570 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
8571 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8572 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8573 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
8574 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8575 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8576 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
8577 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8578 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8579
8580 // VST2LN
8581 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
8582 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8583 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8584 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8585 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8586
8587 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
8588 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8589 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8590 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8591 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8592
8593 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
8594 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8595 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8596 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8597 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8598
8599 // VST3LN
8600 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
8601 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8602 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
// NOTE(review): this 'q' case sets Spacing = 1, unlike its _32 sibling and
// the other 'q' forms (Spacing = 2). This matches the table as written, but
// verify against the other VST3LNq entries / the ISA before relying on it.
8603 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8604 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8605 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
8606 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8607 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8608 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8609 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8610 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
8611 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8612 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8613 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8614 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8615
8616 // VST3
8617 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
8618 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8619 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8620 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
8621 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8622 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8623 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
8624 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8625 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8626 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
8627 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8628 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8629 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
8630 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8631 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8632 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
8633 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8634 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8635
8636 // VST4LN
8637 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
8638 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8639 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
// NOTE(review): same Spacing = 1 anomaly as the VST3LNqWB_fixed_Asm_16 case
// above — confirm intentional before depending on this value.
8640 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8641 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8642 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
8643 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8644 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8645 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8646 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8647 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
8648 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8649 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8650 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8651 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8652
8653 // VST4
8654 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
8655 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8656 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8657 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
8658 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8659 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8660 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
8661 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8662 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8663 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
8664 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8665 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8666 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
8667 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8668 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8669 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
8670 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8671 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8672 }
8673}
8674
// Translate a pseudo "VLDn...Asm_<size>" alias opcode (produced by the asm
// matcher for the NEON load syntax) into the corresponding real VLDn
// instruction opcode.
//
// Spacing is an out-parameter giving the register-number increment between
// consecutive D registers of the implied register list: 1 means adjacent
// d-registers, 2 means every-other d-register (the double-spaced / q-register
// forms). Callers materialize the list operands by repeatedly adding Spacing
// to the base register (see the `Inst.getOperand(0).getReg() + Spacing` uses
// in processInstruction below).
//
// Any opcode not listed here is a programming error, hence the unreachable
// default.
8675 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8676 switch(Opc) {
8677 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 8677)
;
8678 // VLD1LN
8679 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8680 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8681 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8682 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8683 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8684 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8685 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
8686 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8687 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8688
8689 // VLD2LN
8690 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8691 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8692 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
// NOTE(review): Spacing = 1 here, but the matching _register form below
// (VLD2LNqWB_register_Asm_16) uses Spacing = 2 for the same q16 lane load —
// looks like a copy-paste inconsistency; confirm against the VLD2LNq16_UPD
// register-list encoding before changing.
8693 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8694 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8695 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8696 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8697 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8698 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8699 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8700 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
8701 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8702 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8703 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8704 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8705
8706 // VLD3DUP
8707 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8708 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8709 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
// NOTE(review): the q8 fixed form uses Spacing = 1 while the q8 _register
// form below uses Spacing = 2 — verify which is intended for VLD3DUPq8_UPD.
8710 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8711 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8712 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8713 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8714 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8715 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8716 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8717 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8718 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8719 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
8720 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8721 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8722 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8723 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8724 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8725
8726 // VLD3LN
8727 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8728 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8729 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
// NOTE(review): Spacing = 1 here vs. Spacing = 2 for the q16 _register form
// below — same fixed/register discrepancy pattern as VLD2LN above; confirm.
8730 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8731 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8732 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8733 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8734 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8735 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8736 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8737 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
8738 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8739 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8740 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8741 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8742
8743 // VLD3
8744 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8745 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8746 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8747 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8748 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8749 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8750 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8751 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8752 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8753 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8754 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8755 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8756 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
8757 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8758 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8759 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
8760 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8761 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8762
8763 // VLD4LN
8764 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8765 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8766 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8767 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8768 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8769 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8770 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8771 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8772 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8773 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8774 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
8775 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8776 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8777 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8778 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8779
8780 // VLD4DUP
8781 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8782 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8783 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
// NOTE(review): the q8 and q16 fixed forms use Spacing = 1 while their
// _register counterparts below use Spacing = 2 — same discrepancy pattern
// as the other q-register fixed-writeback cases; confirm intended spacing.
8784 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8785 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8786 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8787 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8788 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8789 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8790 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8791 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8792 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8793 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
8794 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8795 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8796 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8797 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8798 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8799
8800 // VLD4
8801 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8802 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8803 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8804 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8805 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8806 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8807 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8808 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8809 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8810 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8811 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8812 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8813 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
8814 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8815 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8816 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
8817 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8818 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8819 }
8820}
8821
8822bool ARMAsmParser::processInstruction(MCInst &Inst,
8823 const OperandVector &Operands,
8824 MCStreamer &Out) {
8825 // Check if we have the wide qualifier, because if it's present we
8826 // must avoid selecting a 16-bit thumb instruction.
8827 bool HasWideQualifier = false;
8828 for (auto &Op : Operands) {
8829 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8830 if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8831 HasWideQualifier = true;
8832 break;
8833 }
8834 }
8835
8836 switch (Inst.getOpcode()) {
8837 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8838 case ARM::LDRT_POST:
8839 case ARM::LDRBT_POST: {
8840 const unsigned Opcode =
8841 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8842 : ARM::LDRBT_POST_IMM;
8843 MCInst TmpInst;
8844 TmpInst.setOpcode(Opcode);
8845 TmpInst.addOperand(Inst.getOperand(0));
8846 TmpInst.addOperand(Inst.getOperand(1));
8847 TmpInst.addOperand(Inst.getOperand(1));
8848 TmpInst.addOperand(MCOperand::createReg(0));
8849 TmpInst.addOperand(MCOperand::createImm(0));
8850 TmpInst.addOperand(Inst.getOperand(2));
8851 TmpInst.addOperand(Inst.getOperand(3));
8852 Inst = TmpInst;
8853 return true;
8854 }
8855 // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for ommitted immediate.
8856 case ARM::LDRSBTii:
8857 case ARM::LDRHTii:
8858 case ARM::LDRSHTii: {
8859 MCInst TmpInst;
8860
8861 if (Inst.getOpcode() == ARM::LDRSBTii)
8862 TmpInst.setOpcode(ARM::LDRSBTi);
8863 else if (Inst.getOpcode() == ARM::LDRHTii)
8864 TmpInst.setOpcode(ARM::LDRHTi);
8865 else if (Inst.getOpcode() == ARM::LDRSHTii)
8866 TmpInst.setOpcode(ARM::LDRSHTi);
8867 TmpInst.addOperand(Inst.getOperand(0));
8868 TmpInst.addOperand(Inst.getOperand(1));
8869 TmpInst.addOperand(Inst.getOperand(1));
8870 TmpInst.addOperand(MCOperand::createImm(256));
8871 TmpInst.addOperand(Inst.getOperand(2));
8872 Inst = TmpInst;
8873 return true;
8874 }
8875 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8876 case ARM::STRT_POST:
8877 case ARM::STRBT_POST: {
8878 const unsigned Opcode =
8879 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8880 : ARM::STRBT_POST_IMM;
8881 MCInst TmpInst;
8882 TmpInst.setOpcode(Opcode);
8883 TmpInst.addOperand(Inst.getOperand(1));
8884 TmpInst.addOperand(Inst.getOperand(0));
8885 TmpInst.addOperand(Inst.getOperand(1));
8886 TmpInst.addOperand(MCOperand::createReg(0));
8887 TmpInst.addOperand(MCOperand::createImm(0));
8888 TmpInst.addOperand(Inst.getOperand(2));
8889 TmpInst.addOperand(Inst.getOperand(3));
8890 Inst = TmpInst;
8891 return true;
8892 }
8893 // Alias for alternate form of 'ADR Rd, #imm' instruction.
8894 case ARM::ADDri: {
8895 if (Inst.getOperand(1).getReg() != ARM::PC ||
8896 Inst.getOperand(5).getReg() != 0 ||
8897 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8898 return false;
8899 MCInst TmpInst;
8900 TmpInst.setOpcode(ARM::ADR);
8901 TmpInst.addOperand(Inst.getOperand(0));
8902 if (Inst.getOperand(2).isImm()) {
8903 // Immediate (mod_imm) will be in its encoded form, we must unencode it
8904 // before passing it to the ADR instruction.
8905 unsigned Enc = Inst.getOperand(2).getImm();
8906 TmpInst.addOperand(MCOperand::createImm(
8907 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8908 } else {
8909 // Turn PC-relative expression into absolute expression.
8910 // Reading PC provides the start of the current instruction + 8 and
8911 // the transform to adr is biased by that.
8912 MCSymbol *Dot = getContext().createTempSymbol();
8913 Out.emitLabel(Dot);
8914 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8915 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8916 MCSymbolRefExpr::VK_None,
8917 getContext());
8918 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8919 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8920 getContext());
8921 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8922 getContext());
8923 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8924 }
8925 TmpInst.addOperand(Inst.getOperand(3));
8926 TmpInst.addOperand(Inst.getOperand(4));
8927 Inst = TmpInst;
8928 return true;
8929 }
8930 // Aliases for imm syntax of LDR instructions.
8931 case ARM::t2LDR_PRE_imm:
8932 case ARM::t2LDR_POST_imm: {
8933 MCInst TmpInst;
8934 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
8935 : ARM::t2LDR_POST);
8936 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8937 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8938 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8939 TmpInst.addOperand(Inst.getOperand(2)); // imm
8940 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8941 Inst = TmpInst;
8942 return true;
8943 }
8944 // Aliases for imm syntax of STR instructions.
8945 case ARM::t2STR_PRE_imm:
8946 case ARM::t2STR_POST_imm: {
8947 MCInst TmpInst;
8948 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
8949 : ARM::t2STR_POST);
8950 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8951 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8952 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8953 TmpInst.addOperand(Inst.getOperand(2)); // imm
8954 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8955 Inst = TmpInst;
8956 return true;
8957 }
8958 // Aliases for imm syntax of LDRB instructions.
8959 case ARM::t2LDRB_OFFSET_imm: {
8960 MCInst TmpInst;
8961 TmpInst.setOpcode(ARM::t2LDRBi8);
8962 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8963 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8964 TmpInst.addOperand(Inst.getOperand(2)); // imm
8965 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8966 Inst = TmpInst;
8967 return true;
8968 }
8969 case ARM::t2LDRB_PRE_imm:
8970 case ARM::t2LDRB_POST_imm: {
8971 MCInst TmpInst;
8972 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
8973 ? ARM::t2LDRB_PRE
8974 : ARM::t2LDRB_POST);
8975 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8976 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8977 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8978 TmpInst.addOperand(Inst.getOperand(2)); // imm
8979 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8980 Inst = TmpInst;
8981 return true;
8982 }
8983 // Aliases for imm syntax of STRB instructions.
8984 case ARM::t2STRB_OFFSET_imm: {
8985 MCInst TmpInst;
8986 TmpInst.setOpcode(ARM::t2STRBi8);
8987 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8988 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8989 TmpInst.addOperand(Inst.getOperand(2)); // imm
8990 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8991 Inst = TmpInst;
8992 return true;
8993 }
8994 case ARM::t2STRB_PRE_imm:
8995 case ARM::t2STRB_POST_imm: {
8996 MCInst TmpInst;
8997 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
8998 ? ARM::t2STRB_PRE
8999 : ARM::t2STRB_POST);
9000 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9001 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9002 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9003 TmpInst.addOperand(Inst.getOperand(2)); // imm
9004 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9005 Inst = TmpInst;
9006 return true;
9007 }
9008 // Aliases for imm syntax of LDRH instructions.
9009 case ARM::t2LDRH_OFFSET_imm: {
9010 MCInst TmpInst;
9011 TmpInst.setOpcode(ARM::t2LDRHi8);
9012 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9013 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9014 TmpInst.addOperand(Inst.getOperand(2)); // imm
9015 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9016 Inst = TmpInst;
9017 return true;
9018 }
9019 case ARM::t2LDRH_PRE_imm:
9020 case ARM::t2LDRH_POST_imm: {
9021 MCInst TmpInst;
9022 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
9023 ? ARM::t2LDRH_PRE
9024 : ARM::t2LDRH_POST);
9025 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9026 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9027 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9028 TmpInst.addOperand(Inst.getOperand(2)); // imm
9029 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9030 Inst = TmpInst;
9031 return true;
9032 }
9033 // Aliases for imm syntax of STRH instructions.
9034 case ARM::t2STRH_OFFSET_imm: {
9035 MCInst TmpInst;
9036 TmpInst.setOpcode(ARM::t2STRHi8);
9037 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9038 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9039 TmpInst.addOperand(Inst.getOperand(2)); // imm
9040 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9041 Inst = TmpInst;
9042 return true;
9043 }
9044 case ARM::t2STRH_PRE_imm:
9045 case ARM::t2STRH_POST_imm: {
9046 MCInst TmpInst;
9047 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
9048 ? ARM::t2STRH_PRE
9049 : ARM::t2STRH_POST);
9050 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9051 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9052 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9053 TmpInst.addOperand(Inst.getOperand(2)); // imm
9054 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9055 Inst = TmpInst;
9056 return true;
9057 }
9058 // Aliases for imm syntax of LDRSB instructions.
9059 case ARM::t2LDRSB_OFFSET_imm: {
9060 MCInst TmpInst;
9061 TmpInst.setOpcode(ARM::t2LDRSBi8);
9062 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9063 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9064 TmpInst.addOperand(Inst.getOperand(2)); // imm
9065 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9066 Inst = TmpInst;
9067 return true;
9068 }
9069 case ARM::t2LDRSB_PRE_imm:
9070 case ARM::t2LDRSB_POST_imm: {
9071 MCInst TmpInst;
9072 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
9073 ? ARM::t2LDRSB_PRE
9074 : ARM::t2LDRSB_POST);
9075 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9076 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9077 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9078 TmpInst.addOperand(Inst.getOperand(2)); // imm
9079 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9080 Inst = TmpInst;
9081 return true;
9082 }
9083 // Aliases for imm syntax of LDRSH instructions.
9084 case ARM::t2LDRSH_OFFSET_imm: {
9085 MCInst TmpInst;
9086 TmpInst.setOpcode(ARM::t2LDRSHi8);
9087 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9088 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9089 TmpInst.addOperand(Inst.getOperand(2)); // imm
9090 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9091 Inst = TmpInst;
9092 return true;
9093 }
9094 case ARM::t2LDRSH_PRE_imm:
9095 case ARM::t2LDRSH_POST_imm: {
9096 MCInst TmpInst;
9097 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
9098 ? ARM::t2LDRSH_PRE
9099 : ARM::t2LDRSH_POST);
9100 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9101 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9102 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9103 TmpInst.addOperand(Inst.getOperand(2)); // imm
9104 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9105 Inst = TmpInst;
9106 return true;
9107 }
9108 // Aliases for alternate PC+imm syntax of LDR instructions.
9109 case ARM::t2LDRpcrel:
9110 // Select the narrow version if the immediate will fit.
9111 if (Inst.getOperand(1).getImm() > 0 &&
9112 Inst.getOperand(1).getImm() <= 0xff &&
9113 !HasWideQualifier)
9114 Inst.setOpcode(ARM::tLDRpci);
9115 else
9116 Inst.setOpcode(ARM::t2LDRpci);
9117 return true;
9118 case ARM::t2LDRBpcrel:
9119 Inst.setOpcode(ARM::t2LDRBpci);
9120 return true;
9121 case ARM::t2LDRHpcrel:
9122 Inst.setOpcode(ARM::t2LDRHpci);
9123 return true;
9124 case ARM::t2LDRSBpcrel:
9125 Inst.setOpcode(ARM::t2LDRSBpci);
9126 return true;
9127 case ARM::t2LDRSHpcrel:
9128 Inst.setOpcode(ARM::t2LDRSHpci);
9129 return true;
9130 case ARM::LDRConstPool:
9131 case ARM::tLDRConstPool:
9132 case ARM::t2LDRConstPool: {
9133 // Pseudo instruction ldr rt, =immediate is converted to a
9134 // MOV rt, immediate if immediate is known and representable
9135 // otherwise we create a constant pool entry that we load from.
9136 MCInst TmpInst;
9137 if (Inst.getOpcode() == ARM::LDRConstPool)
9138 TmpInst.setOpcode(ARM::LDRi12);
9139 else if (Inst.getOpcode() == ARM::tLDRConstPool)
9140 TmpInst.setOpcode(ARM::tLDRpci);
9141 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9142 TmpInst.setOpcode(ARM::t2LDRpci);
9143 const ARMOperand &PoolOperand =
9144 (HasWideQualifier ?
9145 static_cast<ARMOperand &>(*Operands[4]) :
9146 static_cast<ARMOperand &>(*Operands[3]));
9147 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9148 // If SubExprVal is a constant we may be able to use a MOV
9149 if (isa<MCConstantExpr>(SubExprVal) &&
9150 Inst.getOperand(0).getReg() != ARM::PC &&
9151 Inst.getOperand(0).getReg() != ARM::SP) {
9152 int64_t Value =
9153 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9154 bool UseMov = true;
9155 bool MovHasS = true;
9156 if (Inst.getOpcode() == ARM::LDRConstPool) {
9157 // ARM Constant
9158 if (ARM_AM::getSOImmVal(Value) != -1) {
9159 Value = ARM_AM::getSOImmVal(Value);
9160 TmpInst.setOpcode(ARM::MOVi);
9161 }
9162 else if (ARM_AM::getSOImmVal(~Value) != -1) {
9163 Value = ARM_AM::getSOImmVal(~Value);
9164 TmpInst.setOpcode(ARM::MVNi);
9165 }
9166 else if (hasV6T2Ops() &&
9167 Value >=0 && Value < 65536) {
9168 TmpInst.setOpcode(ARM::MOVi16);
9169 MovHasS = false;
9170 }
9171 else
9172 UseMov = false;
9173 }
9174 else {
9175 // Thumb/Thumb2 Constant
9176 if (hasThumb2() &&
9177 ARM_AM::getT2SOImmVal(Value) != -1)
9178 TmpInst.setOpcode(ARM::t2MOVi);
9179 else if (hasThumb2() &&
9180 ARM_AM::getT2SOImmVal(~Value) != -1) {
9181 TmpInst.setOpcode(ARM::t2MVNi);
9182 Value = ~Value;
9183 }
9184 else if (hasV8MBaseline() &&
9185 Value >=0 && Value < 65536) {
9186 TmpInst.setOpcode(ARM::t2MOVi16);
9187 MovHasS = false;
9188 }
9189 else
9190 UseMov = false;
9191 }
9192 if (UseMov) {
9193 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9194 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
9195 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9196 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9197 if (MovHasS)
9198 TmpInst.addOperand(MCOperand::createReg(0)); // S
9199 Inst = TmpInst;
9200 return true;
9201 }
9202 }
9203 // No opportunity to use MOV/MVN create constant pool
9204 const MCExpr *CPLoc =
9205 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9206 PoolOperand.getStartLoc());
9207 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9208 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9209 if (TmpInst.getOpcode() == ARM::LDRi12)
9210 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
9211 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9212 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9213 Inst = TmpInst;
9214 return true;
9215 }
9216 // Handle NEON VST complex aliases.
9217 case ARM::VST1LNdWB_register_Asm_8:
9218 case ARM::VST1LNdWB_register_Asm_16:
9219 case ARM::VST1LNdWB_register_Asm_32: {
9220 MCInst TmpInst;
9221 // Shuffle the operands around so the lane index operand is in the
9222 // right place.
9223 unsigned Spacing;
9224 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9225 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9226 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9227 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9228 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9229 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9230 TmpInst.addOperand(Inst.getOperand(1)); // lane
9231 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9232 TmpInst.addOperand(Inst.getOperand(6));
9233 Inst = TmpInst;
9234 return true;
9235 }
9236
9237 case ARM::VST2LNdWB_register_Asm_8:
9238 case ARM::VST2LNdWB_register_Asm_16:
9239 case ARM::VST2LNdWB_register_Asm_32:
9240 case ARM::VST2LNqWB_register_Asm_16:
9241 case ARM::VST2LNqWB_register_Asm_32: {
9242 MCInst TmpInst;
9243 // Shuffle the operands around so the lane index operand is in the
9244 // right place.
9245 unsigned Spacing;
9246 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9247 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9248 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9249 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9250 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9251 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9252 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9253 Spacing));
9254 TmpInst.addOperand(Inst.getOperand(1)); // lane
9255 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9256 TmpInst.addOperand(Inst.getOperand(6));
9257 Inst = TmpInst;
9258 return true;
9259 }
9260
9261 case ARM::VST3LNdWB_register_Asm_8:
9262 case ARM::VST3LNdWB_register_Asm_16:
9263 case ARM::VST3LNdWB_register_Asm_32:
9264 case ARM::VST3LNqWB_register_Asm_16:
9265 case ARM::VST3LNqWB_register_Asm_32: {
9266 MCInst TmpInst;
9267 // Shuffle the operands around so the lane index operand is in the
9268 // right place.
9269 unsigned Spacing;
9270 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9271 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9272 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9273 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9274 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9275 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9276 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9277 Spacing));
9278 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9279 Spacing * 2));
9280 TmpInst.addOperand(Inst.getOperand(1)); // lane
9281 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9282 TmpInst.addOperand(Inst.getOperand(6));
9283 Inst = TmpInst;
9284 return true;
9285 }
9286
9287 case ARM::VST4LNdWB_register_Asm_8:
9288 case ARM::VST4LNdWB_register_Asm_16:
9289 case ARM::VST4LNdWB_register_Asm_32:
9290 case ARM::VST4LNqWB_register_Asm_16:
9291 case ARM::VST4LNqWB_register_Asm_32: {
9292 MCInst TmpInst;
9293 // Shuffle the operands around so the lane index operand is in the
9294 // right place.
9295 unsigned Spacing;
9296 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9297 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9298 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9299 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9300 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9301 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9302 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9303 Spacing));
9304 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9305 Spacing * 2));
9306 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9307 Spacing * 3));
9308 TmpInst.addOperand(Inst.getOperand(1)); // lane
9309 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9310 TmpInst.addOperand(Inst.getOperand(6));
9311 Inst = TmpInst;
9312 return true;
9313 }
9314
9315 case ARM::VST1LNdWB_fixed_Asm_8:
9316 case ARM::VST1LNdWB_fixed_Asm_16:
9317 case ARM::VST1LNdWB_fixed_Asm_32: {
9318 MCInst TmpInst;
9319 // Shuffle the operands around so the lane index operand is in the
9320 // right place.
9321 unsigned Spacing;
9322 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9323 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9324 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9325 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9326 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9327 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9328 TmpInst.addOperand(Inst.getOperand(1)); // lane
9329 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9330 TmpInst.addOperand(Inst.getOperand(5));
9331 Inst = TmpInst;
9332 return true;
9333 }
9334
  // VST2/VST3/VST4 single-lane stores, post-indexed with the fixed (implied)
  // increment. Expand the assembler alias into the real instruction:
  // materialize the implied register list (Vd, Vd+Spacing, ...) and move the
  // lane index to where the encoder expects it. Register 0 for Rm marks the
  // fixed-increment writeback form (contrast the _register variants, which
  // pass a real Rm operand).
  case ARM::VST2LNdWB_fixed_Asm_8:
  case ARM::VST2LNdWB_fixed_Asm_16:
  case ARM::VST2LNdWB_fixed_Asm_32:
  case ARM::VST2LNqWB_fixed_Asm_16:
  case ARM::VST2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing; // set by getRealVSTOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdWB_fixed_Asm_8:
  case ARM::VST3LNdWB_fixed_Asm_16:
  case ARM::VST3LNdWB_fixed_Asm_32:
  case ARM::VST3LNqWB_fixed_Asm_16:
  case ARM::VST3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdWB_fixed_Asm_8:
  case ARM::VST4LNdWB_fixed_Asm_16:
  case ARM::VST4LNdWB_fixed_Asm_32:
  case ARM::VST4LNqWB_fixed_Asm_16:
  case ARM::VST4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
9412
  // VST1-VST4 single-lane stores without writeback. Same alias expansion as
  // the writeback forms above, minus the Rn_wb and Rm operands: Rn/alignment
  // first, then the expanded register list, then the lane index.
  case ARM::VST1LNdAsm_8:
  case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing; // set by getRealVSTOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdAsm_8:
  case ARM::VST2LNdAsm_16:
  case ARM::VST2LNdAsm_32:
  case ARM::VST2LNqAsm_16:
  case ARM::VST2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdAsm_8:
  case ARM::VST3LNdAsm_16:
  case ARM::VST3LNdAsm_32:
  case ARM::VST3LNqAsm_16:
  case ARM::VST3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdAsm_8:
  case ARM::VST4LNdAsm_16:
  case ARM::VST4LNdAsm_32:
  case ARM::VST4LNqAsm_16:
  case ARM::VST4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
9502
  // Handle NEON VLD complex aliases.
  // VLD1-VLD4 single-lane loads, post-indexed by register Rm. Loads list the
  // expanded destination registers first (they are defs), then Rn_wb/Rn/
  // alignment/Rm, then the tied source list (a lane load merges into the
  // existing vector contents, so each destination is tied to a source), then
  // the lane index.
  case ARM::VLD1LNdWB_register_Asm_8:
  case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_register_Asm_8:
  case ARM::VLD2LNdWB_register_Asm_16:
  case ARM::VLD2LNdWB_register_Asm_32:
  case ARM::VLD2LNqWB_register_Asm_16:
  case ARM::VLD2LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_register_Asm_8:
  case ARM::VLD3LNdWB_register_Asm_16:
  case ARM::VLD3LNdWB_register_Asm_32:
  case ARM::VLD3LNqWB_register_Asm_16:
  case ARM::VLD3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied third src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_register_Asm_8:
  case ARM::VLD4LNdWB_register_Asm_16:
  case ARM::VLD4LNdWB_register_Asm_32:
  case ARM::VLD4LNqWB_register_Asm_16:
  case ARM::VLD4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied third src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // tied fourth src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // predicate register
    Inst = TmpInst;
    return true;
  }
9617
  // VLD1-VLD4 single-lane loads, post-indexed with the fixed (implied)
  // increment. Identical layout to the _register variants above except that
  // Rm is a zero register, marking the fixed-increment writeback form.
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_fixed_Asm_8:
  case ARM::VLD3LNdWB_fixed_Asm_16:
  case ARM::VLD3LNdWB_fixed_Asm_32:
  case ARM::VLD3LNqWB_fixed_Asm_16:
  case ARM::VLD3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied third src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_fixed_Asm_8:
  case ARM::VLD4LNdWB_fixed_Asm_16:
  case ARM::VLD4LNdWB_fixed_Asm_32:
  case ARM::VLD4LNqWB_fixed_Asm_16:
  case ARM::VLD4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied third src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // tied fourth src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
9731
  // VLD1-VLD4 single-lane loads without writeback: destination list, then
  // Rn/alignment, then the tied source list, then the lane index.
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied third src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // tied third src
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // tied fourth src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
9837
  // VLD3DUP single 3-element structure to all lanes instructions.
  // No lane operand and no tied sources here: the whole of each destination
  // register is written, so only the expanded destination list needs to be
  // materialized from Vd and Spacing.
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: Rm is the zero register.
  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Register writeback form: Rm comes from the source operands.
  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
9908
  // VLD3 multiple 3-element structure instructions.
  // Same destination-list expansion as VLD3DUP above (no lane, no tied
  // sources), in the plain, fixed-writeback and register-writeback forms.
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: Rm is the zero register.
  case ARM::VLD3dWB_fixed_Asm_8:
  case ARM::VLD3dWB_fixed_Asm_16:
  case ARM::VLD3dWB_fixed_Asm_32:
  case ARM::VLD3qWB_fixed_Asm_8:
  case ARM::VLD3qWB_fixed_Asm_16:
  case ARM::VLD3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Register writeback form: Rm comes from the source operands.
  case ARM::VLD3dWB_register_Asm_8:
  case ARM::VLD3dWB_register_Asm_16:
  case ARM::VLD3dWB_register_Asm_32:
  case ARM::VLD3qWB_register_Asm_8:
  case ARM::VLD3qWB_register_Asm_16:
  case ARM::VLD3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
9979
  // VLD4DUP single 4-element structure to all lanes instructions.
  // (Comment previously said "3-element"; these are the VLD4 variants.)
  // Four-register destination list, otherwise identical in shape to the
  // VLD3DUP cases above.
  case ARM::VLD4DUPdAsm_8:
  case ARM::VLD4DUPdAsm_16:
  case ARM::VLD4DUPdAsm_32:
  case ARM::VLD4DUPqAsm_8:
  case ARM::VLD4DUPqAsm_16:
  case ARM::VLD4DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: Rm is the zero register.
  case ARM::VLD4DUPdWB_fixed_Asm_8:
  case ARM::VLD4DUPdWB_fixed_Asm_16:
  case ARM::VLD4DUPdWB_fixed_Asm_32:
  case ARM::VLD4DUPqWB_fixed_Asm_8:
  case ARM::VLD4DUPqWB_fixed_Asm_16:
  case ARM::VLD4DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Register writeback form: Rm comes from the source operands.
  case ARM::VLD4DUPdWB_register_Asm_8:
  case ARM::VLD4DUPdWB_register_Asm_16:
  case ARM::VLD4DUPdWB_register_Asm_32:
  case ARM::VLD4DUPqWB_register_Asm_8:
  case ARM::VLD4DUPqWB_register_Asm_16:
  case ARM::VLD4DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
10056
  // VLD4 multiple 4-element structure instructions.
  // Four-register destination list; same three forms (plain, fixed
  // writeback, register writeback) as the VLD3 cases above.
  case ARM::VLD4dAsm_8:
  case ARM::VLD4dAsm_16:
  case ARM::VLD4dAsm_32:
  case ARM::VLD4qAsm_8:
  case ARM::VLD4qAsm_16:
  case ARM::VLD4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing; // set by getRealVLDOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: Rm is the zero register.
  case ARM::VLD4dWB_fixed_Asm_8:
  case ARM::VLD4dWB_fixed_Asm_16:
  case ARM::VLD4dWB_fixed_Asm_32:
  case ARM::VLD4qWB_fixed_Asm_8:
  case ARM::VLD4qWB_fixed_Asm_16:
  case ARM::VLD4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Register writeback form: Rm comes from the source operands.
  case ARM::VLD4dWB_register_Asm_8:
  case ARM::VLD4dWB_register_Asm_16:
  case ARM::VLD4dWB_register_Asm_32:
  case ARM::VLD4qWB_register_Asm_8:
  case ARM::VLD4qWB_register_Asm_16:
  case ARM::VLD4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third dest reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3)); // fourth dest reg
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
10133
  // VST3 multiple 3-element structure instructions.
  // Stores put the address operands (and writeback defs) first, then the
  // expanded source register list; no lane operand and no tied sources.
  case ARM::VST3dAsm_8:
  case ARM::VST3dAsm_16:
  case ARM::VST3dAsm_32:
  case ARM::VST3qAsm_8:
  case ARM::VST3qAsm_16:
  case ARM::VST3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing; // set by getRealVSTOpcode: D-reg step within the list
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback form: Rm is the zero register.
  case ARM::VST3dWB_fixed_Asm_8:
  case ARM::VST3dWB_fixed_Asm_16:
  case ARM::VST3dWB_fixed_Asm_32:
  case ARM::VST3qWB_fixed_Asm_8:
  case ARM::VST3qWB_fixed_Asm_16:
  case ARM::VST3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }

  // Register writeback form: Rm comes from the source operands.
  case ARM::VST3dWB_register_Asm_8:
  case ARM::VST3dWB_register_Asm_16:
  case ARM::VST3dWB_register_Asm_32:
  case ARM::VST3qWB_register_Asm_8:
  case ARM::VST3qWB_register_Asm_16:
  case ARM::VST3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2)); // third list reg
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
10204
10205 // VST4 multiple 4-element structure instructions.
10206 case ARM::VST4dAsm_8:
10207 case ARM::VST4dAsm_16:
10208 case ARM::VST4dAsm_32:
10209 case ARM::VST4qAsm_8:
10210 case ARM::VST4qAsm_16:
10211 case ARM::VST4qAsm_32: {
10212 MCInst TmpInst;
10213 unsigned Spacing;
// Expand the assembly pseudo into the real VST4 opcode. getRealVSTOpcode
// sets Spacing; the stored register list is Vd, Vd+Spacing, Vd+2*Spacing,
// Vd+3*Spacing.
10214 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10215 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10216 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10217 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10218 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10219 Spacing));
10220 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10221 Spacing * 2));
10222 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10223 Spacing * 3));
10224 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10225 TmpInst.addOperand(Inst.getOperand(4));
10226 Inst = TmpInst;
10227 return true;
10228 }
10229
10230 case ARM::VST4dWB_fixed_Asm_8:
10231 case ARM::VST4dWB_fixed_Asm_16:
10232 case ARM::VST4dWB_fixed_Asm_32:
10233 case ARM::VST4qWB_fixed_Asm_8:
10234 case ARM::VST4qWB_fixed_Asm_16:
10235 case ARM::VST4qWB_fixed_Asm_32: {
10236 MCInst TmpInst;
10237 unsigned Spacing;
10238 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10239 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10240 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10241 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10242 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10243 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10244 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10245 Spacing));
10246 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10247 Spacing * 2));
10248 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10249 Spacing * 3));
10250 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10251 TmpInst.addOperand(Inst.getOperand(4));
10252 Inst = TmpInst;
10253 return true;
10254 }
10255
10256 case ARM::VST4dWB_register_Asm_8:
10257 case ARM::VST4dWB_register_Asm_16:
10258 case ARM::VST4dWB_register_Asm_32:
10259 case ARM::VST4qWB_register_Asm_8:
10260 case ARM::VST4qWB_register_Asm_16:
10261 case ARM::VST4qWB_register_Asm_32: {
10262 MCInst TmpInst;
10263 unsigned Spacing;
10264 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10265 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10266 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10267 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10268 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10269 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10270 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10271 Spacing));
10272 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10273 Spacing * 2));
10274 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10275 Spacing * 3));
10276 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10277 TmpInst.addOperand(Inst.getOperand(5));
10278 Inst = TmpInst;
10279 return true;
10280 }
10281
10282 // Handle encoding choice for the shift-immediate instructions.
10283 case ARM::t2LSLri:
10284 case ARM::t2LSRri:
10285 case ARM::t2ASRri:
10286 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10287 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10288 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10289 !HasWideQualifier) {
10290 unsigned NewOpc;
10291 switch (Inst.getOpcode()) {
10292 default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10292)
;
10293 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10294 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10295 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10296 }
10297 // The Thumb1 operands aren't in the same order. Awesome, eh?
10298 MCInst TmpInst;
10299 TmpInst.setOpcode(NewOpc);
10300 TmpInst.addOperand(Inst.getOperand(0));
10301 TmpInst.addOperand(Inst.getOperand(5));
10302 TmpInst.addOperand(Inst.getOperand(1));
10303 TmpInst.addOperand(Inst.getOperand(2));
10304 TmpInst.addOperand(Inst.getOperand(3));
10305 TmpInst.addOperand(Inst.getOperand(4));
10306 Inst = TmpInst;
10307 return true;
10308 }
10309 return false;
10310
10311 // Handle the Thumb2 mode MOV complex aliases.
10312 case ARM::t2MOVsr:
10313 case ARM::t2MOVSsr: {
10314 // Which instruction to expand to depends on the CCOut operand and
10315 // whether we're in an IT block if the register operands are low
10316 // registers.
10317 bool isNarrow = false;
10318 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10319 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10320 isARMLowRegister(Inst.getOperand(2).getReg()) &&
10321 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10322 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10323 !HasWideQualifier)
10324 isNarrow = true;
10325 MCInst TmpInst;
10326 unsigned newOpc;
10327 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10328 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10328)
;
10329 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10330 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10331 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10332 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
10333 }
10334 TmpInst.setOpcode(newOpc);
10335 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10336 if (isNarrow)
10337 TmpInst.addOperand(MCOperand::createReg(
10338 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10339 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10340 TmpInst.addOperand(Inst.getOperand(2)); // Rm
10341 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10342 TmpInst.addOperand(Inst.getOperand(5));
10343 if (!isNarrow)
10344 TmpInst.addOperand(MCOperand::createReg(
10345 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10346 Inst = TmpInst;
10347 return true;
10348 }
10349 case ARM::t2MOVsi:
10350 case ARM::t2MOVSsi: {
10351 // Which instruction to expand to depends on the CCOut operand and
10352 // whether we're in an IT block if the register operands are low
10353 // registers.
10354 bool isNarrow = false;
10355 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10356 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10357 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10358 !HasWideQualifier)
10359 isNarrow = true;
10360 MCInst TmpInst;
10361 unsigned newOpc;
10362 unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10363 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10364 bool isMov = false;
10365 // MOV rd, rm, LSL #0 is actually a MOV instruction
10366 if (Shift == ARM_AM::lsl && Amount == 0) {
10367 isMov = true;
10368 // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10369 // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10370 // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10371 // instead.
10372 if (inITBlock()) {
10373 isNarrow = false;
10374 }
10375 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10376 } else {
10377 switch(Shift) {
10378 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10378)
;
10379 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10380 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10381 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10382 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10383 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10384 }
10385 }
10386 if (Amount == 32) Amount = 0;
10387 TmpInst.setOpcode(newOpc);
10388 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10389 if (isNarrow && !isMov)
10390 TmpInst.addOperand(MCOperand::createReg(
10391 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10392 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10393 if (newOpc != ARM::t2RRX && !isMov)
10394 TmpInst.addOperand(MCOperand::createImm(Amount));
10395 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10396 TmpInst.addOperand(Inst.getOperand(4));
10397 if (!isNarrow)
10398 TmpInst.addOperand(MCOperand::createReg(
10399 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10400 Inst = TmpInst;
10401 return true;
10402 }
10403 // Handle the ARM mode MOV complex aliases.
10404 case ARM::ASRr:
10405 case ARM::LSRr:
10406 case ARM::LSLr:
10407 case ARM::RORr: {
10408 ARM_AM::ShiftOpc ShiftTy;
10409 switch(Inst.getOpcode()) {
10410 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10410)
;
10411 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10412 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10413 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10414 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10415 }
10416 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10417 MCInst TmpInst;
10418 TmpInst.setOpcode(ARM::MOVsr);
10419 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10420 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10421 TmpInst.addOperand(Inst.getOperand(2)); // Rm
10422 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10423 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10424 TmpInst.addOperand(Inst.getOperand(4));
10425 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10426 Inst = TmpInst;
10427 return true;
10428 }
10429 case ARM::ASRi:
10430 case ARM::LSRi:
10431 case ARM::LSLi:
10432 case ARM::RORi: {
10433 ARM_AM::ShiftOpc ShiftTy;
10434 switch(Inst.getOpcode()) {
10435 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10435)
;
10436 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10437 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10438 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10439 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10440 }
10441 // A shift by zero is a plain MOVr, not a MOVsi.
10442 unsigned Amt = Inst.getOperand(2).getImm();
10443 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10444 // A shift by 32 should be encoded as 0 when permitted
10445 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10446 Amt = 0;
10447 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10448 MCInst TmpInst;
10449 TmpInst.setOpcode(Opc);
10450 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10451 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10452 if (Opc == ARM::MOVsi)
10453 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10454 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10455 TmpInst.addOperand(Inst.getOperand(4));
10456 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10457 Inst = TmpInst;
10458 return true;
10459 }
10460 case ARM::RRXi: {
10461 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10462 MCInst TmpInst;
10463 TmpInst.setOpcode(ARM::MOVsi);
10464 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10465 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10466 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10467 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10468 TmpInst.addOperand(Inst.getOperand(3));
10469 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10470 Inst = TmpInst;
10471 return true;
10472 }
10473 case ARM::t2LDMIA_UPD: {
10474 // If this is a load of a single register, then we should use
10475 // a post-indexed LDR instruction instead, per the ARM ARM.
10476 if (Inst.getNumOperands() != 5)
10477 return false;
10478 MCInst TmpInst;
10479 TmpInst.setOpcode(ARM::t2LDR_POST);
10480 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10481 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10482 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10483 TmpInst.addOperand(MCOperand::createImm(4));
10484 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10485 TmpInst.addOperand(Inst.getOperand(3));
10486 Inst = TmpInst;
10487 return true;
10488 }
10489 case ARM::t2STMDB_UPD: {
10490 // If this is a store of a single register, then we should use
10491 // a pre-indexed STR instruction instead, per the ARM ARM.
10492 if (Inst.getNumOperands() != 5)
10493 return false;
10494 MCInst TmpInst;
10495 TmpInst.setOpcode(ARM::t2STR_PRE);
10496 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10497 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10498 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10499 TmpInst.addOperand(MCOperand::createImm(-4));
10500 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10501 TmpInst.addOperand(Inst.getOperand(3));
10502 Inst = TmpInst;
10503 return true;
10504 }
10505 case ARM::LDMIA_UPD:
10506 // If this is a load of a single register via a 'pop', then we should use
10507 // a post-indexed LDR instruction instead, per the ARM ARM.
10508 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10509 Inst.getNumOperands() == 5) {
10510 MCInst TmpInst;
10511 TmpInst.setOpcode(ARM::LDR_POST_IMM);
10512 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10513 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10514 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10515 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
10516 TmpInst.addOperand(MCOperand::createImm(4));
10517 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10518 TmpInst.addOperand(Inst.getOperand(3));
10519 Inst = TmpInst;
10520 return true;
10521 }
10522 break;
10523 case ARM::STMDB_UPD:
10524 // If this is a store of a single register via a 'push', then we should use
10525 // a pre-indexed STR instruction instead, per the ARM ARM.
10526 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10527 Inst.getNumOperands() == 5) {
10528 MCInst TmpInst;
10529 TmpInst.setOpcode(ARM::STR_PRE_IMM);
10530 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10531 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10532 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10533 TmpInst.addOperand(MCOperand::createImm(-4));
10534 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10535 TmpInst.addOperand(Inst.getOperand(3));
10536 Inst = TmpInst;
10537 }
10538 break;
10539 case ARM::t2ADDri12:
10540 case ARM::t2SUBri12:
10541 case ARM::t2ADDspImm12:
10542 case ARM::t2SUBspImm12: {
10543 // If the immediate fits for encoding T3 and the generic
10544 // mnemonic was used, encoding T3 is preferred.
10545 const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10546 if ((Token != "add" && Token != "sub") ||
10547 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10548 break;
10549 switch (Inst.getOpcode()) {
10550 case ARM::t2ADDri12:
10551 Inst.setOpcode(ARM::t2ADDri);
10552 break;
10553 case ARM::t2SUBri12:
10554 Inst.setOpcode(ARM::t2SUBri);
10555 break;
10556 case ARM::t2ADDspImm12:
10557 Inst.setOpcode(ARM::t2ADDspImm);
10558 break;
10559 case ARM::t2SUBspImm12:
10560 Inst.setOpcode(ARM::t2SUBspImm);
10561 break;
10562 }
10563
10564 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10565 return true;
10566 }
10567 case ARM::tADDi8:
10568 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10569 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10570 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10571 // to encoding T1 if <Rd> is omitted."
10572 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10573 Inst.setOpcode(ARM::tADDi3);
10574 return true;
10575 }
10576 break;
10577 case ARM::tSUBi8:
10578 // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
10579 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10580 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10581 // to encoding T1 if <Rd> is omitted."
10582 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10583 Inst.setOpcode(ARM::tSUBi3);
10584 return true;
10585 }
10586 break;
10587 case ARM::t2ADDri:
10588 case ARM::t2SUBri: {
10589 // If the destination and first source operand are the same, and
10590 // the flags are compatible with the current IT status, use encoding T2
10591 // instead of T3. For compatibility with the system 'as'. Make sure the
10592 // wide encoding wasn't explicit.
10593 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10594 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10595 (Inst.getOperand(2).isImm() &&
10596 (unsigned)Inst.getOperand(2).getImm() > 255) ||
10597 Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10598 HasWideQualifier)
10599 break;
10600 MCInst TmpInst;
10601 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10602 ARM::tADDi8 : ARM::tSUBi8);
10603 TmpInst.addOperand(Inst.getOperand(0));
10604 TmpInst.addOperand(Inst.getOperand(5));
10605 TmpInst.addOperand(Inst.getOperand(0));
10606 TmpInst.addOperand(Inst.getOperand(2));
10607 TmpInst.addOperand(Inst.getOperand(3));
10608 TmpInst.addOperand(Inst.getOperand(4));
10609 Inst = TmpInst;
10610 return true;
10611 }
10612 case ARM::t2ADDspImm:
10613 case ARM::t2SUBspImm: {
10614 // Prefer T1 encoding if possible
// T1 is only available when there is no flag-setting (cc_out == 0) and no
// explicit .w qualifier.
10615 if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10616 break;
10617 unsigned V = Inst.getOperand(2).getImm();
// tADDspi/tSUBspi encode a 7-bit immediate scaled by 4, so the value must
// be a multiple of 4 no larger than ((1 << 7) - 1) * 4 == 508.
10618 if (V & 3 || V > ((1 << 7) - 1) << 2)
10619 break;
10620 MCInst TmpInst;
10621 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10622 : ARM::tSUBspi);
10623 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10624 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10625 TmpInst.addOperand(MCOperand::createImm(V / 4)); // immediate
10626 TmpInst.addOperand(Inst.getOperand(3)); // pred
10627 TmpInst.addOperand(Inst.getOperand(4));
10628 Inst = TmpInst;
10629 return true;
10630 }
10631 case ARM::t2ADDrr: {
10632 // If the destination and first source operand are the same, and
10633 // there's no setting of the flags, use encoding T2 instead of T3.
10634 // Note that this is only for ADD, not SUB. This mirrors the system
10635 // 'as' behaviour. Also take advantage of ADD being commutative.
10636 // Make sure the wide encoding wasn't explicit.
10637 bool Swap = false;
10638 auto DestReg = Inst.getOperand(0).getReg();
10639 bool Transform = DestReg == Inst.getOperand(1).getReg();
10640 if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10641 Transform = true;
10642 Swap = true;
10643 }
10644 if (!Transform ||
10645 Inst.getOperand(5).getReg() != 0 ||
10646 HasWideQualifier)
10647 break;
10648 MCInst TmpInst;
10649 TmpInst.setOpcode(ARM::tADDhirr);
10650 TmpInst.addOperand(Inst.getOperand(0));
10651 TmpInst.addOperand(Inst.getOperand(0));
10652 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10653 TmpInst.addOperand(Inst.getOperand(3));
10654 TmpInst.addOperand(Inst.getOperand(4));
10655 Inst = TmpInst;
10656 return true;
10657 }
10658 case ARM::tADDrSP:
10659 // If the non-SP source operand and the destination operand are not the
10660 // same, we need to use the 32-bit encoding if it's available.
10661 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10662 Inst.setOpcode(ARM::t2ADDrr);
10663 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10664 return true;
10665 }
10666 break;
10667 case ARM::tB:
10668 // A Thumb conditional branch outside of an IT block is a tBcc.
10669 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10670 Inst.setOpcode(ARM::tBcc);
10671 return true;
10672 }
10673 break;
10674 case ARM::t2B:
10675 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10676 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10677 Inst.setOpcode(ARM::t2Bcc);
10678 return true;
10679 }
10680 break;
10681 case ARM::t2Bcc:
10682 // If the conditional is AL or we're in an IT block, we really want t2B.
10683 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10684 Inst.setOpcode(ARM::t2B);
10685 return true;
10686 }
10687 break;
10688 case ARM::tBcc:
10689 // If the conditional is AL, we really want tB.
10690 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10691 Inst.setOpcode(ARM::tB);
10692 return true;
10693 }
10694 break;
10695 case ARM::tLDMIA: {
10696 // If the register list contains any high registers, or if the writeback
10697 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10698 // instead if we're in Thumb2. Otherwise, this should have generated
10699 // an error in validateInstruction().
10700 unsigned Rn = Inst.getOperand(0).getReg();
10701 bool hasWritebackToken =
10702 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
10703 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
10704 bool listContainsBase;
10705 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10706 (!listContainsBase && !hasWritebackToken) ||
10707 (listContainsBase && hasWritebackToken)) {
10708 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10709 assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail
("isThumbTwo()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10709, __extension__ __PRETTY_FUNCTION__))
;
10710 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10711 // If we're switching to the updating version, we need to insert
10712 // the writeback tied operand.
10713 if (hasWritebackToken)
10714 Inst.insert(Inst.begin(),
10715 MCOperand::createReg(Inst.getOperand(0).getReg()));
10716 return true;
10717 }
10718 break;
10719 }
10720 case ARM::tSTMIA_UPD: {
10721 // If the register list contains any high registers, we need to use
10722 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10723 // should have generated an error in validateInstruction().
10724 unsigned Rn = Inst.getOperand(0).getReg();
10725 bool listContainsBase;
10726 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10727 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10728 assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail
("isThumbTwo()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10728, __extension__ __PRETTY_FUNCTION__))
;
10729 Inst.setOpcode(ARM::t2STMIA_UPD);
10730 return true;
10731 }
10732 break;
10733 }
10734 case ARM::tPOP: {
10735 bool listContainsBase;
10736 // If the register list contains any high registers, we need to use
10737 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10738 // should have generated an error in validateInstruction().
10739 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10740 return false;
10741 assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail
("isThumbTwo()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10741, __extension__ __PRETTY_FUNCTION__))
;
10742 Inst.setOpcode(ARM::t2LDMIA_UPD);
10743 // Add the base register and writeback operands.
10744 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10745 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10746 return true;
10747 }
10748 case ARM::tPUSH: {
10749 bool listContainsBase;
10750 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10751 return false;
10752 assert(isThumbTwo())(static_cast <bool> (isThumbTwo()) ? void (0) : __assert_fail
("isThumbTwo()", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10752, __extension__ __PRETTY_FUNCTION__))
;
10753 Inst.setOpcode(ARM::t2STMDB_UPD);
10754 // Add the base register and writeback operands.
10755 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10756 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10757 return true;
10758 }
10759 case ARM::t2MOVi:
10760 // If we can use the 16-bit encoding and the user didn't explicitly
10761 // request the 32-bit variant, transform it here.
10762 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10763 (Inst.getOperand(1).isImm() &&
10764 (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10765 Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10766 !HasWideQualifier) {
10767 // The operands aren't in the same order for tMOVi8...
10768 MCInst TmpInst;
10769 TmpInst.setOpcode(ARM::tMOVi8);
10770 TmpInst.addOperand(Inst.getOperand(0));
10771 TmpInst.addOperand(Inst.getOperand(4));
10772 TmpInst.addOperand(Inst.getOperand(1));
10773 TmpInst.addOperand(Inst.getOperand(2));
10774 TmpInst.addOperand(Inst.getOperand(3));
10775 Inst = TmpInst;
10776 return true;
10777 }
10778 break;
10779
10780 case ARM::t2MOVr:
10781 // If we can use the 16-bit encoding and the user didn't explicitly
10782 // request the 32-bit variant, transform it here.
10783 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10784 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10785 Inst.getOperand(2).getImm() == ARMCC::AL &&
10786 Inst.getOperand(4).getReg() == ARM::CPSR &&
10787 !HasWideQualifier) {
10788 // The operands aren't the same for tMOV[S]r... (no cc_out)
10789 MCInst TmpInst;
// NOTE(review): the guard above already requires operand 4 == ARM::CPSR
// (nonzero), so Op is always tMOVSr here and the tMOVr path below looks
// unreachable — confirm before relying on it.
10790 unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10791 TmpInst.setOpcode(Op);
10792 TmpInst.addOperand(Inst.getOperand(0));
10793 TmpInst.addOperand(Inst.getOperand(1));
10794 if (Op == ARM::tMOVr) {
10795 TmpInst.addOperand(Inst.getOperand(2));
10796 TmpInst.addOperand(Inst.getOperand(3));
10797 }
10798 Inst = TmpInst;
10799 return true;
10800 }
10801 break;
10802
10803 case ARM::t2SXTH:
10804 case ARM::t2SXTB:
10805 case ARM::t2UXTH:
10806 case ARM::t2UXTB:
10807 // If we can use the 16-bit encoding and the user didn't explicitly
10808 // request the 32-bit variant, transform it here.
10809 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10810 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10811 Inst.getOperand(2).getImm() == 0 &&
10812 !HasWideQualifier) {
10813 unsigned NewOpc;
10814 switch (Inst.getOpcode()) {
10815 default: llvm_unreachable("Illegal opcode!")::llvm::llvm_unreachable_internal("Illegal opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10815)
;
10816 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10817 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10818 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10819 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10820 }
10821 // The operands aren't the same for thumb1 (no rotate operand).
10822 MCInst TmpInst;
10823 TmpInst.setOpcode(NewOpc);
10824 TmpInst.addOperand(Inst.getOperand(0));
10825 TmpInst.addOperand(Inst.getOperand(1));
10826 TmpInst.addOperand(Inst.getOperand(3));
10827 TmpInst.addOperand(Inst.getOperand(4));
10828 Inst = TmpInst;
10829 return true;
10830 }
10831 break;
10832
10833 case ARM::MOVsi: {
10834 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10835 // rrx shifts and asr/lsr of #32 is encoded as 0
10836 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10837 return false;
10838 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10839 // Shifting by zero is accepted as a vanilla 'MOVr'
10840 MCInst TmpInst;
10841 TmpInst.setOpcode(ARM::MOVr);
10842 TmpInst.addOperand(Inst.getOperand(0));
10843 TmpInst.addOperand(Inst.getOperand(1));
10844 TmpInst.addOperand(Inst.getOperand(3));
10845 TmpInst.addOperand(Inst.getOperand(4));
10846 TmpInst.addOperand(Inst.getOperand(5));
10847 Inst = TmpInst;
10848 return true;
10849 }
10850 return false;
10851 }
10852 case ARM::ANDrsi:
10853 case ARM::ORRrsi:
10854 case ARM::EORrsi:
10855 case ARM::BICrsi:
10856 case ARM::SUBrsi:
10857 case ARM::ADDrsi: {
10858 unsigned newOpc;
10859 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
10860 if (SOpc == ARM_AM::rrx) return false;
10861 switch (Inst.getOpcode()) {
10862 default: llvm_unreachable("unexpected opcode!")::llvm::llvm_unreachable_internal("unexpected opcode!", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10862)
;
10863 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10864 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10865 case ARM::EORrsi: newOpc = ARM::EORrr; break;
10866 case ARM::BICrsi: newOpc = ARM::BICrr; break;
10867 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10868 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10869 }
10870 // If the shift is by zero, use the non-shifted instruction definition.
10871 // The exception is for right shifts, where 0 == 32
10872 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10873 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10874 MCInst TmpInst;
10875 TmpInst.setOpcode(newOpc);
10876 TmpInst.addOperand(Inst.getOperand(0));
10877 TmpInst.addOperand(Inst.getOperand(1));
10878 TmpInst.addOperand(Inst.getOperand(2));
10879 TmpInst.addOperand(Inst.getOperand(4));
10880 TmpInst.addOperand(Inst.getOperand(5));
10881 TmpInst.addOperand(Inst.getOperand(6));
10882 Inst = TmpInst;
10883 return true;
10884 }
10885 return false;
10886 }
10887 case ARM::ITasm:
10888 case ARM::t2IT: {
10889 // Set up the IT block state according to the IT instruction we just
10890 // matched.
10891 assert(!inITBlock() && "nested IT blocks?!")(static_cast <bool> (!inITBlock() && "nested IT blocks?!"
) ? void (0) : __assert_fail ("!inITBlock() && \"nested IT blocks?!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 10891, __extension__
__PRETTY_FUNCTION__))
;
10892 startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10893 Inst.getOperand(1).getImm());
10894 break;
10895 }
10896 case ARM::t2LSLrr:
10897 case ARM::t2LSRrr:
10898 case ARM::t2ASRrr:
10899 case ARM::t2SBCrr:
10900 case ARM::t2RORrr:
10901 case ARM::t2BICrr:
10902 // Assemblers should use the narrow encodings of these instructions when permissible.
10903 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10904 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10905 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10906 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10907 !HasWideQualifier) {
10908 unsigned NewOpc;
10909 switch (Inst.getOpcode()) {
10910 default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10910)
;
10911 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10912 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10913 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10914 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10915 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10916 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10917 }
10918 MCInst TmpInst;
10919 TmpInst.setOpcode(NewOpc);
10920 TmpInst.addOperand(Inst.getOperand(0));
10921 TmpInst.addOperand(Inst.getOperand(5));
10922 TmpInst.addOperand(Inst.getOperand(1));
10923 TmpInst.addOperand(Inst.getOperand(2));
10924 TmpInst.addOperand(Inst.getOperand(3));
10925 TmpInst.addOperand(Inst.getOperand(4));
10926 Inst = TmpInst;
10927 return true;
10928 }
10929 return false;
10930
10931 case ARM::t2ANDrr:
10932 case ARM::t2EORrr:
10933 case ARM::t2ADCrr:
10934 case ARM::t2ORRrr:
10935 // Assemblers should use the narrow encodings of these instructions when permissible.
10936 // These instructions are special in that they are commutable, so shorter encodings
10937 // are available more often.
10938 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10939 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10940 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10941 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10942 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10943 !HasWideQualifier) {
10944 unsigned NewOpc;
10945 switch (Inst.getOpcode()) {
10946 default: llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 10946)
;
10947 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10948 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10949 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10950 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10951 }
10952 MCInst TmpInst;
10953 TmpInst.setOpcode(NewOpc);
10954 TmpInst.addOperand(Inst.getOperand(0));
10955 TmpInst.addOperand(Inst.getOperand(5));
10956 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
10957 TmpInst.addOperand(Inst.getOperand(1));
10958 TmpInst.addOperand(Inst.getOperand(2));
10959 } else {
10960 TmpInst.addOperand(Inst.getOperand(2));
10961 TmpInst.addOperand(Inst.getOperand(1));
10962 }
10963 TmpInst.addOperand(Inst.getOperand(3));
10964 TmpInst.addOperand(Inst.getOperand(4));
10965 Inst = TmpInst;
10966 return true;
10967 }
10968 return false;
10969 case ARM::MVE_VPST:
10970 case ARM::MVE_VPTv16i8:
10971 case ARM::MVE_VPTv8i16:
10972 case ARM::MVE_VPTv4i32:
10973 case ARM::MVE_VPTv16u8:
10974 case ARM::MVE_VPTv8u16:
10975 case ARM::MVE_VPTv4u32:
10976 case ARM::MVE_VPTv16s8:
10977 case ARM::MVE_VPTv8s16:
10978 case ARM::MVE_VPTv4s32:
10979 case ARM::MVE_VPTv4f32:
10980 case ARM::MVE_VPTv8f16:
10981 case ARM::MVE_VPTv16i8r:
10982 case ARM::MVE_VPTv8i16r:
10983 case ARM::MVE_VPTv4i32r:
10984 case ARM::MVE_VPTv16u8r:
10985 case ARM::MVE_VPTv8u16r:
10986 case ARM::MVE_VPTv4u32r:
10987 case ARM::MVE_VPTv16s8r:
10988 case ARM::MVE_VPTv8s16r:
10989 case ARM::MVE_VPTv4s32r:
10990 case ARM::MVE_VPTv4f32r:
10991 case ARM::MVE_VPTv8f16r: {
10992 assert(!inVPTBlock() && "Nested VPT blocks are not allowed")(static_cast <bool> (!inVPTBlock() && "Nested VPT blocks are not allowed"
) ? void (0) : __assert_fail ("!inVPTBlock() && \"Nested VPT blocks are not allowed\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 10992, __extension__
__PRETTY_FUNCTION__))
;
10993 MCOperand &MO = Inst.getOperand(0);
10994 VPTState.Mask = MO.getImm();
10995 VPTState.CurPosition = 0;
10996 break;
10997 }
10998 }
10999 return false;
11000}
11001
11002unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
11003 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
11004 // suffix depending on whether they're in an IT block or not.
11005 unsigned Opc = Inst.getOpcode();
11006 const MCInstrDesc &MCID = MII.get(Opc);
11007 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
11008 assert(MCID.hasOptionalDef() &&(static_cast <bool> (MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand"
) ? void (0) : __assert_fail ("MCID.hasOptionalDef() && \"optionally flag setting instruction missing optional def operand\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 11009, __extension__
__PRETTY_FUNCTION__))
11009 "optionally flag setting instruction missing optional def operand")(static_cast <bool> (MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand"
) ? void (0) : __assert_fail ("MCID.hasOptionalDef() && \"optionally flag setting instruction missing optional def operand\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 11009, __extension__
__PRETTY_FUNCTION__))
;
11010 assert(MCID.NumOperands == Inst.getNumOperands() &&(static_cast <bool> (MCID.NumOperands == Inst.getNumOperands
() && "operand count mismatch!") ? void (0) : __assert_fail
("MCID.NumOperands == Inst.getNumOperands() && \"operand count mismatch!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 11011, __extension__
__PRETTY_FUNCTION__))
11011 "operand count mismatch!")(static_cast <bool> (MCID.NumOperands == Inst.getNumOperands
() && "operand count mismatch!") ? void (0) : __assert_fail
("MCID.NumOperands == Inst.getNumOperands() && \"operand count mismatch!\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 11011, __extension__
__PRETTY_FUNCTION__))
;
11012 // Find the optional-def operand (cc_out).
11013 unsigned OpNo;
11014 for (OpNo = 0;
11015 OpNo < MCID.NumOperands && !MCID.operands()[OpNo].isOptionalDef();
11016 ++OpNo)
11017 ;
11018 // If we're parsing Thumb1, reject it completely.
11019 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
11020 return Match_RequiresFlagSetting;
11021 // If we're parsing Thumb2, which form is legal depends on whether we're
11022 // in an IT block.
11023 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
11024 !inITBlock())
11025 return Match_RequiresITBlock;
11026 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
11027 inITBlock())
11028 return Match_RequiresNotITBlock;
11029 // LSL with zero immediate is not allowed in an IT block
11030 if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
11031 return Match_RequiresNotITBlock;
11032 } else if (isThumbOne()) {
11033 // Some high-register supporting Thumb1 encodings only allow both registers
11034 // to be from r0-r7 when in Thumb2.
11035 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
11036 isARMLowRegister(Inst.getOperand(1).getReg()) &&
11037 isARMLowRegister(Inst.getOperand(2).getReg()))
11038 return Match_RequiresThumb2;
11039 // Others only require ARMv6 or later.
11040 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11041 isARMLowRegister(Inst.getOperand(0).getReg()) &&
11042 isARMLowRegister(Inst.getOperand(1).getReg()))
11043 return Match_RequiresV6;
11044 }
11045
11046 // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
11047 // than the loop below can handle, so it uses the GPRnopc register class and
11048 // we do SP handling here.
11049 if (Opc == ARM::t2MOVr && !hasV8Ops())
11050 {
11051 // SP as both source and destination is not allowed
11052 if (Inst.getOperand(0).getReg() == ARM::SP &&
11053 Inst.getOperand(1).getReg() == ARM::SP)
11054 return Match_RequiresV8;
11055 // When flags-setting SP as either source or destination is not allowed
11056 if (Inst.getOperand(4).getReg() == ARM::CPSR &&
11057 (Inst.getOperand(0).getReg() == ARM::SP ||
11058 Inst.getOperand(1).getReg() == ARM::SP))
11059 return Match_RequiresV8;
11060 }
11061
11062 switch (Inst.getOpcode()) {
11063 case ARM::VMRS:
11064 case ARM::VMSR:
11065 case ARM::VMRS_FPCXTS:
11066 case ARM::VMRS_FPCXTNS:
11067 case ARM::VMSR_FPCXTS:
11068 case ARM::VMSR_FPCXTNS:
11069 case ARM::VMRS_FPSCR_NZCVQC:
11070 case ARM::VMSR_FPSCR_NZCVQC:
11071 case ARM::FMSTAT:
11072 case ARM::VMRS_VPR:
11073 case ARM::VMRS_P0:
11074 case ARM::VMSR_VPR:
11075 case ARM::VMSR_P0:
11076 // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
11077 // ARMv8-A.
11078 if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
11079 (isThumb() && !hasV8Ops()))
11080 return Match_InvalidOperand;
11081 break;
11082 case ARM::t2TBB:
11083 case ARM::t2TBH:
11084 // Rn = sp is only allowed with ARMv8-A
11085 if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
11086 return Match_RequiresV8;
11087 break;
11088 default:
11089 break;
11090 }
11091
11092 for (unsigned I = 0; I < MCID.NumOperands; ++I)
11093 if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
11094 // rGPRRegClass excludes PC, and also excluded SP before ARMv8
11095 const auto &Op = Inst.getOperand(I);
11096 if (!Op.isReg()) {
11097 // This can happen in awkward cases with tied operands, e.g. a
11098 // writeback load/store with a complex addressing mode in
11099 // which there's an output operand corresponding to the
11100 // updated written-back base register: the Tablegen-generated
11101 // AsmMatcher will have written a placeholder operand to that
11102 // slot in the form of an immediate 0, because it can't
11103 // generate the register part of the complex addressing-mode
11104 // operand ahead of time.
11105 continue;
11106 }
11107
11108 unsigned Reg = Op.getReg();
11109 if ((Reg == ARM::SP) && !hasV8Ops())
11110 return Match_RequiresV8;
11111 else if (Reg == ARM::PC)
11112 return Match_InvalidOperand;
11113 }
11114
11115 return Match_Success;
11116}
11117
11118namespace llvm {
11119
11120template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11121 return true; // In an assembly source, no need to second-guess
11122}
11123
11124} // end namespace llvm
11125
11126// Returns true if Inst is unpredictable if it is in and IT block, but is not
11127// the last instruction in the block.
11128bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11129 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11130
11131 // All branch & call instructions terminate IT blocks with the exception of
11132 // SVC.
11133 if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11134 MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11135 return true;
11136
11137 // Any arithmetic instruction which writes to the PC also terminates the IT
11138 // block.
11139 if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11140 return true;
11141
11142 return false;
11143}
11144
11145unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
11146 SmallVectorImpl<NearMissInfo> &NearMisses,
11147 bool MatchingInlineAsm,
11148 bool &EmitInITBlock,
11149 MCStreamer &Out) {
11150 // If we can't use an implicit IT block here, just match as normal.
11151 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
2
Assuming the condition is false
3
Taking false branch
11152 return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11153
11154 // Try to match the instruction in an extension of the current IT block (if
11155 // there is one).
11156 if (inImplicitITBlock()) {
4
Taking true branch
11157 extendImplicitITBlock(ITState.Cond);
5
Calling 'ARMAsmParser::extendImplicitITBlock'
11158 if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11159 Match_Success) {
11160 // The match succeded, but we still have to check that the instruction is
11161 // valid in this implicit IT block.
11162 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11163 if (MCID.isPredicable()) {
11164 ARMCC::CondCodes InstCond =
11165 (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11166 .getImm();
11167 ARMCC::CondCodes ITCond = currentITCond();
11168 if (InstCond == ITCond) {
11169 EmitInITBlock = true;
11170 return Match_Success;
11171 } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
11172 invertCurrentITCondition();
11173 EmitInITBlock = true;
11174 return Match_Success;
11175 }
11176 }
11177 }
11178 rewindImplicitITPosition();
11179 }
11180
11181 // Finish the current IT block, and try to match outside any IT block.
11182 flushPendingInstructions(Out);
11183 unsigned PlainMatchResult =
11184 MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11185 if (PlainMatchResult == Match_Success) {
11186 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11187 if (MCID.isPredicable()) {
11188 ARMCC::CondCodes InstCond =
11189 (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11190 .getImm();
11191 // Some forms of the branch instruction have their own condition code
11192 // fields, so can be conditionally executed without an IT block.
11193 if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
11194 EmitInITBlock = false;
11195 return Match_Success;
11196 }
11197 if (InstCond == ARMCC::AL) {
11198 EmitInITBlock = false;
11199 return Match_Success;
11200 }
11201 } else {
11202 EmitInITBlock = false;
11203 return Match_Success;
11204 }
11205 }
11206
11207 // Try to match in a new IT block. The matcher doesn't check the actual
11208 // condition, so we create an IT block with a dummy condition, and fix it up
11209 // once we know the actual condition.
11210 startImplicitITBlock();
11211 if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11212 Match_Success) {
11213 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11214 if (MCID.isPredicable()) {
11215 ITState.Cond =
11216 (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11217 .getImm();
11218 EmitInITBlock = true;
11219 return Match_Success;
11220 }
11221 }
11222 discardImplicitITBlock();
11223
11224 // If none of these succeed, return the error we got when trying to match
11225 // outside any IT blocks.
11226 EmitInITBlock = false;
11227 return PlainMatchResult;
11228}
11229
11230static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11231 unsigned VariantID = 0);
11232
11233static const char *getSubtargetFeatureName(uint64_t Val);
11234bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
11235 OperandVector &Operands,
11236 MCStreamer &Out, uint64_t &ErrorInfo,
11237 bool MatchingInlineAsm) {
11238 MCInst Inst;
11239 unsigned MatchResult;
11240 bool PendConditionalInstruction = false;
11241
11242 SmallVector<NearMissInfo, 4> NearMisses;
11243 MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
1
Calling 'ARMAsmParser::MatchInstruction'
11244 PendConditionalInstruction, Out);
11245
11246 switch (MatchResult) {
11247 case Match_Success:
11248 LLVM_DEBUG(dbgs() << "Parsed as: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Parsed as: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
11249 Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Parsed as: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
11250 dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Parsed as: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
;
11251
11252 // Context sensitive operand constraints aren't handled by the matcher,
11253 // so check them here.
11254 if (validateInstruction(Inst, Operands)) {
11255 // Still progress the IT block, otherwise one wrong condition causes
11256 // nasty cascading errors.
11257 forwardITPosition();
11258 forwardVPTPosition();
11259 return true;
11260 }
11261
11262 {
11263 // Some instructions need post-processing to, for example, tweak which
11264 // encoding is selected. Loop on it while changes happen so the
11265 // individual transformations can chain off each other. E.g.,
11266 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
11267 while (processInstruction(Inst, Operands, Out))
11268 LLVM_DEBUG(dbgs() << "Changed to: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Changed to: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
11269 Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Changed to: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
11270 dbgs() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Changed to: "; Inst.dump_pretty
(dbgs(), MII.getName(Inst.getOpcode())); dbgs() << "\n"
; } } while (false)
;
11271 }
11272
11273 // Only move forward at the very end so that everything in validate
11274 // and process gets a consistent answer about whether we're in an IT
11275 // block.
11276 forwardITPosition();
11277 forwardVPTPosition();
11278
11279 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
11280 // doesn't actually encode.
11281 if (Inst.getOpcode() == ARM::ITasm)
11282 return false;
11283
11284 Inst.setLoc(IDLoc);
11285 if (PendConditionalInstruction) {
11286 PendingConditionalInsts.push_back(Inst);
11287 if (isITBlockFull() || isITBlockTerminator(Inst))
11288 flushPendingInstructions(Out);
11289 } else {
11290 Out.emitInstruction(Inst, getSTI());
11291 }
11292 return false;
11293 case Match_NearMisses:
11294 ReportNearMisses(NearMisses, IDLoc, Operands);
11295 return true;
11296 case Match_MnemonicFail: {
11297 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11298 std::string Suggestion = ARMMnemonicSpellCheck(
11299 ((ARMOperand &)*Operands[0]).getToken(), FBS);
11300 return Error(IDLoc, "invalid instruction" + Suggestion,
11301 ((ARMOperand &)*Operands[0]).getLocRange());
11302 }
11303 }
11304
11305 llvm_unreachable("Implement any new match types added!")::llvm::llvm_unreachable_internal("Implement any new match types added!"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 11305)
;
11306}
11307
11308/// parseDirective parses the arm specific directives
11309bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
11310 const MCContext::Environment Format = getContext().getObjectFileType();
11311 bool IsMachO = Format == MCContext::IsMachO;
11312 bool IsCOFF = Format == MCContext::IsCOFF;
11313
11314 std::string IDVal = DirectiveID.getIdentifier().lower();
11315 if (IDVal == ".word")
11316 parseLiteralValues(4, DirectiveID.getLoc());
11317 else if (IDVal == ".short" || IDVal == ".hword")
11318 parseLiteralValues(2, DirectiveID.getLoc());
11319 else if (IDVal == ".thumb")
11320 parseDirectiveThumb(DirectiveID.getLoc());
11321 else if (IDVal == ".arm")
11322 parseDirectiveARM(DirectiveID.getLoc());
11323 else if (IDVal == ".thumb_func")
11324 parseDirectiveThumbFunc(DirectiveID.getLoc());
11325 else if (IDVal == ".code")
11326 parseDirectiveCode(DirectiveID.getLoc());
11327 else if (IDVal == ".syntax")
11328 parseDirectiveSyntax(DirectiveID.getLoc());
11329 else if (IDVal == ".unreq")
11330 parseDirectiveUnreq(DirectiveID.getLoc());
11331 else if (IDVal == ".fnend")
11332 parseDirectiveFnEnd(DirectiveID.getLoc());
11333 else if (IDVal == ".cantunwind")
11334 parseDirectiveCantUnwind(DirectiveID.getLoc());
11335 else if (IDVal == ".personality")
11336 parseDirectivePersonality(DirectiveID.getLoc());
11337 else if (IDVal == ".handlerdata")
11338 parseDirectiveHandlerData(DirectiveID.getLoc());
11339 else if (IDVal == ".setfp")
11340 parseDirectiveSetFP(DirectiveID.getLoc());
11341 else if (IDVal == ".pad")
11342 parseDirectivePad(DirectiveID.getLoc());
11343 else if (IDVal == ".save")
11344 parseDirectiveRegSave(DirectiveID.getLoc(), false);
11345 else if (IDVal == ".vsave")
11346 parseDirectiveRegSave(DirectiveID.getLoc(), true);
11347 else if (IDVal == ".ltorg" || IDVal == ".pool")
11348 parseDirectiveLtorg(DirectiveID.getLoc());
11349 else if (IDVal == ".even")
11350 parseDirectiveEven(DirectiveID.getLoc());
11351 else if (IDVal == ".personalityindex")
11352 parseDirectivePersonalityIndex(DirectiveID.getLoc());
11353 else if (IDVal == ".unwind_raw")
11354 parseDirectiveUnwindRaw(DirectiveID.getLoc());
11355 else if (IDVal == ".movsp")
11356 parseDirectiveMovSP(DirectiveID.getLoc());
11357 else if (IDVal == ".arch_extension")
11358 parseDirectiveArchExtension(DirectiveID.getLoc());
11359 else if (IDVal == ".align")
11360 return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
11361 else if (IDVal == ".thumb_set")
11362 parseDirectiveThumbSet(DirectiveID.getLoc());
11363 else if (IDVal == ".inst")
11364 parseDirectiveInst(DirectiveID.getLoc());
11365 else if (IDVal == ".inst.n")
11366 parseDirectiveInst(DirectiveID.getLoc(), 'n');
11367 else if (IDVal == ".inst.w")
11368 parseDirectiveInst(DirectiveID.getLoc(), 'w');
11369 else if (!IsMachO && !IsCOFF) {
11370 if (IDVal == ".arch")
11371 parseDirectiveArch(DirectiveID.getLoc());
11372 else if (IDVal == ".cpu")
11373 parseDirectiveCPU(DirectiveID.getLoc());
11374 else if (IDVal == ".eabi_attribute")
11375 parseDirectiveEabiAttr(DirectiveID.getLoc());
11376 else if (IDVal == ".fpu")
11377 parseDirectiveFPU(DirectiveID.getLoc());
11378 else if (IDVal == ".fnstart")
11379 parseDirectiveFnStart(DirectiveID.getLoc());
11380 else if (IDVal == ".object_arch")
11381 parseDirectiveObjectArch(DirectiveID.getLoc());
11382 else if (IDVal == ".tlsdescseq")
11383 parseDirectiveTLSDescSeq(DirectiveID.getLoc());
11384 else
11385 return true;
11386 } else if (IsCOFF) {
11387 if (IDVal == ".seh_stackalloc")
11388 parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
11389 else if (IDVal == ".seh_stackalloc_w")
11390 parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
11391 else if (IDVal == ".seh_save_regs")
11392 parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
11393 else if (IDVal == ".seh_save_regs_w")
11394 parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
11395 else if (IDVal == ".seh_save_sp")
11396 parseDirectiveSEHSaveSP(DirectiveID.getLoc());
11397 else if (IDVal == ".seh_save_fregs")
11398 parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
11399 else if (IDVal == ".seh_save_lr")
11400 parseDirectiveSEHSaveLR(DirectiveID.getLoc());
11401 else if (IDVal == ".seh_endprologue")
11402 parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
11403 else if (IDVal == ".seh_endprologue_fragment")
11404 parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
11405 else if (IDVal == ".seh_nop")
11406 parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
11407 else if (IDVal == ".seh_nop_w")
11408 parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
11409 else if (IDVal == ".seh_startepilogue")
11410 parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
11411 else if (IDVal == ".seh_startepilogue_cond")
11412 parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
11413 else if (IDVal == ".seh_endepilogue")
11414 parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
11415 else if (IDVal == ".seh_custom")
11416 parseDirectiveSEHCustom(DirectiveID.getLoc());
11417 else
11418 return true;
11419 } else
11420 return true;
11421 return false;
11422}
11423
11424/// parseLiteralValues
11425/// ::= .hword expression [, expression]*
11426/// ::= .short expression [, expression]*
11427/// ::= .word expression [, expression]*
11428bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11429 auto parseOne = [&]() -> bool {
11430 const MCExpr *Value;
11431 if (getParser().parseExpression(Value))
11432 return true;
11433 getParser().getStreamer().emitValue(Value, Size, L);
11434 return false;
11435 };
11436 return (parseMany(parseOne));
11437}
11438
11439/// parseDirectiveThumb
11440/// ::= .thumb
11441bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11442 if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11443 return true;
11444
11445 if (!isThumb())
11446 SwitchMode();
11447
11448 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11449 return false;
11450}
11451
11452/// parseDirectiveARM
11453/// ::= .arm
11454bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11455 if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11456 return true;
11457
11458 if (isThumb())
11459 SwitchMode();
11460 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11461 return false;
11462}
11463
11464void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
11465 // We need to flush the current implicit IT block on a label, because it is
11466 // not legal to branch into an IT block.
11467 flushPendingInstructions(getStreamer());
11468}
11469
11470void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11471 if (NextSymbolIsThumb) {
11472 getParser().getStreamer().emitThumbFunc(Symbol);
11473 NextSymbolIsThumb = false;
11474 }
11475}
11476
11477/// parseDirectiveThumbFunc
11478/// ::= .thumbfunc symbol_name
11479bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11480 MCAsmParser &Parser = getParser();
11481 const auto Format = getContext().getObjectFileType();
11482 bool IsMachO = Format == MCContext::IsMachO;
11483
11484 // Darwin asm has (optionally) function name after .thumb_func direction
11485 // ELF doesn't
11486
11487 if (IsMachO) {
11488 if (Parser.getTok().is(AsmToken::Identifier) ||
11489 Parser.getTok().is(AsmToken::String)) {
11490 MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11491 Parser.getTok().getIdentifier());
11492 getParser().getStreamer().emitThumbFunc(Func);
11493 Parser.Lex();
11494 if (parseEOL())
11495 return true;
11496 return false;
11497 }
11498 }
11499
11500 if (parseEOL())
11501 return true;
11502
11503 // .thumb_func implies .thumb
11504 if (!isThumb())
11505 SwitchMode();
11506
11507 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11508
11509 NextSymbolIsThumb = true;
11510 return false;
11511}
11512
11513/// parseDirectiveSyntax
11514/// ::= .syntax unified | divided
11515bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11516 MCAsmParser &Parser = getParser();
11517 const AsmToken &Tok = Parser.getTok();
11518 if (Tok.isNot(AsmToken::Identifier)) {
11519 Error(L, "unexpected token in .syntax directive");
11520 return false;
11521 }
11522
11523 StringRef Mode = Tok.getString();
11524 Parser.Lex();
11525 if (check(Mode == "divided" || Mode == "DIVIDED", L,
11526 "'.syntax divided' arm assembly not supported") ||
11527 check(Mode != "unified" && Mode != "UNIFIED", L,
11528 "unrecognized syntax mode in .syntax directive") ||
11529 parseEOL())
11530 return true;
11531
11532 // TODO tell the MC streamer the mode
11533 // getParser().getStreamer().Emit???();
11534 return false;
11535}
11536
11537/// parseDirectiveCode
11538/// ::= .code 16 | 32
11539bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11540 MCAsmParser &Parser = getParser();
11541 const AsmToken &Tok = Parser.getTok();
11542 if (Tok.isNot(AsmToken::Integer))
11543 return Error(L, "unexpected token in .code directive");
11544 int64_t Val = Parser.getTok().getIntVal();
11545 if (Val != 16 && Val != 32) {
11546 Error(L, "invalid operand to .code directive");
11547 return false;
11548 }
11549 Parser.Lex();
11550
11551 if (parseEOL())
11552 return true;
11553
11554 if (Val == 16) {
11555 if (!hasThumb())
11556 return Error(L, "target does not support Thumb mode");
11557
11558 if (!isThumb())
11559 SwitchMode();
11560 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11561 } else {
11562 if (!hasARM())
11563 return Error(L, "target does not support ARM mode");
11564
11565 if (isThumb())
11566 SwitchMode();
11567 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11568 }
11569
11570 return false;
11571}
11572
11573/// parseDirectiveReq
11574/// ::= name .req registername
11575bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11576 MCAsmParser &Parser = getParser();
11577 Parser.Lex(); // Eat the '.req' token.
11578 MCRegister Reg;
11579 SMLoc SRegLoc, ERegLoc;
11580 if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11581 "register name expected") ||
11582 parseEOL())
11583 return true;
11584
11585 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11586 return Error(SRegLoc,
11587 "redefinition of '" + Name + "' does not match original.");
11588
11589 return false;
11590}
11591
11592/// parseDirectiveUneq
11593/// ::= .unreq registername
11594bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11595 MCAsmParser &Parser = getParser();
11596 if (Parser.getTok().isNot(AsmToken::Identifier))
11597 return Error(L, "unexpected input in .unreq directive.");
11598 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11599 Parser.Lex(); // Eat the identifier.
11600 return parseEOL();
11601}
11602
11603// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11604// before, if supported by the new target, or emit mapping symbols for the mode
11605// switch.
11606void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11607 if (WasThumb != isThumb()) {
11608 if (WasThumb && hasThumb()) {
11609 // Stay in Thumb mode
11610 SwitchMode();
11611 } else if (!WasThumb && hasARM()) {
11612 // Stay in ARM mode
11613 SwitchMode();
11614 } else {
11615 // Mode switch forced, because the new arch doesn't support the old mode.
11616 getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11617 : MCAF_Code32);
11618 // Warn about the implcit mode switch. GAS does not switch modes here,
11619 // but instead stays in the old mode, reporting an error on any following
11620 // instructions as the mode does not exist on the target.
11621 Warning(Loc, Twine("new target does not support ") +
11622 (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11623 (!WasThumb ? "thumb" : "arm") + " mode");
11624 }
11625 }
11626}
11627
11628/// parseDirectiveArch
11629/// ::= .arch token
11630bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11631 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11632 ARM::ArchKind ID = ARM::parseArch(Arch);
11633
11634 if (ID == ARM::ArchKind::INVALID)
11635 return Error(L, "Unknown arch name");
11636
11637 bool WasThumb = isThumb();
11638 Triple T;
11639 MCSubtargetInfo &STI = copySTI();
11640 STI.setDefaultFeatures("", /*TuneCPU*/ "",
11641 ("+" + ARM::getArchName(ID)).str());
11642 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11643 FixModeAfterArchChange(WasThumb, L);
11644
11645 getTargetStreamer().emitArch(ID);
11646 return false;
11647}
11648
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int [, "str"]
///  ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  if (Parser.getTok().is(AsmToken::Identifier)) {
    // Symbolic form: map the tag name (e.g. Tag_CPU_name) to its number via
    // the ARM build-attribute table.
    StringRef Name = Parser.getTok().getIdentifier();
    std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
        Name, ARMBuildAttrs::getARMAttributeTags());
    if (!Ret) {
      // Diagnose, but deliberately return false (no hard parse failure) so
      // assembly of the rest of the file continues.
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
    Tag = *Ret;
    Parser.Lex();
  } else {
    // Numeric form: any expression that folds to an integer constant.
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseComma())
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Decide the value kind from the tag number: CPU_raw_name/CPU_name take a
  // string, "compatibility" takes an integer AND a string, and otherwise
  // tags below 32 and even tags are integers while odd tags >= 32 are
  // strings (the ABI addenda numbering convention).
  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  // "compatibility" carries integer-then-string; eat the separating comma.
  if (Tag == ARMBuildAttrs::compatibility) {
    if (Parser.parseComma())
      return true;
  }

  std::string EscapedValue;
  if (IsStringValue) {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "bad string constant");

    if (Tag == ARMBuildAttrs::also_compatible_with) {
      // This tag's payload may contain embedded NUL bytes, so honour escape
      // sequences instead of taking the raw token contents.
      if (Parser.parseEscapedString(EscapedValue))
        return Error(Parser.getTok().getLoc(), "bad escaped string constant");

      StringValue = EscapedValue;
    } else {
      StringValue = Parser.getTok().getStringContents();
      Parser.Lex();
    }
  }

  if (Parser.parseEOL())
    return true;

  if (IsIntegerValue && IsStringValue) {
    assert(Tag == ARMBuildAttrs::compatibility);
    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Tag, StringValue);
  return false;
}
11747
11748/// parseDirectiveCPU
11749/// ::= .cpu str
11750bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11751 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11752 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11753
11754 // FIXME: This is using table-gen data, but should be moved to
11755 // ARMTargetParser once that is table-gen'd.
11756 if (!getSTI().isCPUStringValid(CPU))
11757 return Error(L, "Unknown CPU name");
11758
11759 bool WasThumb = isThumb();
11760 MCSubtargetInfo &STI = copySTI();
11761 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11762 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11763 FixModeAfterArchChange(WasThumb, L);
11764
11765 return false;
11766}
11767
11768/// parseDirectiveFPU
11769/// ::= .fpu str
11770bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11771 SMLoc FPUNameLoc = getTok().getLoc();
11772 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11773
11774 ARM::FPUKind ID = ARM::parseFPU(FPU);
11775 std::vector<StringRef> Features;
11776 if (!ARM::getFPUFeatures(ID, Features))
11777 return Error(FPUNameLoc, "Unknown FPU name");
11778
11779 MCSubtargetInfo &STI = copySTI();
11780 for (auto Feature : Features)
11781 STI.ApplyFeatureFlag(Feature);
11782 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11783
11784 getTargetStreamer().emitFPU(ID);
11785 return false;
11786}
11787
11788/// parseDirectiveFnStart
11789/// ::= .fnstart
11790bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11791 if (parseEOL())
11792 return true;
11793
11794 if (UC.hasFnStart()) {
11795 Error(L, ".fnstart starts before the end of previous one");
11796 UC.emitFnStartLocNotes();
11797 return true;
11798 }
11799
11800 // Reset the unwind directives parser state
11801 UC.reset();
11802
11803 getTargetStreamer().emitFnStart();
11804
11805 UC.recordFnStart(L);
11806 return false;
11807}
11808
11809/// parseDirectiveFnEnd
11810/// ::= .fnend
11811bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11812 if (parseEOL())
11813 return true;
11814 // Check the ordering of unwind directives
11815 if (!UC.hasFnStart())
11816 return Error(L, ".fnstart must precede .fnend directive");
11817
11818 // Reset the unwind directives parser state
11819 getTargetStreamer().emitFnEnd();
11820
11821 UC.reset();
11822 return false;
11823}
11824
11825/// parseDirectiveCantUnwind
11826/// ::= .cantunwind
11827bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11828 if (parseEOL())
11829 return true;
11830
11831 UC.recordCantUnwind(L);
11832 // Check the ordering of unwind directives
11833 if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11834 return true;
11835
11836 if (UC.hasHandlerData()) {
11837 Error(L, ".cantunwind can't be used with .handlerdata directive");
11838 UC.emitHandlerDataLocNotes();
11839 return true;
11840 }
11841 if (UC.hasPersonality()) {
11842 Error(L, ".cantunwind can't be used with .personality directive");
11843 UC.emitPersonalityLocNotes();
11844 return true;
11845 }
11846
11847 getTargetStreamer().emitCantUnwind();
11848 return false;
11849}
11850
/// parseDirectivePersonality
///  ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Snapshot *before* recordPersonality() below, so a duplicate directive
  // can still be detected.
  bool HasExistingPersonality = UC.hasPersonality();

  // Parse the name of the personality routine
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return Error(L, "unexpected input in .personality directive.");
  StringRef Name(Parser.getTok().getIdentifier());
  Parser.Lex();

  if (parseEOL())
    return true;

  UC.recordPersonality(L);

  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .personality directive");
  if (UC.cantUnwind()) {
    Error(L, ".personality can't be used with .cantunwind directive");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personality must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // Create (or reuse) the symbol and hand it to the streamer.
  MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
  getTargetStreamer().emitPersonality(PR);
  return false;
}
11891
11892/// parseDirectiveHandlerData
11893/// ::= .handlerdata
11894bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
11895 if (parseEOL())
11896 return true;
11897
11898 UC.recordHandlerData(L);
11899 // Check the ordering of unwind directives
11900 if (!UC.hasFnStart())
11901 return Error(L, ".fnstart must precede .personality directive");
11902 if (UC.cantUnwind()) {
11903 Error(L, ".handlerdata can't be used with .cantunwind directive");
11904 UC.emitCantUnwindLocNotes();
11905 return true;
11906 }
11907
11908 getTargetStreamer().emitHandlerData();
11909 return false;
11910}
11911
/// parseDirectiveSetFP
///  ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  int FPReg = tryParseRegister(); // -1 if the token is not a register

  if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
      Parser.parseComma())
    return true;

  // Parse spreg. The source must be sp, or the FP register named by the
  // most recent .setfp (which still tracks the stack pointer's value).
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister();
  if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(FPReg);

  // Parse the optional offset; immediates may start with '#' or '$'.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    // Only constant-folding expressions are acceptable here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseEOL())
    return true;

  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
                                static_cast<unsigned>(SPReg), Offset);
  return false;
}
11967
/// parseDirective
///  ::= .pad offset
bool ARMAsmParser::parseDirectivePad(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .pad directive");
  if (UC.hasHandlerData())
    return Error(L, ".pad must precede .handlerdata directive");

  // Parse the offset; immediates may be introduced by '#' or '$'.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return Error(Parser.getTok().getLoc(), "'#' expected");
  Parser.Lex(); // skip hash token.

  const MCExpr *OffsetExpr;
  SMLoc ExLoc = Parser.getTok().getLoc();
  SMLoc EndLoc;
  if (getParser().parseExpression(OffsetExpr, EndLoc))
    return Error(ExLoc, "malformed pad offset");
  // Only constant-folding expressions are acceptable here.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(ExLoc, "pad offset must be an immediate");

  if (parseEOL())
    return true;

  getTargetStreamer().emitPad(CE->getValue());
  return false;
}
11999
/// parseDirectiveRegSave
///  ::= .save  { registers }
///  ::= .vsave { registers }
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .save or .vsave directives");
  if (UC.hasHandlerData())
    return Error(L, ".save or .vsave must precede .handlerdata directive");

  // RAII object to make sure parsed operands are deleted.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  // Parse the register list
  if (parseRegisterList(Operands, true, true) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  // .save takes a GPR list; .vsave takes a DPR list — reject mismatches.
  if (!IsVector && !Op.isRegList())
    return Error(L, ".save expects GPR registers");
  if (IsVector && !Op.isDPRRegList())
    return Error(L, ".vsave expects DPR registers");

  getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
  return false;
}
12025
/// parseDirectiveInst
///  ::= .inst opcode [, ...]
///  ::= .inst.n opcode [, ...]
///  ::= .inst.w opcode [, ...]
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  int Width = 4; // Encoding width in bytes; 0 = guess per-opcode (Thumb).

  if (isThumb()) {
    switch (Suffix) {
    case 'n': // .inst.n -> 16-bit Thumb encoding
      Width = 2;
      break;
    case 'w': // .inst.w -> 32-bit Thumb encoding
      break;
    default: // plain .inst: infer width from each opcode's value below
      Width = 0;
      break;
    }
  } else {
    // ARM-mode instructions are always 32-bit, so suffixes are meaningless.
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  // Parse, validate, and emit one opcode operand.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      // < 0xe800 can only be a 16-bit encoding; a leading half-word of
      // 0xe800 or above can only be a 32-bit encoding. Anything in between
      // is ambiguous and rejected.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        return Error(Loc, "cannot determine Thumb instruction size, "
                          "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
    return false;
  };

  // At least one opcode is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
12093
12094/// parseDirectiveLtorg
12095/// ::= .ltorg | .pool
12096bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
12097 if (parseEOL())
12098 return true;
12099 getTargetStreamer().emitCurrentConstantPool();
12100 return false;
12101}
12102
/// parseDirectiveEven
///  ::= .even
/// Aligns the location counter to a 2-byte boundary.
bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
  const MCSection *Section = getStreamer().getCurrentSectionOnly();

  if (parseEOL())
    return true;

  // If no section has been set up yet, initialize the default sections so
  // there is something to align into.
  if (!Section) {
    getStreamer().initSections(false, getSTI());
    Section = getStreamer().getCurrentSectionOnly();
  }

  assert(Section && "must have section to emit alignment");
  // Code sections pad with NOP-like encodings; data sections pad with zeros.
  if (Section->useCodeAlign())
    getStreamer().emitCodeAlignment(Align(2), &getSTI());
  else
    getStreamer().emitValueToAlignment(Align(2));

  return false;
}
12122
/// parseDirectivePersonalityIndex
///   ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Snapshot *before* recordPersonalityIndex() below, so a duplicate
  // personality directive can still be detected.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) || parseEOL()) {
    return true;
  }

  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index selects one of the predefined EHABI personality routines
  // (__aeabi_unwind_cpp_pr0..pr2), so it must be a small constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
12166
/// parseDirectiveUnwindRaw
///   ::= .unwind_raw offset, opcode [, opcode...]
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  // Must be inside a .fnstart region.
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(OffsetExpr))
    return Error(OffsetLoc, "expected expression");

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(OffsetLoc, "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseComma())
    return true;

  SmallVector<uint8_t, 16> Opcodes;

  // Parse one raw unwind opcode byte (must fit in 8 bits) into Opcodes.
  auto parseOne = [&]() -> bool {
    const MCExpr *OE = nullptr;
    SMLoc OpcodeLoc = getLexer().getLoc();
    if (check(getLexer().is(AsmToken::EndOfStatement) ||
                  Parser.parseExpression(OE),
              OpcodeLoc, "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
    if (!OC)
      return Error(OpcodeLoc, "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    // Any bits outside the low byte (including a negative value) are invalid.
    if (Opcode & ~0xff)
      return Error(OpcodeLoc, "invalid opcode");
    Opcodes.push_back(uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(OpcodeLoc, "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
12218
12219/// parseDirectiveTLSDescSeq
12220/// ::= .tlsdescseq tls-variable
12221bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12222 MCAsmParser &Parser = getParser();
12223
12224 if (getLexer().isNot(AsmToken::Identifier))
12225 return TokError("expected variable after '.tlsdescseq' directive");
12226
12227 const MCSymbolRefExpr *SRE =
12228 MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
12229 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
12230 Lex();
12231
12232 if (parseEOL())
12233 return true;
12234
12235 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12236 return false;
12237}
12238
/// parseDirectiveMovSP
///  ::= .movsp reg [, #offset]
bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .movsp directives");
  // .movsp is only valid while sp is still the active frame pointer
  // (i.e. no earlier .setfp/.movsp has redirected it).
  if (UC.getFPReg() != ARM::SP)
    return Error(L, "unexpected .movsp directive");

  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister(); // -1 if the token is not a register
  if (SPReg == -1)
    return Error(SPRegLoc, "register expected");
  if (SPReg == ARM::SP || SPReg == ARM::PC)
    return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");

  // Optional immediate offset, introduced by '#'.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
      return true;

    const MCExpr *OffsetExpr;
    SMLoc OffsetLoc = Parser.getTok().getLoc();

    if (Parser.parseExpression(OffsetExpr))
      return Error(OffsetLoc, "malformed offset expression");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (!CE)
      return Error(OffsetLoc, "offset must be an immediate constant");

    Offset = CE->getValue();
  }

  if (parseEOL())
    return true;

  getTargetStreamer().emitMovSP(SPReg, Offset);
  // From here on, the named register tracks the stack pointer for the
  // purposes of subsequent unwind directives.
  UC.saveFPReg(SPReg);

  return false;
}
12281
12282/// parseDirectiveObjectArch
12283/// ::= .object_arch name
12284bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12285 MCAsmParser &Parser = getParser();
12286 if (getLexer().isNot(AsmToken::Identifier))
12287 return Error(getLexer().getLoc(), "unexpected token");
12288
12289 StringRef Arch = Parser.getTok().getString();
12290 SMLoc ArchLoc = Parser.getTok().getLoc();
12291 Lex();
12292
12293 ARM::ArchKind ID = ARM::parseArch(Arch);
12294
12295 if (ID == ARM::ArchKind::INVALID)
12296 return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12297 if (parseToken(AsmToken::EndOfStatement))
12298 return true;
12299
12300 getTargetStreamer().emitObjectArch(ID);
12301 return false;
12302}
12303
/// parseDirectiveAlign
///   ::= .align
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the target
  // agnostic handling for this directive which will correctly handle this.
  if (parseOptionalToken(AsmToken::EndOfStatement)) {
    // '.align' is target specifically handled to mean 2**2 byte alignment.
    const MCSection *Section = getStreamer().getCurrentSectionOnly();
    assert(Section && "must have section to emit alignment");
    // Code sections pad with NOP-like encodings; data sections with zeros.
    if (Section->useCodeAlign())
      getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
    else
      getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
    return false;
  }
  // Returning true leaves the directive for the generic parser to handle.
  return true;
}
12321
12322/// parseDirectiveThumbSet
12323/// ::= .thumb_set name, value
12324bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12325 MCAsmParser &Parser = getParser();
12326
12327 StringRef Name;
12328 if (check(Parser.parseIdentifier(Name),
12329 "expected identifier after '.thumb_set'") ||
12330 Parser.parseComma())
12331 return true;
12332
12333 MCSymbol *Sym;
12334 const MCExpr *Value;
12335 if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12336 Parser, Sym, Value))
12337 return true;
12338
12339 getTargetStreamer().emitThumbSet(Sym, Value);
12340 return false;
12341}
12342
12343/// parseDirectiveSEHAllocStack
12344/// ::= .seh_stackalloc
12345/// ::= .seh_stackalloc_w
12346bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12347 int64_t Size;
12348 if (parseImmExpr(Size))
12349 return true;
12350 getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12351 return false;
12352}
12353
/// parseDirectiveSEHSaveRegs
/// ::= .seh_save_regs
/// ::= .seh_save_regs_w
bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
  // RAII container for the parsed operand.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  if (parseRegisterList(Operands) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!Op.isRegList())
    return Error(L, ".seh_save_regs{_w} expects GPR registers");
  const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
  // Build a bitmask of saved registers, one bit per GPR encoding.
  uint32_t Mask = 0;
  for (size_t i = 0; i < RegList.size(); ++i) {
    unsigned Reg = MRI->getEncodingValue(RegList[i]);
    if (Reg == 15) // pc -> lr
      Reg = 14;
    if (Reg == 13)
      return Error(L, ".seh_save_regs{_w} can't include SP");
    assert(Reg < 16U && "Register out of range");
    unsigned Bit = (1u << Reg);
    Mask |= Bit;
  }
  // The narrow (16-bit) opcode cannot encode r8-r12 (mask bits 8..12).
  if (!Wide && (Mask & 0x1f00) != 0)
    return Error(L,
                 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
  getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
  return false;
}
12383
12384/// parseDirectiveSEHSaveSP
12385/// ::= .seh_save_sp
12386bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12387 int Reg = tryParseRegister();
12388 if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12389 return Error(L, "expected GPR");
12390 unsigned Index = MRI->getEncodingValue(Reg);
12391 if (Index > 14 || Index == 13)
12392 return Error(L, "invalid register for .seh_save_sp");
12393 getTargetStreamer().emitARMWinCFISaveSP(Index);
12394 return false;
12395}
12396
/// parseDirectiveSEHSaveFRegs
/// ::= .seh_save_fregs
bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
  // RAII container for the parsed operand.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  if (parseRegisterList(Operands) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!Op.isDPRRegList())
    return Error(L, ".seh_save_fregs expects DPR registers");
  const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
  // Build a bitmask with one bit per D register in the list.
  uint32_t Mask = 0;
  for (size_t i = 0; i < RegList.size(); ++i) {
    unsigned Reg = MRI->getEncodingValue(RegList[i]);
    assert(Reg < 32U && "Register out of range");
    unsigned Bit = (1u << Reg);
    Mask |= Bit;
  }

  if (Mask == 0)
    return Error(L, ".seh_save_fregs missing registers");

  // Shift out the low zero bits; First becomes the lowest register number.
  unsigned First = 0;
  while ((Mask & 1) == 0) {
    First++;
    Mask >>= 1;
  }
  // After the shift, a contiguous run leaves Mask == 2^k - 1, so Mask+1 is a
  // power of two; otherwise the list has a hole.
  if (((Mask + 1) & Mask) != 0)
    return Error(L,
                 ".seh_save_fregs must take a contiguous range of registers");
  // Walk to the top of the contiguous run to find the last register.
  unsigned Last = First;
  while ((Mask & 2) != 0) {
    Last++;
    Mask >>= 1;
  }
  // The unwind opcodes encode d0-d15 and d16-d31 ranges separately, so a
  // range straddling d15/d16 cannot be represented.
  if (First < 16 && Last >= 16)
    return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
  getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
  return false;
}
12437
12438/// parseDirectiveSEHSaveLR
12439/// ::= .seh_save_lr
12440bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12441 int64_t Offset;
12442 if (parseImmExpr(Offset))
12443 return true;
12444 getTargetStreamer().emitARMWinCFISaveLR(Offset);
12445 return false;
12446}
12447
12448/// parseDirectiveSEHPrologEnd
12449/// ::= .seh_endprologue
12450/// ::= .seh_endprologue_fragment
12451bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12452 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12453 return false;
12454}
12455
12456/// parseDirectiveSEHNop
12457/// ::= .seh_nop
12458/// ::= .seh_nop_w
12459bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12460 getTargetStreamer().emitARMWinCFINop(Wide);
12461 return false;
12462}
12463
12464/// parseDirectiveSEHEpilogStart
12465/// ::= .seh_startepilogue
12466/// ::= .seh_startepilogue_cond
12467bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12468 unsigned CC = ARMCC::AL;
12469 if (Condition) {
12470 MCAsmParser &Parser = getParser();
12471 SMLoc S = Parser.getTok().getLoc();
12472 const AsmToken &Tok = Parser.getTok();
12473 if (!Tok.is(AsmToken::Identifier))
12474 return Error(S, ".seh_startepilogue_cond missing condition");
12475 CC = ARMCondCodeFromString(Tok.getString());
12476 if (CC == ~0U)
12477 return Error(S, "invalid condition");
12478 Parser.Lex(); // Eat the token.
12479 }
12480
12481 getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12482 return false;
12483}
12484
12485/// parseDirectiveSEHEpilogEnd
12486/// ::= .seh_endepilogue
12487bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12488 getTargetStreamer().emitARMWinCFIEpilogEnd();
12489 return false;
12490}
12491
12492/// parseDirectiveSEHCustom
12493/// ::= .seh_custom
12494bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12495 unsigned Opcode = 0;
12496 do {
12497 int64_t Byte;
12498 if (parseImmExpr(Byte))
12499 return true;
12500 if (Byte > 0xff || Byte < 0)
12501 return Error(L, "Invalid byte value in .seh_custom");
12502 if (Opcode > 0x00ffffff)
12503 return Error(L, "Too many bytes in .seh_custom");
12504 // Store the bytes as one big endian number in Opcode. In a multi byte
12505 // opcode sequence, the first byte can't be zero.
12506 Opcode = (Opcode << 8) | Byte;
12507 } while (parseOptionalToken(AsmToken::Comma));
12508 getTargetStreamer().emitARMWinCFICustom(Opcode);
12509 return false;
12510}
12511
/// Force static initialization.
/// Registers this asm parser with all four ARM/Thumb target variants
/// (little- and big-endian each) so the MC layer can find it.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
  RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
  RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
  RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
}
12519
12520#define GET_REGISTER_MATCHER
12521#define GET_SUBTARGET_FEATURE_NAME
12522#define GET_MATCHER_IMPLEMENTATION
12523#define GET_MNEMONIC_SPELL_CHECKER
12524#include "ARMGenAsmMatcher.inc"
12525
12526// Some diagnostics need to vary with subtarget features, so they are handled
12527// here. For example, the DPR class has either 16 or 32 registers, depending
12528// on the FPU available.
12529const char *
12530ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12531 switch (MatchError) {
12532 // rGPR contains sp starting with ARMv8.
12533 case Match_rGPR:
12534 return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12535 : "operand must be a register in range [r0, r12] or r14";
12536 // DPR contains 16 registers for some FPUs, and 32 for others.
12537 case Match_DPR:
12538 return hasD32() ? "operand must be a register in range [d0, d31]"
12539 : "operand must be a register in range [d0, d15]";
12540 case Match_DPR_RegList:
12541 return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12542 : "operand must be a list of registers in range [d0, d15]";
12543
12544 // For all other diags, use the static string from tablegen.
12545 default:
12546 return getMatchKindDiag(MatchError);
12547 }
12548}
12549
12550// Process the list of near-misses, throwing away ones we don't want to report
12551// to the user, and converting the rest to a source location and string that
12552// should be reported.
12553void
12554ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12555 SmallVectorImpl<NearMissMessage> &NearMissesOut,
12556 SMLoc IDLoc, OperandVector &Operands) {
12557 // TODO: If operand didn't match, sub in a dummy one and run target
12558 // predicate, so that we can avoid reporting near-misses that are invalid?
12559 // TODO: Many operand types dont have SuperClasses set, so we report
12560 // redundant ones.
12561 // TODO: Some operands are superclasses of registers (e.g.
12562 // MCK_RegShiftedImm), we don't have any way to represent that currently.
12563 // TODO: This is not all ARM-specific, can some of it be factored out?
12564
12565 // Record some information about near-misses that we have already seen, so
12566 // that we can avoid reporting redundant ones. For example, if there are
12567 // variants of an instruction that take 8- and 16-bit immediates, we want
12568 // to only report the widest one.
12569 std::multimap<unsigned, unsigned> OperandMissesSeen;
12570 SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12571 bool ReportedTooFewOperands = false;
12572
12573 // Process the near-misses in reverse order, so that we see more general ones
12574 // first, and so can avoid emitting more specific ones.
12575 for (NearMissInfo &I : reverse(NearMissesIn)) {
12576 switch (I.getKind()) {
12577 case NearMissInfo::NearMissOperand: {
12578 SMLoc OperandLoc =
12579 ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
12580 const char *OperandDiag =
12581 getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
12582
12583 // If we have already emitted a message for a superclass, don't also report
12584 // the sub-class. We consider all operand classes that we don't have a
12585 // specialised diagnostic for to be equal for the propose of this check,
12586 // so that we don't report the generic error multiple times on the same
12587 // operand.
12588 unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
12589 auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
12590 if (std::any_of(PrevReports.first, PrevReports.second,
12591 [DupCheckMatchClass](
12592 const std::pair<unsigned, unsigned> Pair) {
12593 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12594 return Pair.second == DupCheckMatchClass;
12595 else
12596 return isSubclass((MatchClassKind)DupCheckMatchClass,
12597 (MatchClassKind)Pair.second);
12598 }))
12599 break;
12600 OperandMissesSeen.insert(
12601 std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
12602
12603 NearMissMessage Message;
12604 Message.Loc = OperandLoc;
12605 if (OperandDiag) {
12606 Message.Message = OperandDiag;
12607 } else if (I.getOperandClass() == InvalidMatchClass) {
12608 Message.Message = "too many operands for instruction";
12609 } else {
12610 Message.Message = "invalid operand for instruction";
12611 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Missing diagnostic string for operand class "
<< getMatchClassName((MatchClassKind)I.getOperandClass
()) << I.getOperandClass() << ", error " <<
I.getOperandError() << ", opcode " << MII.getName
(I.getOpcode()) << "\n"; } } while (false)
12612 dbgs() << "Missing diagnostic string for operand class "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Missing diagnostic string for operand class "
<< getMatchClassName((MatchClassKind)I.getOperandClass
()) << I.getOperandClass() << ", error " <<
I.getOperandError() << ", opcode " << MII.getName
(I.getOpcode()) << "\n"; } } while (false)
12613 << getMatchClassName((MatchClassKind)I.getOperandClass())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Missing diagnostic string for operand class "
<< getMatchClassName((MatchClassKind)I.getOperandClass
()) << I.getOperandClass() << ", error " <<
I.getOperandError() << ", opcode " << MII.getName
(I.getOpcode()) << "\n"; } } while (false)
12614 << I.getOperandClass() << ", error " << I.getOperandError()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Missing diagnostic string for operand class "
<< getMatchClassName((MatchClassKind)I.getOperandClass
()) << I.getOperandClass() << ", error " <<
I.getOperandError() << ", opcode " << MII.getName
(I.getOpcode()) << "\n"; } } while (false)
12615 << ", opcode " << MII.getName(I.getOpcode()) << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("asm-parser")) { dbgs() << "Missing diagnostic string for operand class "
<< getMatchClassName((MatchClassKind)I.getOperandClass
()) << I.getOperandClass() << ", error " <<
I.getOperandError() << ", opcode " << MII.getName
(I.getOpcode()) << "\n"; } } while (false)
;
12616 }
12617 NearMissesOut.emplace_back(Message);
12618 break;
12619 }
12620 case NearMissInfo::NearMissFeature: {
12621 const FeatureBitset &MissingFeatures = I.getFeatures();
12622 // Don't report the same set of features twice.
12623 if (FeatureMissesSeen.count(MissingFeatures))
12624 break;
12625 FeatureMissesSeen.insert(MissingFeatures);
12626
12627 // Special case: don't report a feature set which includes arm-mode for
12628 // targets that don't have ARM mode.
12629 if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
12630 break;
12631 // Don't report any near-misses that both require switching instruction
12632 // set, and adding other subtarget features.
12633 if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
12634 MissingFeatures.count() > 1)
12635 break;
12636 if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
12637 MissingFeatures.count() > 1)
12638 break;
12639 if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
12640 (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12641 Feature_IsThumbBit})).any())
12642 break;
12643 if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
12644 break;
12645
12646 NearMissMessage Message;
12647 Message.Loc = IDLoc;
12648 raw_svector_ostream OS(Message.Message);
12649
12650 OS << "instruction requires:";
12651 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
12652 if (MissingFeatures.test(i))
12653 OS << ' ' << getSubtargetFeatureName(i);
12654
12655 NearMissesOut.emplace_back(Message);
12656
12657 break;
12658 }
12659 case NearMissInfo::NearMissPredicate: {
12660 NearMissMessage Message;
12661 Message.Loc = IDLoc;
12662 switch (I.getPredicateError()) {
12663 case Match_RequiresNotITBlock:
12664 Message.Message = "flag setting instruction only valid outside IT block";
12665 break;
12666 case Match_RequiresITBlock:
12667 Message.Message = "instruction only valid inside IT block";
12668 break;
12669 case Match_RequiresV6:
12670 Message.Message = "instruction variant requires ARMv6 or later";
12671 break;
12672 case Match_RequiresThumb2:
12673 Message.Message = "instruction variant requires Thumb2";
12674 break;
12675 case Match_RequiresV8:
12676 Message.Message = "instruction variant requires ARMv8 or later";
12677 break;
12678 case Match_RequiresFlagSetting:
12679 Message.Message = "no flag-preserving variant of this instruction available";
12680 break;
12681 case Match_InvalidOperand:
12682 Message.Message = "invalid operand for instruction";
12683 break;
12684 default:
12685 llvm_unreachable("Unhandled target predicate error")::llvm::llvm_unreachable_internal("Unhandled target predicate error"
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 12685)
;
12686 break;
12687 }
12688 NearMissesOut.emplace_back(Message);
12689 break;
12690 }
12691 case NearMissInfo::NearMissTooFewOperands: {
12692 if (!ReportedTooFewOperands) {
12693 SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
12694 NearMissesOut.emplace_back(NearMissMessage{
12695 EndLoc, StringRef("too few operands for instruction")});
12696 ReportedTooFewOperands = true;
12697 }
12698 break;
12699 }
12700 case NearMissInfo::NoNearMiss:
12701 // This should never leave the matcher.
12702 llvm_unreachable("not a near-miss")::llvm::llvm_unreachable_internal("not a near-miss", "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp"
, 12702)
;
12703 break;
12704 }
12705 }
12706}
12707
12708void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12709 SMLoc IDLoc, OperandVector &Operands) {
12710 SmallVector<NearMissMessage, 4> Messages;
12711 FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12712
12713 if (Messages.size() == 0) {
12714 // No near-misses were found, so the best we can do is "invalid
12715 // instruction".
12716 Error(IDLoc, "invalid instruction");
12717 } else if (Messages.size() == 1) {
12718 // One near miss was found, report it as the sole error.
12719 Error(Messages[0].Loc, Messages[0].Message);
12720 } else {
12721 // More than one near miss, so report a generic "invalid instruction"
12722 // error, followed by notes for each of the near-misses.
12723 Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12724 for (auto &M : Messages) {
12725 Note(M.Loc, M.Message);
12726 }
12727 }
12728}
12729
12730bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12731 // FIXME: This structure should be moved inside ARMTargetParser
12732 // when we start to table-generate them, and we can use the ARM
12733 // flags below, that were generated by table-gen.
12734 static const struct {
12735 const uint64_t Kind;
12736 const FeatureBitset ArchCheck;
12737 const FeatureBitset Features;
12738 } Extensions[] = {
12739 {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12740 {ARM::AEK_AES,
12741 {Feature_HasV8Bit},
12742 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12743 {ARM::AEK_SHA2,
12744 {Feature_HasV8Bit},
12745 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12746 {ARM::AEK_CRYPTO,
12747 {Feature_HasV8Bit},
12748 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12749 {ARM::AEK_FP,
12750 {Feature_HasV8Bit},
12751 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12752 {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
12753 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12754 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12755 {ARM::AEK_MP,
12756 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12757 {ARM::FeatureMP}},
12758 {ARM::AEK_SIMD,
12759 {Feature_HasV8Bit},
12760 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12761 {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12762 // FIXME: Only available in A-class, isel not predicated
12763 {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12764 {ARM::AEK_FP16,
12765 {Feature_HasV8_2aBit},
12766 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12767 {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12768 {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12769 {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12770 // FIXME: Unsupported extensions.
12771 {ARM::AEK_OS, {}, {}},
12772 {ARM::AEK_IWMMXT, {}, {}},
12773 {ARM::AEK_IWMMXT2, {}, {}},
12774 {ARM::AEK_MAVERICK, {}, {}},
12775 {ARM::AEK_XSCALE, {}, {}},
12776 };
12777 bool EnableFeature = true;
12778 if (Name.startswith_insensitive("no")) {
12779 EnableFeature = false;
12780 Name = Name.substr(2);
12781 }
12782 uint64_t FeatureKind = ARM::parseArchExt(Name);
12783 if (FeatureKind == ARM::AEK_INVALID)
12784 return Error(ExtLoc, "unknown architectural extension: " + Name);
12785
12786 for (const auto &Extension : Extensions) {
12787 if (Extension.Kind != FeatureKind)
12788 continue;
12789
12790 if (Extension.Features.none())
12791 return Error(ExtLoc, "unsupported architectural extension: " + Name);
12792
12793 if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12794 return Error(ExtLoc, "architectural extension '" + Name +
12795 "' is not "
12796 "allowed for the current base architecture");
12797
12798 MCSubtargetInfo &STI = copySTI();
12799 if (EnableFeature) {
12800 STI.SetFeatureBitsTransitively(Extension.Features);
12801 } else {
12802 STI.ClearFeatureBitsTransitively(Extension.Features);
12803 }
12804 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12805 setAvailableFeatures(Features);
12806 return true;
12807 }
12808 return false;
12809}
12810
12811/// parseDirectiveArchExtension
12812/// ::= .arch_extension [no]feature
12813bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12814
12815 MCAsmParser &Parser = getParser();
12816
12817 if (getLexer().isNot(AsmToken::Identifier))
12818 return Error(getLexer().getLoc(), "expected architecture extension name");
12819
12820 StringRef Name = Parser.getTok().getString();
12821 SMLoc ExtLoc = Parser.getTok().getLoc();
12822 Lex();
12823
12824 if (parseEOL())
12825 return true;
12826
12827 if (Name == "nocrypto") {
12828 enableArchExtFeature("nosha2", ExtLoc);
12829 enableArchExtFeature("noaes", ExtLoc);
12830 }
12831
12832 if (enableArchExtFeature(Name, ExtLoc))
12833 return false;
12834
12835 return Error(ExtLoc, "unknown architectural extension: " + Name);
12836}
12837
12838// Define this matcher function after the auto-generated include so we
12839// have the match class enum definitions.
12840unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12841 unsigned Kind) {
12842 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12843 // If the kind is a token for a literal immediate, check if our asm
12844 // operand matches. This is for InstAliases which have a fixed-value
12845 // immediate in the syntax.
12846 switch (Kind) {
12847 default: break;
12848 case MCK__HASH_0:
12849 if (Op.isImm())
12850 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12851 if (CE->getValue() == 0)
12852 return Match_Success;
12853 break;
12854 case MCK__HASH_8:
12855 if (Op.isImm())
12856 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12857 if (CE->getValue() == 8)
12858 return Match_Success;
12859 break;
12860 case MCK__HASH_16:
12861 if (Op.isImm())
12862 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12863 if (CE->getValue() == 16)
12864 return Match_Success;
12865 break;
12866 case MCK_ModImm:
12867 if (Op.isImm()) {
12868 const MCExpr *SOExpr = Op.getImm();
12869 int64_t Value;
12870 if (!SOExpr->evaluateAsAbsolute(Value))
12871 return Match_Success;
12872 assert((Value >= std::numeric_limits<int32_t>::min() &&(static_cast <bool> ((Value >= std::numeric_limits<
int32_t>::min() && Value <= std::numeric_limits
<uint32_t>::max()) && "expression value must be representable in 32 bits"
) ? void (0) : __assert_fail ("(Value >= std::numeric_limits<int32_t>::min() && Value <= std::numeric_limits<uint32_t>::max()) && \"expression value must be representable in 32 bits\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 12874, __extension__
__PRETTY_FUNCTION__))
12873 Value <= std::numeric_limits<uint32_t>::max()) &&(static_cast <bool> ((Value >= std::numeric_limits<
int32_t>::min() && Value <= std::numeric_limits
<uint32_t>::max()) && "expression value must be representable in 32 bits"
) ? void (0) : __assert_fail ("(Value >= std::numeric_limits<int32_t>::min() && Value <= std::numeric_limits<uint32_t>::max()) && \"expression value must be representable in 32 bits\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 12874, __extension__
__PRETTY_FUNCTION__))
12874 "expression value must be representable in 32 bits")(static_cast <bool> ((Value >= std::numeric_limits<
int32_t>::min() && Value <= std::numeric_limits
<uint32_t>::max()) && "expression value must be representable in 32 bits"
) ? void (0) : __assert_fail ("(Value >= std::numeric_limits<int32_t>::min() && Value <= std::numeric_limits<uint32_t>::max()) && \"expression value must be representable in 32 bits\""
, "llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp", 12874, __extension__
__PRETTY_FUNCTION__))
;
12875 }
12876 break;
12877 case MCK_rGPR:
12878 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
12879 return Match_Success;
12880 return Match_rGPR;
12881 case MCK_GPRPair:
12882 if (Op.isReg() &&
12883 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
12884 return Match_Success;
12885 break;
12886 }
12887 return Match_InvalidOperand;
12888}
12889
12890bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
12891 StringRef ExtraToken) {
12892 if (!hasMVE())
12893 return false;
12894
12895 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
12896 (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
12897 (Mnemonic.startswith("vmov") &&
12898 !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
12899 ExtraToken == ".8")) ||
12900 (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
12901 (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi"))
12902 return true;
12903
12904 const char *predicable_prefixes[] = {
12905 "vabav", "vabd", "vabs", "vadc", "vadd",
12906 "vaddlv", "vaddv", "vand", "vbic", "vbrsr",
12907 "vcadd", "vcls", "vclz", "vcmla", "vcmp",
12908 "vcmul", "vctp", "vcvt", "vddup", "vdup",
12909 "vdwdup", "veor", "vfma", "vfmas", "vfms",
12910 "vhadd", "vhcadd", "vhsub", "vidup", "viwdup",
12911 "vldrb", "vldrd", "vldrw", "vmax", "vmaxa",
12912 "vmaxav", "vmaxnm", "vmaxnma", "vmaxnmav", "vmaxnmv",
12913 "vmaxv", "vmin", "vminav", "vminnm", "vminnmav",
12914 "vminnmv", "vminv", "vmla", "vmladav", "vmlaldav",
12915 "vmlalv", "vmlas", "vmlav", "vmlsdav", "vmlsldav",
12916 "vmovlb", "vmovlt", "vmovnb", "vmovnt", "vmul",
12917 "vmvn", "vneg", "vorn", "vorr", "vpnot",
12918 "vpsel", "vqabs", "vqadd", "vqdmladh", "vqdmlah",
12919 "vqdmlash", "vqdmlsdh", "vqdmulh", "vqdmull", "vqmovn",
12920 "vqmovun", "vqneg", "vqrdmladh", "vqrdmlah", "vqrdmlash",
12921 "vqrdmlsdh", "vqrdmulh", "vqrshl", "vqrshrn", "vqrshrun",
12922 "vqshl", "vqshrn", "vqshrun", "vqsub", "vrev16",
12923 "vrev32", "vrev64", "vrhadd", "vrmlaldavh", "vrmlalvh",
12924 "vrmlsldavh", "vrmulh", "vrshl", "vrshr", "vrshrn",
12925 "vsbc", "vshl", "vshlc", "vshll", "vshr",
12926 "vshrn", "vsli", "vsri", "vstrb", "vstrd",
12927 "vstrw", "vsub"};
12928
12929 return std::any_of(
12930 std::begin(predicable_prefixes), std::end(predicable_prefixes),
12931 [&Mnemonic](const char *prefix) { return Mnemonic.startswith(prefix); });
12932}

/build/source/llvm/include/llvm/ADT/bit.h

1//===-- llvm/ADT/bit.h - C++20 <bit> ----------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the C++20 <bit> header.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_BIT_H
15#define LLVM_ADT_BIT_H
16
17#include "llvm/Support/Compiler.h"
18#include <cstdint>
19#include <limits>
20#include <type_traits>
21
22#if !__has_builtin(__builtin_bit_cast)1
23#include <cstring>
24#endif
25
26#if defined(_MSC_VER) && !defined(_DEBUG1)
27#include <cstdlib> // for _byteswap_{ushort,ulong,uint64}
28#endif
29
30#ifdef _MSC_VER
31// Declare these intrinsics manually rather including intrin.h. It's very
32// expensive, and bit.h is popular via MathExtras.h.
33// #include <intrin.h>
34extern "C" {
35unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
36unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
37unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
38unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
39}
40#endif
41
42namespace llvm {
43
// This implementation of bit_cast is different from the C++20 one in two ways:
//  - It isn't constexpr because that requires compiler support.
//  - It requires trivially-constructible To, to avoid UB in the implementation.
template <
    typename To, typename From,
    typename = std::enable_if_t<sizeof(To) == sizeof(From)>,
    typename = std::enable_if_t<std::is_trivially_constructible<To>::value>,
    typename = std::enable_if_t<std::is_trivially_copyable<To>::value>,
    typename = std::enable_if_t<std::is_trivially_copyable<From>::value>>
[[nodiscard]] inline To bit_cast(const From &from) noexcept {
#if __has_builtin(__builtin_bit_cast)
  return __builtin_bit_cast(To, from);
#else
  // memcpy is the portable, well-defined way to reinterpret object
  // representations when the builtin is unavailable.
  To to;
  std::memcpy(&to, &from, sizeof(To));
  return to;
#endif
}
62
/// Reverses the bytes in the given integer value V.
template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
[[nodiscard]] constexpr T byteswap(T V) noexcept {
  if constexpr (sizeof(T) == 1) {
    // A single byte has nothing to swap.
    return V;
  } else if constexpr (sizeof(T) == 2) {
    uint16_t UV = V;
#if defined(_MSC_VER) && !defined(_DEBUG)
    // The DLL version of the runtime lacks these functions (bug!?), but in a
    // release build they're replaced with BSWAP instructions anyway.
    return _byteswap_ushort(UV);
#else
    uint16_t Hi = UV << 8;
    uint16_t Lo = UV >> 8;
    return Hi | Lo;
#endif
  } else if constexpr (sizeof(T) == 4) {
    uint32_t UV = V;
#if __has_builtin(__builtin_bswap32)
    return __builtin_bswap32(UV);
#elif defined(_MSC_VER) && !defined(_DEBUG)
    return _byteswap_ulong(UV);
#else
    uint32_t Byte0 = UV & 0x000000FF;
    uint32_t Byte1 = UV & 0x0000FF00;
    uint32_t Byte2 = UV & 0x00FF0000;
    uint32_t Byte3 = UV & 0xFF000000;
    return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
#endif
  } else if constexpr (sizeof(T) == 8) {
    uint64_t UV = V;
#if __has_builtin(__builtin_bswap64)
    return __builtin_bswap64(UV);
#elif defined(_MSC_VER) && !defined(_DEBUG)
    return _byteswap_uint64(UV);
#else
    // Swap each 32-bit half, then exchange the halves.
    uint64_t Hi = byteswap<uint32_t>(UV);
    uint32_t Lo = byteswap<uint32_t>(UV >> 32);
    return (Hi << 32) | Lo;
#endif
  } else {
    static_assert(!sizeof(T *), "Don't know how to handle the given type.");
    return 0;
  }
}
108
/// Returns true if Value is an exact (nonzero) power of two, i.e. exactly one
/// bit is set. Clearing the lowest set bit (Value & (Value - 1)) leaves zero
/// iff that was the only bit.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr inline bool has_single_bit(T Value) noexcept {
  return (Value != 0) && ((Value & (Value - 1)) == 0);
}
113
namespace detail {
/// Portable fallback: counts trailing zeros by bisection in O(log bits).
template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
  static unsigned count(T Val) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;

    // Bisection method.
    unsigned ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

#if defined(__GNUC__) || defined(_MSC_VER)
/// 32-bit specialization using a single hardware instruction where available.
template <typename T> struct TrailingZerosCounter<T, 4> {
  static unsigned count(T Val) {
    // __builtin_ctz(0) is undefined, so the zero case is handled explicitly.
    if (Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || defined(__GNUC__)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
/// 64-bit specialization; excluded on 32-bit MSVC which lacks the intrinsic.
template <typename T> struct TrailingZerosCounter<T, 8> {
  static unsigned count(T Val) {
    if (Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || defined(__GNUC__)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// Returns std::numeric_limits<T>::digits on an input of 0.
template <typename T> [[nodiscard]] int countr_zero(T Val) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return detail::TrailingZerosCounter<T, sizeof(T)>::count(Val);
}
184
namespace detail {
/// Portable fallback: counts leading zeros by bisection in O(log bits).
template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
  static unsigned count(T Val) {
    if (!Val)
      return std::numeric_limits<T>::digits;

    // Bisection method.
    unsigned ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

#if defined(__GNUC__) || defined(_MSC_VER)
/// 32-bit specialization using a single hardware instruction where available.
template <typename T> struct LeadingZerosCounter<T, 4> {
  static unsigned count(T Val) {
    // __builtin_clz(0) is undefined, so the zero case is handled explicitly.
    if (Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || defined(__GNUC__)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    return Index ^ 31;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
/// 64-bit specialization; excluded on 32-bit MSVC which lacks the intrinsic.
template <typename T> struct LeadingZerosCounter<T, 8> {
  static unsigned count(T Val) {
    if (Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || defined(__GNUC__)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// Returns std::numeric_limits<T>::digits on an input of 0.
template <typename T> [[nodiscard]] int countl_zero(T Val) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return detail::LeadingZerosCounter<T, sizeof(T)>::count(Val);
}
250
251/// Count the number of ones from the most significant bit to the first
252/// zero bit.
253///
254/// Ex. countl_one(0xFF0FFF00) == 8.
255/// Only unsigned integral types are allowed.
256///
257/// Returns std::numeric_limits<T>::digits on an input of all ones.
258template <typename T> [[nodiscard]] int countl_one(T Value) {
259 static_assert(std::is_unsigned_v<T>,
260 "Only unsigned integral types are allowed.");
261 return llvm::countl_zero<T>(~Value);
262}
263
264/// Count the number of ones from the least significant bit to the first
265/// zero bit.
266///
267/// Ex. countr_one(0x00FF00FF) == 8.
268/// Only unsigned integral types are allowed.
269///
270/// Returns std::numeric_limits<T>::digits on an input of all ones.
271template <typename T> [[nodiscard]] int countr_one(T Value) {
272 static_assert(std::is_unsigned_v<T>,
273 "Only unsigned integral types are allowed.");
274 return llvm::countr_zero<T>(~Value);
275}
276
277/// Returns the number of bits needed to represent Value if Value is nonzero.
278/// Returns 0 otherwise.
279///
280/// Ex. bit_width(5) == 3.
281template <typename T> [[nodiscard]] int bit_width(T Value) {
282 static_assert(std::is_unsigned_v<T>,
283 "Only unsigned integral types are allowed.");
284 return std::numeric_limits<T>::digits - llvm::countl_zero(Value);
285}
286
287/// Returns the largest integral power of two no greater than Value if Value is
288/// nonzero. Returns 0 otherwise.
289///
290/// Ex. bit_floor(5) == 4.
291template <typename T> [[nodiscard]] T bit_floor(T Value) {
292 static_assert(std::is_unsigned_v<T>,
293 "Only unsigned integral types are allowed.");
294 if (!Value)
295 return 0;
296 return T(1) << (llvm::bit_width(Value) - 1);
297}
298
299/// Returns the smallest integral power of two no smaller than Value if Value is
300/// nonzero. Returns 1 otherwise.
301///
302/// Ex. bit_ceil(5) == 8.
303///
304/// The return value is undefined if the input is larger than the largest power
305/// of two representable in T.
306template <typename T> [[nodiscard]] T bit_ceil(T Value) {
307 static_assert(std::is_unsigned_v<T>,
308 "Only unsigned integral types are allowed.");
309 if (Value < 2)
310 return 1;
311 return T(1) << llvm::bit_width<T>(Value - 1u);
312}
313
namespace detail {
/// Generic population count for integer types of at most 32 bits.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static int count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__)
    return (int)__builtin_popcount(Value);
#else
    // SWAR (SIMD-within-a-register) bit-count fallback.
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return int(((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24);
#endif
  }
};

/// 64-bit specialization.
template <typename T> struct PopulationCounter<T, 8> {
  static int count(T Value) {
#if defined(__GNUC__)
    return (int)__builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return int((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// Count the number of set bits in a value.
/// Ex. popcount(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] inline int popcount(T Value) noexcept {
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
352
// Forward-declare rotr so that rotl can use it.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr T rotr(T V, int R);

/// Rotate V left by R bits; a negative R rotates right.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr T rotl(T V, int R) {
  unsigned N = std::numeric_limits<T>::digits;

  // NOTE(review): N is unsigned, so R is converted to unsigned for the %; a
  // negative R therefore wraps here. For the power-of-two digit counts of
  // the standard unsigned types this still yields the equivalent rotate, and
  // the R < 0 branch below is effectively a safety net.
  R = R % N;
  if (!R)
    return V;

  if (R < 0)
    return rotr(V, -R);

  return (V << R) | (V >> (N - R));
}

/// Rotate V right by R bits; a negative R rotates left.
template <typename T, typename> [[nodiscard]] constexpr T rotr(T V, int R) {
  unsigned N = std::numeric_limits<T>::digits;

  R = R % N;
  if (!R)
    return V;

  if (R < 0)
    return rotl(V, -R);

  return (V >> R) | (V << (N - R));
}
383
384} // namespace llvm
385
386#endif