Bug Summary

File: llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
Warning: line 345, column 36
The result of the left shift is undefined due to shifting by '32', which is greater than or equal to the width of type 'int'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ARMAsmParser.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM/AsmParser -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM/AsmParser -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/AsmParser -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Target/ARM/AsmParser -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp

1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMFeatures.h"
10#include "ARMBaseInstrInfo.h"
11#include "Utils/ARMBaseInfo.h"
12#include "MCTargetDesc/ARMAddressingModes.h"
13#include "MCTargetDesc/ARMBaseInfo.h"
14#include "MCTargetDesc/ARMInstPrinter.h"
15#include "MCTargetDesc/ARMMCExpr.h"
16#include "MCTargetDesc/ARMMCTargetDesc.h"
17#include "TargetInfo/ARMTargetInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringSet.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/StringSwitch.h"
28#include "llvm/ADT/Triple.h"
29#include "llvm/ADT/Twine.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/MC/MCExpr.h"
32#include "llvm/MC/MCInst.h"
33#include "llvm/MC/MCInstrDesc.h"
34#include "llvm/MC/MCInstrInfo.h"
35#include "llvm/MC/MCParser/MCAsmLexer.h"
36#include "llvm/MC/MCParser/MCAsmParser.h"
37#include "llvm/MC/MCParser/MCAsmParserExtension.h"
38#include "llvm/MC/MCParser/MCAsmParserUtils.h"
39#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
40#include "llvm/MC/MCParser/MCTargetAsmParser.h"
41#include "llvm/MC/MCRegisterInfo.h"
42#include "llvm/MC/MCSection.h"
43#include "llvm/MC/MCStreamer.h"
44#include "llvm/MC/MCSubtargetInfo.h"
45#include "llvm/MC/MCSymbol.h"
46#include "llvm/MC/SubtargetFeature.h"
47#include "llvm/Support/ARMBuildAttributes.h"
48#include "llvm/Support/ARMEHABI.h"
49#include "llvm/Support/Casting.h"
50#include "llvm/Support/CommandLine.h"
51#include "llvm/Support/Compiler.h"
52#include "llvm/Support/ErrorHandling.h"
53#include "llvm/Support/MathExtras.h"
54#include "llvm/Support/SMLoc.h"
55#include "llvm/Support/TargetParser.h"
56#include "llvm/Support/TargetRegistry.h"
57#include "llvm/Support/raw_ostream.h"
58#include <algorithm>
59#include <cassert>
60#include <cstddef>
61#include <cstdint>
62#include <iterator>
63#include <limits>
64#include <memory>
65#include <string>
66#include <utility>
67#include <vector>
68
69#define DEBUG_TYPE"asm-parser" "asm-parser"
70
71using namespace llvm;
72
73namespace llvm {
74extern const MCInstrDesc ARMInsts[];
75} // end namespace llvm
76
77namespace {
78
79enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
80
81static cl::opt<ImplicitItModeTy> ImplicitItMode(
82 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
83 cl::desc("Allow conditional instructions outdside of an IT block"),
84 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always
), "Accept in both ISAs, emit implicit ITs in Thumb" }
85 "Accept in both ISAs, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "always", int(ImplicitItModeTy::Always
), "Accept in both ISAs, emit implicit ITs in Thumb" }
,
86 clEnumValN(ImplicitItModeTy::Never, "never",llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never
), "Warn in ARM, reject in Thumb" }
87 "Warn in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "never", int(ImplicitItModeTy::Never
), "Warn in ARM, reject in Thumb" }
,
88 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly
), "Accept in ARM, reject in Thumb" }
89 "Accept in ARM, reject in Thumb")llvm::cl::OptionEnumValue { "arm", int(ImplicitItModeTy::ARMOnly
), "Accept in ARM, reject in Thumb" }
,
90 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly
), "Warn in ARM, emit implicit ITs in Thumb" }
91 "Warn in ARM, emit implicit ITs in Thumb")llvm::cl::OptionEnumValue { "thumb", int(ImplicitItModeTy::ThumbOnly
), "Warn in ARM, emit implicit ITs in Thumb" }
));
92
93static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
94 cl::init(false));
95
96enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
97
// Extract the then/else state bit for one slot of an IT block.
//
// Position == 0 means we're not in an IT block at all. Position == 1 means we
// want the first state bit, which is always 0 (Then). Position == 2 means we
// want the second state bit, stored at bit 3 of Mask, and so on downwards.
// (5 - Position) shifts the requested bit down to bit 0, including the
// always-0 bit at bit 4 for the mandatory initial Then.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1;
}
107
108class UnwindContext {
109 using Locs = SmallVector<SMLoc, 4>;
110
111 MCAsmParser &Parser;
112 Locs FnStartLocs;
113 Locs CantUnwindLocs;
114 Locs PersonalityLocs;
115 Locs PersonalityIndexLocs;
116 Locs HandlerDataLocs;
117 int FPReg;
118
119public:
120 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
121
122 bool hasFnStart() const { return !FnStartLocs.empty(); }
123 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
124 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
125
126 bool hasPersonality() const {
127 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
128 }
129
130 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
131 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
132 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
133 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
134 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
135
136 void saveFPReg(int Reg) { FPReg = Reg; }
137 int getFPReg() const { return FPReg; }
138
139 void emitFnStartLocNotes() const {
140 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
141 FI != FE; ++FI)
142 Parser.Note(*FI, ".fnstart was specified here");
143 }
144
145 void emitCantUnwindLocNotes() const {
146 for (Locs::const_iterator UI = CantUnwindLocs.begin(),
147 UE = CantUnwindLocs.end(); UI != UE; ++UI)
148 Parser.Note(*UI, ".cantunwind was specified here");
149 }
150
151 void emitHandlerDataLocNotes() const {
152 for (Locs::const_iterator HI = HandlerDataLocs.begin(),
153 HE = HandlerDataLocs.end(); HI != HE; ++HI)
154 Parser.Note(*HI, ".handlerdata was specified here");
155 }
156
157 void emitPersonalityLocNotes() const {
158 for (Locs::const_iterator PI = PersonalityLocs.begin(),
159 PE = PersonalityLocs.end(),
160 PII = PersonalityIndexLocs.begin(),
161 PIE = PersonalityIndexLocs.end();
162 PI != PE || PII != PIE;) {
163 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
164 Parser.Note(*PI++, ".personality was specified here");
165 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
166 Parser.Note(*PII++, ".personalityindex was specified here");
167 else
168 llvm_unreachable(".personality and .personalityindex cannot be "__builtin_unreachable()
169 "at the same location")__builtin_unreachable();
170 }
171 }
172
173 void reset() {
174 FnStartLocs = Locs();
175 CantUnwindLocs = Locs();
176 PersonalityLocs = Locs();
177 HandlerDataLocs = Locs();
178 PersonalityIndexLocs = Locs();
179 FPReg = ARM::SP;
180 }
181};
182
183// Various sets of ARM instruction mnemonics which are used by the asm parser
184class ARMMnemonicSets {
185 StringSet<> CDE;
186 StringSet<> CDEWithVPTSuffix;
187public:
188 ARMMnemonicSets(const MCSubtargetInfo &STI);
189
190 /// Returns true iff a given mnemonic is a CDE instruction
191 bool isCDEInstr(StringRef Mnemonic) {
192 // Quick check before searching the set
193 if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
194 return false;
195 return CDE.count(Mnemonic);
196 }
197
198 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
199 /// (possibly with a predication suffix "e" or "t")
200 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
201 if (!Mnemonic.startswith("vcx"))
202 return false;
203 return CDEWithVPTSuffix.count(Mnemonic);
204 }
205
206 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
207 /// (possibly with a condition suffix)
208 bool isITPredicableCDEInstr(StringRef Mnemonic) {
209 if (!Mnemonic.startswith("cx"))
210 return false;
211 return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
212 Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
213 Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
214 }
215
216 /// Return true iff a given mnemonic is an integer CDE instruction with
217 /// dual-register destination
218 bool isCDEDualRegInstr(StringRef Mnemonic) {
219 if (!Mnemonic.startswith("cx"))
220 return false;
221 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
222 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
223 Mnemonic == "cx3d" || Mnemonic == "cx3da";
224 }
225};
226
227ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
228 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
229 "cx2", "cx2a", "cx2d", "cx2da",
230 "cx3", "cx3a", "cx3d", "cx3da", })
231 CDE.insert(Mnemonic);
232 for (StringRef Mnemonic :
233 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
234 CDE.insert(Mnemonic);
235 CDEWithVPTSuffix.insert(Mnemonic);
236 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
237 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
238 }
239}
240
241class ARMAsmParser : public MCTargetAsmParser {
242 const MCRegisterInfo *MRI;
243 UnwindContext UC;
244 ARMMnemonicSets MS;
245
246 ARMTargetStreamer &getTargetStreamer() {
247 assert(getParser().getStreamer().getTargetStreamer() &&(static_cast<void> (0))
248 "do not have a target streamer")(static_cast<void> (0));
249 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
250 return static_cast<ARMTargetStreamer &>(TS);
251 }
252
253 // Map of register aliases registers via the .req directive.
254 StringMap<unsigned> RegisterReqs;
255
256 bool NextSymbolIsThumb;
257
258 bool useImplicitITThumb() const {
259 return ImplicitItMode == ImplicitItModeTy::Always ||
260 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
261 }
262
263 bool useImplicitITARM() const {
264 return ImplicitItMode == ImplicitItModeTy::Always ||
265 ImplicitItMode == ImplicitItModeTy::ARMOnly;
266 }
267
268 struct {
269 ARMCC::CondCodes Cond; // Condition for IT block.
270 unsigned Mask:4; // Condition mask for instructions.
271 // Starting at first 1 (from lsb).
272 // '1' condition as indicated in IT.
273 // '0' inverse of condition (else).
274 // Count of instructions in IT block is
275 // 4 - trailingzeroes(mask)
276 // Note that this does not have the same encoding
277 // as in the IT instruction, which also depends
278 // on the low bit of the condition code.
279
280 unsigned CurPosition; // Current position in parsing of IT
281 // block. In range [0,4], with 0 being the IT
282 // instruction itself. Initialized according to
283 // count of instructions in block. ~0U if no
284 // active IT block.
285
286 bool IsExplicit; // true - The IT instruction was present in the
287 // input, we should not modify it.
288 // false - The IT instruction was added
289 // implicitly, we can extend it if that
290 // would be legal.
291 } ITState;
292
293 SmallVector<MCInst, 4> PendingConditionalInsts;
294
295 void flushPendingInstructions(MCStreamer &Out) override {
296 if (!inImplicitITBlock()) {
297 assert(PendingConditionalInsts.size() == 0)(static_cast<void> (0));
298 return;
299 }
300
301 // Emit the IT instruction
302 MCInst ITInst;
303 ITInst.setOpcode(ARM::t2IT);
304 ITInst.addOperand(MCOperand::createImm(ITState.Cond));
305 ITInst.addOperand(MCOperand::createImm(ITState.Mask));
306 Out.emitInstruction(ITInst, getSTI());
307
308 // Emit the conditonal instructions
309 assert(PendingConditionalInsts.size() <= 4)(static_cast<void> (0));
310 for (const MCInst &Inst : PendingConditionalInsts) {
311 Out.emitInstruction(Inst, getSTI());
312 }
313 PendingConditionalInsts.clear();
314
315 // Clear the IT state
316 ITState.Mask = 0;
317 ITState.CurPosition = ~0U;
318 }
319
320 bool inITBlock() { return ITState.CurPosition != ~0U; }
321 bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
322 bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
323
324 bool lastInITBlock() {
325 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
326 }
327
328 void forwardITPosition() {
329 if (!inITBlock()) return;
330 // Move to the next instruction in the IT block, if there is one. If not,
331 // mark the block as done, except for implicit IT blocks, which we leave
332 // open until we find an instruction that can't be added to it.
333 unsigned TZ = countTrailingZeros(ITState.Mask);
334 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
335 ITState.CurPosition = ~0U; // Done with the IT block after this.
336 }
337
338 // Rewind the state of the current IT block, removing the last slot from it.
339 void rewindImplicitITPosition() {
340 assert(inImplicitITBlock())(static_cast<void> (0));
341 assert(ITState.CurPosition > 1)(static_cast<void> (0));
342 ITState.CurPosition--;
343 unsigned TZ = countTrailingZeros(ITState.Mask);
8
Calling 'countTrailingZeros<unsigned int>'
15
Returning from 'countTrailingZeros<unsigned int>'
16
'TZ' initialized to 32
344 unsigned NewMask = 0;
345 NewMask |= ITState.Mask & (0xC << TZ);
17
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int'
346 NewMask |= 0x2 << TZ;
347 ITState.Mask = NewMask;
348 }
349
350 // Rewind the state of the current IT block, removing the last slot from it.
351 // If we were at the first slot, this closes the IT block.
352 void discardImplicitITBlock() {
353 assert(inImplicitITBlock())(static_cast<void> (0));
354 assert(ITState.CurPosition == 1)(static_cast<void> (0));
355 ITState.CurPosition = ~0U;
356 }
357
358 // Return the low-subreg of a given Q register.
359 unsigned getDRegFromQReg(unsigned QReg) const {
360 return MRI->getSubReg(QReg, ARM::dsub_0);
361 }
362
363 // Get the condition code corresponding to the current IT block slot.
364 ARMCC::CondCodes currentITCond() {
365 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
366 return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
367 }
368
369 // Invert the condition of the current IT block slot without changing any
370 // other slots in the same block.
371 void invertCurrentITCondition() {
372 if (ITState.CurPosition == 1) {
373 ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
374 } else {
375 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
376 }
377 }
378
379 // Returns true if the current IT block is full (all 4 slots used).
380 bool isITBlockFull() {
381 return inITBlock() && (ITState.Mask & 1);
382 }
383
384 // Extend the current implicit IT block to have one more slot with the given
385 // condition code.
386 void extendImplicitITBlock(ARMCC::CondCodes Cond) {
387 assert(inImplicitITBlock())(static_cast<void> (0));
388 assert(!isITBlockFull())(static_cast<void> (0));
389 assert(Cond == ITState.Cond ||(static_cast<void> (0))
390 Cond == ARMCC::getOppositeCondition(ITState.Cond))(static_cast<void> (0));
391 unsigned TZ = countTrailingZeros(ITState.Mask);
392 unsigned NewMask = 0;
393 // Keep any existing condition bits.
394 NewMask |= ITState.Mask & (0xE << TZ);
395 // Insert the new condition bit.
396 NewMask |= (Cond != ITState.Cond) << TZ;
397 // Move the trailing 1 down one bit.
398 NewMask |= 1 << (TZ - 1);
399 ITState.Mask = NewMask;
400 }
401
402 // Create a new implicit IT block with a dummy condition code.
403 void startImplicitITBlock() {
404 assert(!inITBlock())(static_cast<void> (0));
405 ITState.Cond = ARMCC::AL;
406 ITState.Mask = 8;
407 ITState.CurPosition = 1;
408 ITState.IsExplicit = false;
409 }
410
411 // Create a new explicit IT block with the given condition and mask.
412 // The mask should be in the format used in ARMOperand and
413 // MCOperand, with a 1 implying 'e', regardless of the low bit of
414 // the condition.
415 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
416 assert(!inITBlock())(static_cast<void> (0));
417 ITState.Cond = Cond;
418 ITState.Mask = Mask;
419 ITState.CurPosition = 0;
420 ITState.IsExplicit = true;
421 }
422
423 struct {
424 unsigned Mask : 4;
425 unsigned CurPosition;
426 } VPTState;
427 bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
428 void forwardVPTPosition() {
429 if (!inVPTBlock()) return;
430 unsigned TZ = countTrailingZeros(VPTState.Mask);
431 if (++VPTState.CurPosition == 5 - TZ)
432 VPTState.CurPosition = ~0U;
433 }
434
435 void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
436 return getParser().Note(L, Msg, Range);
437 }
438
439 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
440 return getParser().Warning(L, Msg, Range);
441 }
442
443 bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
444 return getParser().Error(L, Msg, Range);
445 }
446
447 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
448 unsigned ListNo, bool IsARPop = false);
449 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
450 unsigned ListNo);
451
452 int tryParseRegister();
453 bool tryParseRegisterWithWriteBack(OperandVector &);
454 int tryParseShiftRegister(OperandVector &);
455 bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
456 bool parseMemory(OperandVector &);
457 bool parseOperand(OperandVector &, StringRef Mnemonic);
458 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
459 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
460 unsigned &ShiftAmount);
461 bool parseLiteralValues(unsigned Size, SMLoc L);
462 bool parseDirectiveThumb(SMLoc L);
463 bool parseDirectiveARM(SMLoc L);
464 bool parseDirectiveThumbFunc(SMLoc L);
465 bool parseDirectiveCode(SMLoc L);
466 bool parseDirectiveSyntax(SMLoc L);
467 bool parseDirectiveReq(StringRef Name, SMLoc L);
468 bool parseDirectiveUnreq(SMLoc L);
469 bool parseDirectiveArch(SMLoc L);
470 bool parseDirectiveEabiAttr(SMLoc L);
471 bool parseDirectiveCPU(SMLoc L);
472 bool parseDirectiveFPU(SMLoc L);
473 bool parseDirectiveFnStart(SMLoc L);
474 bool parseDirectiveFnEnd(SMLoc L);
475 bool parseDirectiveCantUnwind(SMLoc L);
476 bool parseDirectivePersonality(SMLoc L);
477 bool parseDirectiveHandlerData(SMLoc L);
478 bool parseDirectiveSetFP(SMLoc L);
479 bool parseDirectivePad(SMLoc L);
480 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
481 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
482 bool parseDirectiveLtorg(SMLoc L);
483 bool parseDirectiveEven(SMLoc L);
484 bool parseDirectivePersonalityIndex(SMLoc L);
485 bool parseDirectiveUnwindRaw(SMLoc L);
486 bool parseDirectiveTLSDescSeq(SMLoc L);
487 bool parseDirectiveMovSP(SMLoc L);
488 bool parseDirectiveObjectArch(SMLoc L);
489 bool parseDirectiveArchExtension(SMLoc L);
490 bool parseDirectiveAlign(SMLoc L);
491 bool parseDirectiveThumbSet(SMLoc L);
492
493 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
494 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
495 unsigned &PredicationCode,
496 unsigned &VPTPredicationCode, bool &CarrySetting,
497 unsigned &ProcessorIMod, StringRef &ITMask);
498 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
499 StringRef FullInst, bool &CanAcceptCarrySet,
500 bool &CanAcceptPredicationCode,
501 bool &CanAcceptVPTPredicationCode);
502 bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
503
504 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
505 OperandVector &Operands);
506 bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
507
508 bool isThumb() const {
509 // FIXME: Can tablegen auto-generate this?
510 return getSTI().getFeatureBits()[ARM::ModeThumb];
511 }
512
513 bool isThumbOne() const {
514 return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
515 }
516
517 bool isThumbTwo() const {
518 return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
519 }
520
521 bool hasThumb() const {
522 return getSTI().getFeatureBits()[ARM::HasV4TOps];
523 }
524
525 bool hasThumb2() const {
526 return getSTI().getFeatureBits()[ARM::FeatureThumb2];
527 }
528
529 bool hasV6Ops() const {
530 return getSTI().getFeatureBits()[ARM::HasV6Ops];
531 }
532
533 bool hasV6T2Ops() const {
534 return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
535 }
536
537 bool hasV6MOps() const {
538 return getSTI().getFeatureBits()[ARM::HasV6MOps];
539 }
540
541 bool hasV7Ops() const {
542 return getSTI().getFeatureBits()[ARM::HasV7Ops];
543 }
544
545 bool hasV8Ops() const {
546 return getSTI().getFeatureBits()[ARM::HasV8Ops];
547 }
548
549 bool hasV8MBaseline() const {
550 return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
551 }
552
553 bool hasV8MMainline() const {
554 return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
555 }
556 bool hasV8_1MMainline() const {
557 return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
558 }
559 bool hasMVE() const {
560 return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
561 }
562 bool hasMVEFloat() const {
563 return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
564 }
565 bool hasCDE() const {
566 return getSTI().getFeatureBits()[ARM::HasCDEOps];
567 }
568 bool has8MSecExt() const {
569 return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
570 }
571
572 bool hasARM() const {
573 return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
574 }
575
576 bool hasDSP() const {
577 return getSTI().getFeatureBits()[ARM::FeatureDSP];
578 }
579
580 bool hasD32() const {
581 return getSTI().getFeatureBits()[ARM::FeatureD32];
582 }
583
584 bool hasV8_1aOps() const {
585 return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
586 }
587
588 bool hasRAS() const {
589 return getSTI().getFeatureBits()[ARM::FeatureRAS];
590 }
591
592 void SwitchMode() {
593 MCSubtargetInfo &STI = copySTI();
594 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
595 setAvailableFeatures(FB);
596 }
597
598 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
599
600 bool isMClass() const {
601 return getSTI().getFeatureBits()[ARM::FeatureMClass];
602 }
603
604 /// @name Auto-generated Match Functions
605 /// {
606
607#define GET_ASSEMBLER_HEADER
608#include "ARMGenAsmMatcher.inc"
609
610 /// }
611
612 OperandMatchResultTy parseITCondCode(OperandVector &);
613 OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
614 OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
615 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
616 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
617 OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
618 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
619 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
620 OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
621 OperandMatchResultTy parseBankedRegOperand(OperandVector &);
622 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
623 int High);
624 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
625 return parsePKHImm(O, "lsl", 0, 31);
626 }
627 OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
628 return parsePKHImm(O, "asr", 1, 32);
629 }
630 OperandMatchResultTy parseSetEndImm(OperandVector &);
631 OperandMatchResultTy parseShifterImm(OperandVector &);
632 OperandMatchResultTy parseRotImm(OperandVector &);
633 OperandMatchResultTy parseModImm(OperandVector &);
634 OperandMatchResultTy parseBitfield(OperandVector &);
635 OperandMatchResultTy parsePostIdxReg(OperandVector &);
636 OperandMatchResultTy parseAM3Offset(OperandVector &);
637 OperandMatchResultTy parseFPImm(OperandVector &);
638 OperandMatchResultTy parseVectorList(OperandVector &);
639 OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
640 SMLoc &EndLoc);
641
642 // Asm Match Converter Methods
643 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
644 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
645 void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
646
647 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
648 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
649 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
650 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
651 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
652 bool isITBlockTerminator(MCInst &Inst) const;
653 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
654 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
655 bool Load, bool ARMMode, bool Writeback);
656
657public:
658 enum ARMMatchResultTy {
659 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
660 Match_RequiresNotITBlock,
661 Match_RequiresV6,
662 Match_RequiresThumb2,
663 Match_RequiresV8,
664 Match_RequiresFlagSetting,
665#define GET_OPERAND_DIAGNOSTIC_TYPES
666#include "ARMGenAsmMatcher.inc"
667
668 };
669
670 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
671 const MCInstrInfo &MII, const MCTargetOptions &Options)
672 : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
673 MCAsmParserExtension::Initialize(Parser);
674
675 // Cache the MCRegisterInfo.
676 MRI = getContext().getRegisterInfo();
677
678 // Initialize the set of available features.
679 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
680
681 // Add build attributes based on the selected target.
682 if (AddBuildAttributes)
683 getTargetStreamer().emitTargetAttributes(STI);
684
685 // Not in an ITBlock to start with.
686 ITState.CurPosition = ~0U;
687
688 VPTState.CurPosition = ~0U;
689
690 NextSymbolIsThumb = false;
691 }
692
693 // Implementation of the MCTargetAsmParser interface:
694 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
695 OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
696 SMLoc &EndLoc) override;
697 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
698 SMLoc NameLoc, OperandVector &Operands) override;
699 bool ParseDirective(AsmToken DirectiveID) override;
700
701 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
702 unsigned Kind) override;
703 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
704
705 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
706 OperandVector &Operands, MCStreamer &Out,
707 uint64_t &ErrorInfo,
708 bool MatchingInlineAsm) override;
709 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
710 SmallVectorImpl<NearMissInfo> &NearMisses,
711 bool MatchingInlineAsm, bool &EmitInITBlock,
712 MCStreamer &Out);
713
714 struct NearMissMessage {
715 SMLoc Loc;
716 SmallString<128> Message;
717 };
718
719 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
720
721 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
722 SmallVectorImpl<NearMissMessage> &NearMissesOut,
723 SMLoc IDLoc, OperandVector &Operands);
724 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
725 OperandVector &Operands);
726
727 void doBeforeLabelEmit(MCSymbol *Symbol) override;
728
729 void onLabelParsed(MCSymbol *Symbol) override;
730};
731
732/// ARMOperand - Instances of this class represent a parsed ARM machine
733/// operand.
734class ARMOperand : public MCParsedAsmOperand {
// Discriminator for the anonymous payload union below: identifies which
// parsed operand form this ARMOperand instance currently holds.
735 enum KindTy {
736 k_CondCode,
737 k_VPTPred,
738 k_CCOut,
739 k_ITCondMask,
740 k_CoprocNum,
741 k_CoprocReg,
742 k_CoprocOption,
743 k_Immediate,
744 k_MemBarrierOpt,
745 k_InstSyncBarrierOpt,
746 k_TraceSyncBarrierOpt,
747 k_Memory,
748 k_PostIndexRegister,
749 k_MSRMask,
750 k_BankedReg,
751 k_ProcIFlags,
752 k_VectorIndex,
753 k_Register,
754 k_RegisterList,
755 k_RegisterListWithAPSR,
756 k_DPRRegisterList,
757 k_SPRRegisterList,
758 k_FPSRegisterListWithVPR,
759 k_FPDRegisterListWithVPR,
760 k_VectorList,
761 k_VectorListAllLanes,
762 k_VectorListIndexed,
763 k_ShiftedRegister,
764 k_ShiftedImmediate,
765 k_ShifterImmediate,
766 k_RotateImmediate,
767 k_ModifiedImmediate,
768 k_ConstantPoolImmediate,
769 k_BitfieldDescriptor,
770 k_Token,
771 } Kind;
772
// Source locations for diagnostics; register lists are stored out of the
// union since they have non-trivial storage.
773 SMLoc StartLoc, EndLoc, AlignmentLoc;
774 SmallVector<unsigned, 8> Registers;
775
// One small POD payload struct per operand kind; exactly one member of the
// union at the bottom is active, selected by Kind.
776 struct CCOp {
777 ARMCC::CondCodes Val;
778 };
779
780 struct VCCOp {
781 ARMVCC::VPTCodes Val;
782 };
783
784 struct CopOp {
785 unsigned Val;
786 };
787
788 struct CoprocOptionOp {
789 unsigned Val;
790 };
791
792 struct ITMaskOp {
793 unsigned Mask:4;
794 };
795
796 struct MBOptOp {
797 ARM_MB::MemBOpt Val;
798 };
799
800 struct ISBOptOp {
801 ARM_ISB::InstSyncBOpt Val;
802 };
803
804 struct TSBOptOp {
805 ARM_TSB::TraceSyncBOpt Val;
806 };
807
808 struct IFlagsOp {
809 ARM_PROC::IFlags Val;
810 };
811
812 struct MMaskOp {
813 unsigned Val;
814 };
815
816 struct BankedRegOp {
817 unsigned Val;
818 };
819
// Non-owning view into the token text; lifetime is managed by the lexer.
820 struct TokOp {
821 const char *Data;
822 unsigned Length;
823 };
824
825 struct RegOp {
826 unsigned RegNum;
827 };
828
829 // A vector register list is a sequential list of 1 to 4 registers.
830 struct VectorListOp {
831 unsigned RegNum;
832 unsigned Count;
833 unsigned LaneIndex;
834 bool isDoubleSpaced;
835 };
836
837 struct VectorIndexOp {
838 unsigned Val;
839 };
840
841 struct ImmOp {
842 const MCExpr *Val;
843 };
844
845 /// Combined record for all forms of ARM address expressions.
846 struct MemoryOp {
847 unsigned BaseRegNum;
848 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
849 // was specified.
850 const MCExpr *OffsetImm; // Offset immediate value
851 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
852 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
853 unsigned ShiftImm; // shift for OffsetReg.
854 unsigned Alignment; // 0 = no alignment specified
855 // n = alignment in bytes (2, 4, 8, 16, or 32)
856 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
857 };
858
859 struct PostIdxRegOp {
860 unsigned RegNum;
861 bool isAdd;
862 ARM_AM::ShiftOpc ShiftTy;
863 unsigned ShiftImm;
864 };
865
866 struct ShifterImmOp {
867 bool isASR;
868 unsigned Imm;
869 };
870
871 struct RegShiftedRegOp {
872 ARM_AM::ShiftOpc ShiftTy;
873 unsigned SrcReg;
874 unsigned ShiftReg;
875 unsigned ShiftImm;
876 };
877
878 struct RegShiftedImmOp {
879 ARM_AM::ShiftOpc ShiftTy;
880 unsigned SrcReg;
881 unsigned ShiftImm;
882 };
883
884 struct RotImmOp {
885 unsigned Imm;
886 };
887
888 struct ModImmOp {
889 unsigned Bits;
890 unsigned Rot;
891 };
892
893 struct BitfieldOp {
894 unsigned LSB;
895 unsigned Width;
896 };
897
// The active member is the one matching Kind; accessors below assert this.
898 union {
899 struct CCOp CC;
900 struct VCCOp VCC;
901 struct CopOp Cop;
902 struct CoprocOptionOp CoprocOption;
903 struct MBOptOp MBOpt;
904 struct ISBOptOp ISBOpt;
905 struct TSBOptOp TSBOpt;
906 struct ITMaskOp ITMask;
907 struct IFlagsOp IFlags;
908 struct MMaskOp MMask;
909 struct BankedRegOp BankedReg;
910 struct TokOp Tok;
911 struct RegOp Reg;
912 struct VectorListOp VectorList;
913 struct VectorIndexOp VectorIndex;
914 struct ImmOp Imm;
915 struct MemoryOp Memory;
916 struct PostIdxRegOp PostIdxReg;
917 struct ShifterImmOp ShifterImm;
918 struct RegShiftedRegOp RegShiftedReg;
919 struct RegShiftedImmOp RegShiftedImm;
920 struct RotImmOp RotImm;
921 struct ModImmOp ModImm;
922 struct BitfieldOp Bitfield;
923 };
924
925public:
926 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
927
928 /// getStartLoc - Get the location of the first token of this operand.
929 SMLoc getStartLoc() const override { return StartLoc; }
930
931 /// getEndLoc - Get the location of the last token of this operand.
932 SMLoc getEndLoc() const override { return EndLoc; }
933
934 /// getLocRange - Get the range between the first and last token of this
935 /// operand.
936 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
937
938 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
939 SMLoc getAlignmentLoc() const {
// NOTE: the asserts below appear pre-expanded to no-ops because this
// listing was produced from an NDEBUG build.
940 assert(Kind == k_Memory && "Invalid access!")(static_cast<void> (0));
941 return AlignmentLoc;
942 }
943
// Typed accessors for the union payload: each checks (in asserts-enabled
// builds) that Kind matches before reading the corresponding member.
944 ARMCC::CondCodes getCondCode() const {
945 assert(Kind == k_CondCode && "Invalid access!")(static_cast<void> (0));
946 return CC.Val;
947 }
948
949 ARMVCC::VPTCodes getVPTPred() const {
950 assert(isVPTPred() && "Invalid access!")(static_cast<void> (0));
951 return VCC.Val;
952 }
953
954 unsigned getCoproc() const {
955 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!")(static_cast<void> (0));
956 return Cop.Val;
957 }
958
959 StringRef getToken() const {
960 assert(Kind == k_Token && "Invalid access!")(static_cast<void> (0));
961 return StringRef(Tok.Data, Tok.Length);
962 }
963
964 unsigned getReg() const override {
965 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!")(static_cast<void> (0));
966 return Reg.RegNum;
967 }
968
969 const SmallVectorImpl<unsigned> &getRegList() const {
970 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||(static_cast<void> (0))
971 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||(static_cast<void> (0))
972 Kind == k_FPSRegisterListWithVPR ||(static_cast<void> (0))
973 Kind == k_FPDRegisterListWithVPR) &&(static_cast<void> (0))
974 "Invalid access!")(static_cast<void> (0));
975 return Registers;
976 }
977
978 const MCExpr *getImm() const {
979 assert(isImm() && "Invalid access!")(static_cast<void> (0));
980 return Imm.Val;
981 }
982
983 const MCExpr *getConstantPoolImm() const {
984 assert(isConstantPoolImm() && "Invalid access!")(static_cast<void> (0));
985 return Imm.Val;
986 }
987
988 unsigned getVectorIndex() const {
989 assert(Kind == k_VectorIndex && "Invalid access!")(static_cast<void> (0));
990 return VectorIndex.Val;
991 }
992
993 ARM_MB::MemBOpt getMemBarrierOpt() const {
994 assert(Kind == k_MemBarrierOpt && "Invalid access!")(static_cast<void> (0));
995 return MBOpt.Val;
996 }
997
998 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
999 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!")(static_cast<void> (0));
1000 return ISBOpt.Val;
1001 }
1002
1003 ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1004 assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!")(static_cast<void> (0));
1005 return TSBOpt.Val;
1006 }
1007
1008 ARM_PROC::IFlags getProcIFlags() const {
1009 assert(Kind == k_ProcIFlags && "Invalid access!")(static_cast<void> (0));
1010 return IFlags.Val;
1011 }
1012
1013 unsigned getMSRMask() const {
1014 assert(Kind == k_MSRMask && "Invalid access!")(static_cast<void> (0));
1015 return MMask.Val;
1016 }
1017
1018 unsigned getBankedReg() const {
1019 assert(Kind == k_BankedReg && "Invalid access!")(static_cast<void> (0));
1020 return BankedReg.Val;
1021 }
1022
// Simple kind tests used by the generated operand matcher.
1023 bool isCoprocNum() const { return Kind == k_CoprocNum; }
1024 bool isCoprocReg() const { return Kind == k_CoprocReg; }
1025 bool isCoprocOption() const { return Kind == k_CoprocOption; }
1026 bool isCondCode() const { return Kind == k_CondCode; }
1027 bool isVPTPred() const { return Kind == k_VPTPred; }
1028 bool isCCOut() const { return Kind == k_CCOut; }
1029 bool isITMask() const { return Kind == k_ITCondMask; }
// An IT condition code is parsed as an ordinary condition code operand.
1030 bool isITCondCode() const { return Kind == k_CondCode; }
1031 bool isImm() const override {
1032 return Kind == k_Immediate;
1033 }
1034
1035 bool isARMBranchTarget() const {
1036 if (!isImm()) return false;
1037
1038 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1039 return CE->getValue() % 4 == 0;
1040 return true;
1041 }
1042
1043
1044 bool isThumbBranchTarget() const {
1045 if (!isImm()) return false;
1046
1047 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1048 return CE->getValue() % 2 == 0;
1049 return true;
1050 }
1051
1052 // checks whether this operand is an unsigned offset which fits in a field
1053 // of specified width and scaled by a specific number of bits
1054 template<unsigned width, unsigned scale>
1055 bool isUnsignedOffset() const {
1056 if (!isImm()) return false;
// Symbolic references are accepted unconditionally; range checking happens
// when the fixup is resolved.
1057 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1058 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1059 int64_t Val = CE->getValue();
// 64-bit (1LL) shifts avoid 32-bit overflow for the template parameters
// used here.
1060 int64_t Align = 1LL << scale;
1061 int64_t Max = Align * ((1LL << width) - 1);
1062 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1063 }
1064 return false;
1065 }
1066
1067 // checks whether this operand is a signed offset which fits in a field
1068 // of specified width and scaled by a specific number of bits
1069 template<unsigned width, unsigned scale>
1070 bool isSignedOffset() const {
1071 if (!isImm()) return false;
1072 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1073 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1074 int64_t Val = CE->getValue();
1075 int64_t Align = 1LL << scale;
// Two's-complement asymmetric range: [-Align * 2^(width-1), Max].
1076 int64_t Max = Align * ((1LL << (width-1)) - 1);
1077 int64_t Min = -Align * (1LL << (width-1));
1078 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1079 }
1080 return false;
1081 }
1082
1083 // checks whether this operand is an offset suitable for the LE /
1084 // LETP instructions in Arm v8.1M
1085 bool isLEOffset() const {
1086 if (!isImm()) return false;
1087 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1089 int64_t Val = CE->getValue();
// LE branches backwards only: even offsets in [-4094, -2] or 0 excluded
// by Val < 0.
1090 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1091 }
1092 return false;
1093 }
1094
1095 // checks whether this operand is a memory operand computed as an offset
1096 // applied to PC. the offset may have 8 bits of magnitude and is represented
1097 // with two bits of shift. textually it may be either [pc, #imm], #imm or
1098 // relocable expression...
1099 bool isThumbMemPC() const {
1100 int64_t Val = 0;
1101 if (isImm()) {
1102 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1103 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1104 if (!CE) return false;
1105 Val = CE->getValue();
1106 }
// [pc, #imm] form: must be a constant immediate offset, no offset register.
1107 else if (isGPRMem()) {
1108 if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1109 if(Memory.BaseRegNum != ARM::PC) return false;
1110 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
1111 Val = CE->getValue();
1112 else
1113 return false;
1114 }
1115 else return false;
// 8-bit magnitude scaled by 4: word-aligned values in [0, 1020].
1116 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1117 }
1118
// True if the constant is representable as a VFP 32-bit float immediate
// (getFP32Imm returns -1 when it is not encodable).
1119 bool isFPImm() const {
1120 if (!isImm()) return false;
1121 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1122 if (!CE) return false;
1123 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1124 return Val != -1;
1125 }
1126
1127 template<int64_t N, int64_t M>
1128 bool isImmediate() const {
1129 if (!isImm()) return false;
1130 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1131 if (!CE) return false;
1132 int64_t Value = CE->getValue();
1133 return Value >= N && Value <= M;
1134 }
1135
1136 template<int64_t N, int64_t M>
1137 bool isImmediateS4() const {
1138 if (!isImm()) return false;
1139 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1140 if (!CE) return false;
1141 int64_t Value = CE->getValue();
1142 return ((Value & 3) == 0) && Value >= N && Value <= M;
1143 }
1144 template<int64_t N, int64_t M>
1145 bool isImmediateS2() const {
1146 if (!isImm()) return false;
1147 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1148 if (!CE) return false;
1149 int64_t Value = CE->getValue();
1150 return ((Value & 1) == 0) && Value >= N && Value <= M;
1151 }
// Named range predicates built on the isImmediate* templates; the names
// encode the accepted range (and implicit scaling) of each encoding.
1152 bool isFBits16() const {
1153 return isImmediate<0, 17>();
1154 }
1155 bool isFBits32() const {
1156 return isImmediate<1, 33>();
1157 }
1158 bool isImm8s4() const {
1159 return isImmediateS4<-1020, 1020>();
1160 }
1161 bool isImm7s4() const {
1162 return isImmediateS4<-508, 508>();
1163 }
1164 bool isImm7Shift0() const {
1165 return isImmediate<-127, 127>();
1166 }
1167 bool isImm7Shift1() const {
1168 return isImmediateS2<-255, 255>();
1169 }
1170 bool isImm7Shift2() const {
1171 return isImmediateS4<-511, 511>();
1172 }
1173 bool isImm7() const {
1174 return isImmediate<-127, 127>();
1175 }
1176 bool isImm0_1020s4() const {
1177 return isImmediateS4<0, 1020>();
1178 }
1179 bool isImm0_508s4() const {
1180 return isImmediateS4<0, 508>();
1181 }
// Negated form: accepts constants whose negation lands in (0, 508] and is
// a multiple of 4.
1182 bool isImm0_508s4Neg() const {
1183 if (!isImm()) return false;
1184 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1185 if (!CE) return false;
1186 int64_t Value = -CE->getValue();
1187 // explicitly exclude zero. we want that to use the normal 0_508 version.
1188 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1189 }
1190
1191 bool isImm0_4095Neg() const {
1192 if (!isImm()) return false;
1193 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1194 if (!CE) return false;
1195 // isImm0_4095Neg is used with 32-bit immediates only.
1196 // 32-bit immediates are zero extended to 64-bit when parsed,
1197 // thus simple -CE->getValue() results in a big negative number,
1198 // not a small positive number as intended
1199 if ((CE->getValue() >> 32) > 0) return false;
1200 uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1201 return Value > 0 && Value < 4096;
1202 }
1203
1204 bool isImm0_7() const {
1205 return isImmediate<0, 7>();
1206 }
1207
1208 bool isImm1_16() const {
1209 return isImmediate<1, 16>();
1210 }
1211
1212 bool isImm1_32() const {
1213 return isImmediate<1, 32>();
1214 }
1215
1216 bool isImm8_255() const {
1217 return isImmediate<8, 255>();
1218 }
1219
// The *Expr variants accept any non-constant expression (handled later as
// a fixup) and range-check only constants.
1220 bool isImm256_65535Expr() const {
1221 if (!isImm()) return false;
1222 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1223 // If it's not a constant expression, it'll generate a fixup and be
1224 // handled later.
1225 if (!CE) return true;
1226 int64_t Value = CE->getValue();
1227 return Value >= 256 && Value < 65536;
1228 }
1229
1230 bool isImm0_65535Expr() const {
1231 if (!isImm()) return false;
1232 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1233 // If it's not a constant expression, it'll generate a fixup and be
1234 // handled later.
1235 if (!CE) return true;
1236 int64_t Value = CE->getValue();
1237 return Value >= 0 && Value < 65536;
1238 }
1239
1240 bool isImm24bit() const {
1241 return isImmediate<0, 0xffffff + 1>();
1242 }
1243
1244 bool isImmThumbSR() const {
1245 return isImmediate<1, 33>();
1246 }
1247
1248 template<int shift>
1249 bool isExpImmValue(uint64_t Value) const {
1250 uint64_t mask = (1 << shift) - 1;
1251 if ((Value & mask) != 0 || (Value >> shift) > 0xff)
1252 return false;
1253 return true;
1254 }
1255
// Constant immediate matching the exponential-immediate encoding for the
// given shift.
1256 template<int shift>
1257 bool isExpImm() const {
1258 if (!isImm()) return false;
1259 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1260 if (!CE) return false;
1261
1262 return isExpImmValue<shift>(CE->getValue());
1263 }
1264
// Same check applied to the bitwise complement within a size-bit field.
1265 template<int shift, int size>
1266 bool isInvertedExpImm() const {
1267 if (!isImm()) return false;
1268 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1269 if (!CE) return false;
1270
1271 uint64_t OriginalValue = CE->getValue();
1272 uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
1273 return isExpImmValue<shift>(InvertedValue);
1274 }
1275
// PKH instruction shift amounts: LSL takes [0, 32), ASR takes (0, 32].
1276 bool isPKHLSLImm() const {
1277 return isImmediate<0, 32>();
1278 }
1279
1280 bool isPKHASRImm() const {
1281 return isImmediate<0, 33>();
1282 }
1283
1284 bool isAdrLabel() const {
1285 // If we have an immediate that's not a constant, treat it as a label
1286 // reference needing a fixup.
1287 if (isImm() && !isa<MCConstantExpr>(getImm()))
1288 return true;
1289
1290 // If it is a constant, it must fit into a modified immediate encoding.
1291 if (!isImm()) return false;
1292 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1293 if (!CE) return false;
1294 int64_t Value = CE->getValue();
// getSOImmVal returns -1 when the value has no modified-immediate
// encoding; ADR may use either ADD (Value) or SUB (-Value).
1295 return (ARM_AM::getSOImmVal(Value) != -1 ||
1296 ARM_AM::getSOImmVal(-Value) != -1);
1297 }
1298
1299 bool isT2SOImm() const {
1300 // If we have an immediate that's not a constant, treat it as an expression
1301 // needing a fixup.
1302 if (isImm() && !isa<MCConstantExpr>(getImm())) {
1303 // We want to avoid matching :upper16: and :lower16: as we want these
1304 // expressions to match in isImm0_65535Expr()
1305 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1306 return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1307 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1308 }
1309 if (!isImm()) return false;
1310 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1311 if (!CE) return false;
1312 int64_t Value = CE->getValue();
1313 return ARM_AM::getT2SOImmVal(Value) != -1;
1314 }
1315
// Encodable only after bitwise NOT (for MVN-style aliasing).
1316 bool isT2SOImmNot() const {
1317 if (!isImm()) return false;
1318 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1319 if (!CE) return false;
1320 int64_t Value = CE->getValue();
1321 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1322 ARM_AM::getT2SOImmVal(~Value) != -1;
1323 }
1324
// Encodable only after negation (for ADD<->SUB aliasing).
1325 bool isT2SOImmNeg() const {
1326 if (!isImm()) return false;
1327 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1328 if (!CE) return false;
1329 int64_t Value = CE->getValue();
1330 // Only use this when not representable as a plain so_imm.
1331 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1332 ARM_AM::getT2SOImmVal(-Value) != -1;
1333 }
1334
// SETEND instruction operand: only 0 (LE) or 1 (BE).
1335 bool isSetEndImm() const {
1336 if (!isImm()) return false;
1337 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1338 if (!CE) return false;
1339 int64_t Value = CE->getValue();
1340 return Value == 1 || Value == 0;
1341 }
1342
// Register, register-list, token, and barrier-option kind tests.
1343 bool isReg() const override { return Kind == k_Register; }
1344 bool isRegList() const { return Kind == k_RegisterList; }
1345 bool isRegListWithAPSR() const {
1346 return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1347 }
1348 bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1349 bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1350 bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1351 bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1352 bool isToken() const override { return Kind == k_Token; }
1353 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1354 bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1355 bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1356 bool isMem() const override {
1357 return isGPRMem() || isMVEMem();
1358 }
// MVE memory operand: base may be GPR or MQPR; any offset register must
// be MQPR.
1359 bool isMVEMem() const {
1360 if (Kind != k_Memory)
1361 return false;
1362 if (Memory.BaseRegNum &&
1363 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1364 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1365 return false;
1366 if (Memory.OffsetRegNum &&
1367 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1368 Memory.OffsetRegNum))
1369 return false;
1370 return true;
1371 }
// Plain GPR-based memory operand: base and offset registers (if present)
// must both be GPRs.
1372 bool isGPRMem() const {
1373 if (Kind != k_Memory)
1374 return false;
1375 if (Memory.BaseRegNum &&
1376 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1377 return false;
1378 if (Memory.OffsetRegNum &&
1379 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1380 return false;
1381 return true;
1382 }
1383 bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1384 bool isRegShiftedReg() const {
1385 return Kind == k_ShiftedRegister &&
1386 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1387 RegShiftedReg.SrcReg) &&
1388 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1389 RegShiftedReg.ShiftReg);
1390 }
1391 bool isRegShiftedImm() const {
1392 return Kind == k_ShiftedImmediate &&
1393 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1394 RegShiftedImm.SrcReg);
1395 }
1396 bool isRotImm() const { return Kind == k_RotateImmediate; }
1397
// True if the operand is a constant power of two within [Min, Max]
// (countPopulation == 1 means exactly one bit set).
1398 template<unsigned Min, unsigned Max>
1399 bool isPowerTwoInRange() const {
1400 if (!isImm()) return false;
1401 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1402 if (!CE) return false;
1403 int64_t Value = CE->getValue();
1404 return Value > 0 && countPopulation((uint64_t)Value) == 1 &&
1405 Value >= Min && Value <= Max;
1406 }
1407 bool isModImm() const { return Kind == k_ModifiedImmediate; }
1408
// Encodable as an ARM modified immediate only after bitwise NOT.
1409 bool isModImmNot() const {
1410 if (!isImm()) return false;
1411 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1412 if (!CE) return false;
1413 int64_t Value = CE->getValue();
1414 return ARM_AM::getSOImmVal(~Value) != -1;
1415 }
1416
// Encodable as an ARM modified immediate only after negation.
1417 bool isModImmNeg() const {
1418 if (!isImm()) return false;
1419 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1420 if (!CE) return false;
1421 int64_t Value = CE->getValue();
1422 return ARM_AM::getSOImmVal(Value) == -1 &&
1423 ARM_AM::getSOImmVal(-Value) != -1;
1424 }
1425
// Negated Thumb immediates: the negation (in 32-bit arithmetic) must land
// in (0, 8) or (7, 256) respectively.
1426 bool isThumbModImmNeg1_7() const {
1427 if (!isImm()) return false;
1428 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1429 if (!CE) return false;
1430 int32_t Value = -(int32_t)CE->getValue();
1431 return 0 < Value && Value < 8;
1432 }
1433
1434 bool isThumbModImmNeg8_255() const {
1435 if (!isImm()) return false;
1436 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1437 if (!CE) return false;
1438 int32_t Value = -(int32_t)CE->getValue();
1439 return 7 < Value && Value < 256;
1440 }
1441
1442 bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1443 bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1444 bool isPostIdxRegShifted() const {
1445 return Kind == k_PostIndexRegister &&
1446 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1447 }
1448 bool isPostIdxReg() const {
1449 return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1450 }
// [Rn] with no offset; the variants restrict Rn to GPRnopc / rGPR / tGPR
// register classes. alignOK accepts any alignment token.
1451 bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1452 if (!isGPRMem())
1453 return false;
1454 // No offset of any kind.
1455 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1456 (alignOK || Memory.Alignment == Alignment);
1457 }
1458 bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1459 if (!isGPRMem())
1460 return false;
1461
1462 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1463 Memory.BaseRegNum))
1464 return false;
1465
1466 // No offset of any kind.
1467 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1468 (alignOK || Memory.Alignment == Alignment);
1469 }
1470 bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1471 if (!isGPRMem())
1472 return false;
1473
1474 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1475 Memory.BaseRegNum))
1476 return false;
1477
1478 // No offset of any kind.
1479 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1480 (alignOK || Memory.Alignment == Alignment);
1481 }
1482 bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1483 if (!isGPRMem())
1484 return false;
1485
1486 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1487 Memory.BaseRegNum))
1488 return false;
1489
1490 // No offset of any kind.
1491 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1492 (alignOK || Memory.Alignment == Alignment);
1493 }
1494 bool isMemPCRelImm12() const {
1495 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1496 return false;
1497 // Base register must be PC.
1498 if (Memory.BaseRegNum != ARM::PC)
1499 return false;
1500 // Immediate offset in range [-4095, 4095].
1501 if (!Memory.OffsetImm) return true;
1502 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1503 int64_t Val = CE->getValue();
// INT32_MIN is the sentinel encoding for #-0.
1504 return (Val > -4096 && Val < 4096) ||
1505 (Val == std::numeric_limits<int32_t>::min());
1506 }
1507 return false;
1508 }
1509
// Alignment-qualified [Rn] operands for NEON loads/stores; each accepts
// the listed alignment(s) or no alignment token at all.
1510 bool isAlignedMemory() const {
1511 return isMemNoOffset(true);
1512 }
1513
1514 bool isAlignedMemoryNone() const {
1515 return isMemNoOffset(false, 0);
1516 }
1517
1518 bool isDupAlignedMemoryNone() const {
1519 return isMemNoOffset(false, 0);
1520 }
1521
1522 bool isAlignedMemory16() const {
1523 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1524 return true;
1525 return isMemNoOffset(false, 0);
1526 }
1527
1528 bool isDupAlignedMemory16() const {
1529 if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1530 return true;
1531 return isMemNoOffset(false, 0);
1532 }
1533
1534 bool isAlignedMemory32() const {
1535 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1536 return true;
1537 return isMemNoOffset(false, 0);
1538 }
1539
1540 bool isDupAlignedMemory32() const {
1541 if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1542 return true;
1543 return isMemNoOffset(false, 0);
1544 }
1545
1546 bool isAlignedMemory64() const {
1547 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1548 return true;
1549 return isMemNoOffset(false, 0);
1550 }
1551
1552 bool isDupAlignedMemory64() const {
1553 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1554 return true;
1555 return isMemNoOffset(false, 0);
1556 }
1557
1558 bool isAlignedMemory64or128() const {
1559 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1560 return true;
1561 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1562 return true;
1563 return isMemNoOffset(false, 0);
1564 }
1565
1566 bool isDupAlignedMemory64or128() const {
1567 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1568 return true;
1569 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1570 return true;
1571 return isMemNoOffset(false, 0);
1572 }
1573
1574 bool isAlignedMemory64or128or256() const {
1575 if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1576 return true;
1577 if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1578 return true;
1579 if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1580 return true;
1581 return isMemNoOffset(false, 0);
1582 }
1583
// ARM addressing mode 2: [Rn, Rm] or [Rn, #imm] with imm in (-4096, 4096).
1584 bool isAddrMode2() const {
1585 if (!isGPRMem() || Memory.Alignment != 0) return false;
1586 // Check for register offset.
1587 if (Memory.OffsetRegNum) return true;
1588 // Immediate offset in range [-4095, 4095].
1589 if (!Memory.OffsetImm) return true;
1590 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1591 int64_t Val = CE->getValue();
1592 return Val > -4096 && Val < 4096;
1593 }
1594 return false;
1595 }
1596
// Standalone AM2 offset immediate (post-indexed forms); INT32_MIN encodes #-0.
1597 bool isAM2OffsetImm() const {
1598 if (!isImm()) return false;
1599 // Immediate offset in range [-4095, 4095].
1600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1601 if (!CE) return false;
1602 int64_t Val = CE->getValue();
1603 return (Val == std::numeric_limits<int32_t>::min()) ||
1604 (Val > -4096 && Val < 4096);
1605 }
1606
1607 bool isAddrMode3() const {
1608 // If we have an immediate that's not a constant, treat it as a label
1609 // reference needing a fixup. If it is a constant, it's something else
1610 // and we reject it.
1611 if (isImm() && !isa<MCConstantExpr>(getImm()))
1612 return true;
1613 if (!isGPRMem() || Memory.Alignment != 0) return false;
1614 // No shifts are legal for AM3.
1615 if (Memory.ShiftType != ARM_AM::no_shift) return false;
1616 // Check for register offset.
1617 if (Memory.OffsetRegNum) return true;
1618 // Immediate offset in range [-255, 255].
1619 if (!Memory.OffsetImm) return true;
1620 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1621 int64_t Val = CE->getValue();
1622 // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1623 // we have to check for this too.
1624 return (Val > -256 && Val < 256) ||
1625 Val == std::numeric_limits<int32_t>::min();
1626 }
1627 return false;
1628 }
1629
// Standalone AM3 offset: a post-index register or an 8-bit immediate.
1630 bool isAM3Offset() const {
1631 if (isPostIdxReg())
1632 return true;
1633 if (!isImm())
1634 return false;
1635 // Immediate offset in range [-255, 255].
1636 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1637 if (!CE) return false;
1638 int64_t Val = CE->getValue();
1639 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1640 return (Val > -256 && Val < 256) ||
1641 Val == std::numeric_limits<int32_t>::min();
1642 }
1643
// VFP addressing mode 5: [Rn, #imm] with imm a multiple of 4 in
// [-1020, 1020]; no register offset allowed.
1644 bool isAddrMode5() const {
1645 // If we have an immediate that's not a constant, treat it as a label
1646 // reference needing a fixup. If it is a constant, it's something else
1647 // and we reject it.
1648 if (isImm() && !isa<MCConstantExpr>(getImm()))
1649 return true;
1650 if (!isGPRMem() || Memory.Alignment != 0) return false;
1651 // Check for register offset.
1652 if (Memory.OffsetRegNum) return false;
1653 // Immediate offset in range [-1020, 1020] and a multiple of 4.
1654 if (!Memory.OffsetImm) return true;
1655 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1656 int64_t Val = CE->getValue();
1657 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1658 Val == std::numeric_limits<int32_t>::min();
1659 }
1660 return false;
1661 }
1662
// FP16 variant: offset is a multiple of 2 in [-510, 510].
1663 bool isAddrMode5FP16() const {
1664 // If we have an immediate that's not a constant, treat it as a label
1665 // reference needing a fixup. If it is a constant, it's something else
1666 // and we reject it.
1667 if (isImm() && !isa<MCConstantExpr>(getImm()))
1668 return true;
1669 if (!isGPRMem() || Memory.Alignment != 0) return false;
1670 // Check for register offset.
1671 if (Memory.OffsetRegNum) return false;
1672 // Immediate offset in range [-510, 510] and a multiple of 2.
1673 if (!Memory.OffsetImm) return true;
1674 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1675 int64_t Val = CE->getValue();
1676 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1677 Val == std::numeric_limits<int32_t>::min();
1678 }
1679 return false;
1680 }
1681
1682 bool isMemTBB() const {
1683 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1684 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1685 return false;
1686 return true;
1687 }
1688
1689 bool isMemTBH() const {
1690 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1691 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1692 Memory.Alignment != 0 )
1693 return false;
1694 return true;
1695 }
1696
1697 bool isMemRegOffset() const {
1698 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1699 return false;
1700 return true;
1701 }
1702
// Thumb2 register-offset memory operand: positive offset, base != PC,
// and only lsl #0..#3 shifts permitted.
1703 bool isT2MemRegOffset() const {
1704 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1705 Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1706 return false;
1707 // Only lsl #{0, 1, 2, 3} allowed.
1708 if (Memory.ShiftType == ARM_AM::no_shift)
1709 return true;
1710 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1711 return false;
1712 return true;
1713 }
1714
1715 bool isMemThumbRR() const {
1716 // Thumb reg+reg addressing is simple. Just two registers, a base and
1717 // an offset. No shifts, negations or any other complicating factors.
1718 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1719 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1720 return false;
// Both registers must be low registers (r0-r7).
1721 return isARMLowRegister(Memory.BaseRegNum) &&
1722 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1723 }
1724
// Thumb reg+imm forms: low base register, immediate scaled by 4 / 2 / 1.
1725 bool isMemThumbRIs4() const {
1726 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1727 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1728 return false;
1729 // Immediate offset, multiple of 4 in range [0, 124].
1730 if (!Memory.OffsetImm) return true;
1731 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1732 int64_t Val = CE->getValue();
1733 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1734 }
1735 return false;
1736 }
1737
1738 bool isMemThumbRIs2() const {
1739 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1740 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1741 return false;
1742 // Immediate offset, multiple of 2 in range [0, 62].
1743 if (!Memory.OffsetImm) return true;
1744 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1745 int64_t Val = CE->getValue();
1746 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1747 }
1748 return false;
1749 }
1750
1751 bool isMemThumbRIs1() const {
1752 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1753 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1754 return false;
1755 // Immediate offset in range [0, 31].
1756 if (!Memory.OffsetImm) return true;
1757 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1758 int64_t Val = CE->getValue();
1759 return Val >= 0 && Val <= 31;
1760 }
1761 return false;
1762 }
1763
// SP-relative Thumb load/store: [sp, #imm], imm a multiple of 4 in [0, 1020].
1764 bool isMemThumbSPI() const {
1765 if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1766 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1767 return false;
1768 // Immediate offset, multiple of 4 in range [0, 1020].
1769 if (!Memory.OffsetImm) return true;
1770 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1771 int64_t Val = CE->getValue();
1772 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1773 }
1774 return false;
1775 }
1776
1777 bool isMemImm8s4Offset() const {
1778 // If we have an immediate that's not a constant, treat it as a label
1779 // reference needing a fixup. If it is a constant, it's something else
1780 // and we reject it.
1781 if (isImm() && !isa<MCConstantExpr>(getImm()))
1782 return true;
1783 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1784 return false;
1785 // Immediate offset a multiple of 4 in range [-1020, 1020].
1786 if (!Memory.OffsetImm) return true;
1787 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1788 int64_t Val = CE->getValue();
1789 // Special case, #-0 is std::numeric_limits<int32_t>::min().
1790 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1791 Val == std::numeric_limits<int32_t>::min();
1792 }
1793 return false;
1794 }
1795
1796 bool isMemImm7s4Offset() const {
1797 // If we have an immediate that's not a constant, treat it as a label
1798 // reference needing a fixup. If it is a constant, it's something else
1799 // and we reject it.
1800 if (isImm() && !isa<MCConstantExpr>(getImm()))
1801 return true;
1802 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1803 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1804 Memory.BaseRegNum))
1805 return false;
1806 // Immediate offset a multiple of 4 in range [-508, 508].
1807 if (!Memory.OffsetImm) return true;
1808 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1809 int64_t Val = CE->getValue();
1810 // Special case, #-0 is INT32_MIN.
1811 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN(-2147483647-1);
1812 }
1813 return false;
1814 }
1815
1816 bool isMemImm0_1020s4Offset() const {
1817 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1818 return false;
1819 // Immediate offset a multiple of 4 in range [0, 1020].
1820 if (!Memory.OffsetImm) return true;
1821 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1822 int64_t Val = CE->getValue();
1823 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1824 }
1825 return false;
1826 }
1827
1828 bool isMemImm8Offset() const {
1829 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1830 return false;
1831 // Base reg of PC isn't allowed for these encodings.
1832 if (Memory.BaseRegNum == ARM::PC) return false;
1833 // Immediate offset in range [-255, 255].
1834 if (!Memory.OffsetImm) return true;
1835 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1836 int64_t Val = CE->getValue();
1837 return (Val == std::numeric_limits<int32_t>::min()) ||
1838 (Val > -256 && Val < 256);
1839 }
1840 return false;
1841 }
1842
1843 template<unsigned Bits, unsigned RegClassID>
1844 bool isMemImm7ShiftedOffset() const {
1845 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1846 !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1847 return false;
1848
1849 // Expect an immediate offset equal to an element of the range
1850 // [-127, 127], shifted left by Bits.
1851
1852 if (!Memory.OffsetImm) return true;
1853 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1854 int64_t Val = CE->getValue();
1855
1856 // INT32_MIN is a special-case value (indicating the encoding with
1857 // zero offset and the subtract bit set)
1858 if (Val == INT32_MIN(-2147483647-1))
1859 return true;
1860
1861 unsigned Divisor = 1U << Bits;
1862
1863 // Check that the low bits are zero
1864 if (Val % Divisor != 0)
1865 return false;
1866
1867 // Check that the remaining offset is within range.
1868 Val /= Divisor;
1869 return (Val >= -127 && Val <= 127);
1870 }
1871 return false;
1872 }
1873
1874 template <int shift> bool isMemRegRQOffset() const {
1875 if (!isMVEMem() || Memory.OffsetImm != 0 || Memory.Alignment != 0)
1876 return false;
1877
1878 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1879 Memory.BaseRegNum))
1880 return false;
1881 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1882 Memory.OffsetRegNum))
1883 return false;
1884
1885 if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1886 return false;
1887
1888 if (shift > 0 &&
1889 (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1890 return false;
1891
1892 return true;
1893 }
1894
1895 template <int shift> bool isMemRegQOffset() const {
1896 if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1897 return false;
1898
1899 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1900 Memory.BaseRegNum))
1901 return false;
1902
1903 if (!Memory.OffsetImm)
1904 return true;
1905 static_assert(shift < 56,
1906 "Such that we dont shift by a value higher than 62");
1907 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1908 int64_t Val = CE->getValue();
1909
1910 // The value must be a multiple of (1 << shift)
1911 if ((Val & ((1U << shift) - 1)) != 0)
1912 return false;
1913
1914 // And be in the right range, depending on the amount that it is shifted
1915 // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
1916 // separately.
1917 int64_t Range = (1U << (7 + shift)) - 1;
1918 return (Val == INT32_MIN(-2147483647-1)) || (Val > -Range && Val < Range);
1919 }
1920 return false;
1921 }
1922
1923 bool isMemPosImm8Offset() const {
1924 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1925 return false;
1926 // Immediate offset in range [0, 255].
1927 if (!Memory.OffsetImm) return true;
1928 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1929 int64_t Val = CE->getValue();
1930 return Val >= 0 && Val < 256;
1931 }
1932 return false;
1933 }
1934
1935 bool isMemNegImm8Offset() const {
1936 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1937 return false;
1938 // Base reg of PC isn't allowed for these encodings.
1939 if (Memory.BaseRegNum == ARM::PC) return false;
1940 // Immediate offset in range [-255, -1].
1941 if (!Memory.OffsetImm) return false;
1942 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1943 int64_t Val = CE->getValue();
1944 return (Val == std::numeric_limits<int32_t>::min()) ||
1945 (Val > -256 && Val < 0);
1946 }
1947 return false;
1948 }
1949
1950 bool isMemUImm12Offset() const {
1951 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1952 return false;
1953 // Immediate offset in range [0, 4095].
1954 if (!Memory.OffsetImm) return true;
1955 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1956 int64_t Val = CE->getValue();
1957 return (Val >= 0 && Val < 4096);
1958 }
1959 return false;
1960 }
1961
1962 bool isMemImm12Offset() const {
1963 // If we have an immediate that's not a constant, treat it as a label
1964 // reference needing a fixup. If it is a constant, it's something else
1965 // and we reject it.
1966
1967 if (isImm() && !isa<MCConstantExpr>(getImm()))
1968 return true;
1969
1970 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1971 return false;
1972 // Immediate offset in range [-4095, 4095].
1973 if (!Memory.OffsetImm) return true;
1974 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1975 int64_t Val = CE->getValue();
1976 return (Val > -4096 && Val < 4096) ||
1977 (Val == std::numeric_limits<int32_t>::min());
1978 }
1979 // If we have an immediate that's not a constant, treat it as a
1980 // symbolic expression needing a fixup.
1981 return true;
1982 }
1983
1984 bool isConstPoolAsmImm() const {
1985 // Delay processing of Constant Pool Immediate, this will turn into
1986 // a constant. Match no other operand
1987 return (isConstantPoolImm());
1988 }
1989
1990 bool isPostIdxImm8() const {
1991 if (!isImm()) return false;
1992 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1993 if (!CE) return false;
1994 int64_t Val = CE->getValue();
1995 return (Val > -256 && Val < 256) ||
1996 (Val == std::numeric_limits<int32_t>::min());
1997 }
1998
1999 bool isPostIdxImm8s4() const {
2000 if (!isImm()) return false;
2001 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2002 if (!CE) return false;
2003 int64_t Val = CE->getValue();
2004 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2005 (Val == std::numeric_limits<int32_t>::min());
2006 }
2007
  // Simple kind tests for the MSR-mask, banked-register and processor
  // IFlags operand categories.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2011
2012 // NEON operands.
  // NEON/MVE vector lists.  A k_VectorList operand records the first
  // register, the register count, and whether the registers are consecutive
  // ("single spaced") or every other register ("double spaced").
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }

  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  // A list of two MVE Q registers (MQPR class).
  bool isVecListTwoMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 2 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }

  // A D-register pair, validated via the DPair register class rather than
  // the element count.
  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A double-spaced D-register pair (DPairSpc register class).
  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A list of four MVE Q registers (MQPR class).
  bool isVecListFourMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 4 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }
2070
  // "All lanes" vector lists (the k_VectorListAllLanes kind), again split
  // by single/double register spacing.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  // A D-register pair, validated via the DPair register class.
  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }
2114
  // Lane-indexed vector lists (the k_VectorListIndexed kind).  The maximum
  // permitted lane index depends on the element width: byte lanes go up to
  // 7, halfword lanes up to 3, and word lanes up to 1.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
2212
  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // True if this is a vector lane index strictly below NumLanes.
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  // Lane-count limits for 8/16/32/64-bit elements.
  bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

  // A vector lane index that is exactly one of the two permitted values.
  template<int PermittedValue, int OtherPermittedValue>
  bool isMVEPairVectorIndex() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val == PermittedValue ||
           VectorIndex.Val == OtherPermittedValue;
  }
2232
2233 bool isNEONi8splat() const {
2234 if (!isImm()) return false;
2235 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2236 // Must be a constant.
2237 if (!CE) return false;
2238 int64_t Value = CE->getValue();
2239 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2240 // value.
2241 return Value >= 0 && Value < 256;
2242 }
2243
2244 bool isNEONi16splat() const {
2245 if (isNEONByteReplicate(2))
2246 return false; // Leave that for bytes replication and forbid by default.
2247 if (!isImm())
2248 return false;
2249 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2250 // Must be a constant.
2251 if (!CE) return false;
2252 unsigned Value = CE->getValue();
2253 return ARM_AM::isNEONi16splat(Value);
2254 }
2255
2256 bool isNEONi16splatNot() const {
2257 if (!isImm())
2258 return false;
2259 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2260 // Must be a constant.
2261 if (!CE) return false;
2262 unsigned Value = CE->getValue();
2263 return ARM_AM::isNEONi16splat(~Value & 0xffff);
2264 }
2265
2266 bool isNEONi32splat() const {
2267 if (isNEONByteReplicate(4))
2268 return false; // Leave that for bytes replication and forbid by default.
2269 if (!isImm())
2270 return false;
2271 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2272 // Must be a constant.
2273 if (!CE) return false;
2274 unsigned Value = CE->getValue();
2275 return ARM_AM::isNEONi32splat(Value);
2276 }
2277
2278 bool isNEONi32splatNot() const {
2279 if (!isImm())
2280 return false;
2281 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2282 // Must be a constant.
2283 if (!CE) return false;
2284 unsigned Value = CE->getValue();
2285 return ARM_AM::isNEONi32splat(~Value);
2286 }
2287
2288 static bool isValidNEONi32vmovImm(int64_t Value) {
2289 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2290 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2291 return ((Value & 0xffffffffffffff00) == 0) ||
2292 ((Value & 0xffffffffffff00ff) == 0) ||
2293 ((Value & 0xffffffffff00ffff) == 0) ||
2294 ((Value & 0xffffffff00ffffff) == 0) ||
2295 ((Value & 0xffffffffffff00ff) == 0xff) ||
2296 ((Value & 0xffffffffff00ffff) == 0xffff);
2297 }
2298
2299 bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2300 assert((Width == 8 || Width == 16 || Width == 32) &&(static_cast<void> (0))
2301 "Invalid element width")(static_cast<void> (0));
2302 assert(NumElems * Width <= 64 && "Invalid result width")(static_cast<void> (0));
2303
2304 if (!isImm())
2305 return false;
2306 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2307 // Must be a constant.
2308 if (!CE)
2309 return false;
2310 int64_t Value = CE->getValue();
2311 if (!Value)
2312 return false; // Don't bother with zero.
2313 if (Inv)
2314 Value = ~Value;
2315
2316 uint64_t Mask = (1ull << Width) - 1;
2317 uint64_t Elem = Value & Mask;
2318 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2319 return false;
2320 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2321 return false;
2322
2323 for (unsigned i = 1; i < NumElems; ++i) {
2324 Value >>= Width;
2325 if ((Value & Mask) != Elem)
2326 return false;
2327 }
2328 return true;
2329 }
2330
  // A value consisting of NumBytes copies of a single byte.
  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }
2334
2335 static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2336 assert((FromW == 8 || FromW == 16 || FromW == 32) &&(static_cast<void> (0))
2337 "Invalid source width")(static_cast<void> (0));
2338 assert((ToW == 16 || ToW == 32 || ToW == 64) &&(static_cast<void> (0))
2339 "Invalid destination width")(static_cast<void> (0));
2340 assert(FromW < ToW && "ToW is not less than FromW")(static_cast<void> (0));
2341 }
2342
  // A ToW-bit immediate made of replicated FromW-bit elements, excluding
  // 64-bit patterns that are matched as an i64 splat instead.
  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }
2350
  // Like isNEONmovReplicate, but tests the bitwise complement of the value.
  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }
2356
2357 bool isNEONi32vmov() const {
2358 if (isNEONByteReplicate(4))
2359 return false; // Let it to be classified as byte-replicate case.
2360 if (!isImm())
2361 return false;
2362 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2363 // Must be a constant.
2364 if (!CE)
2365 return false;
2366 return isValidNEONi32vmovImm(CE->getValue());
2367 }
2368
2369 bool isNEONi32vmovNeg() const {
2370 if (!isImm()) return false;
2371 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2372 // Must be a constant.
2373 if (!CE) return false;
2374 return isValidNEONi32vmovImm(~CE->getValue());
2375 }
2376
2377 bool isNEONi64splat() const {
2378 if (!isImm()) return false;
2379 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2380 // Must be a constant.
2381 if (!CE) return false;
2382 uint64_t Value = CE->getValue();
2383 // i64 value with each byte being either 0 or 0xff.
2384 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2385 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2386 return true;
2387 }
2388
2389 template<int64_t Angle, int64_t Remainder>
2390 bool isComplexRotation() const {
2391 if (!isImm()) return false;
2392
2393 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2394 if (!CE) return false;
2395 uint64_t Value = CE->getValue();
2396
2397 return (Value % Angle == Remainder && Value <= 270);
2398 }
2399
2400 bool isMVELongShift() const {
2401 if (!isImm()) return false;
2402 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2403 // Must be a constant.
2404 if (!CE) return false;
2405 uint64_t Value = CE->getValue();
2406 return Value >= 1 && Value <= 32;
2407 }
2408
2409 bool isMveSaturateOp() const {
2410 if (!isImm()) return false;
2411 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2412 if (!CE) return false;
2413 uint64_t Value = CE->getValue();
2414 return Value == 48 || Value == 64;
2415 }
2416
  // Condition-code subsets.  "NoAL" excludes the always condition; the
  // "Restricted" variants accept only the integer-equality, signed,
  // unsigned, or floating-point comparison groups respectively.
  bool isITCondCodeNoAL() const {
    if (!isITCondCode()) return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC != ARMCC::AL;
  }

  // Equality conditions only: EQ or NE.
  bool isITCondCodeRestrictedI() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::EQ || CC == ARMCC::NE;
  }

  // Signed comparisons only: LT, GT, LE or GE.
  bool isITCondCodeRestrictedS() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
           CC == ARMCC::GE;
  }

  // Unsigned comparisons only: HS or HI.
  bool isITCondCodeRestrictedU() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::HS || CC == ARMCC::HI;
  }

  // Equality plus signed comparisons: EQ, NE, LT, GT, LE or GE.
  bool isITCondCodeRestrictedFP() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
           CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
  }
2452
2453 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2454 // Add as immediates when possible. Null MCExpr = 0.
2455 if (!Expr)
2456 Inst.addOperand(MCOperand::createImm(0));
2457 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2458 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2459 else
2460 Inst.addOperand(MCOperand::createExpr(Expr));
2461 }
2462
2463 void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2464 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2465 addExpr(Inst, getImm());
2466 }
2467
2468 void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2469 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2470 addExpr(Inst, getImm());
2471 }
2472
2473 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2474 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
2475 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2476 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2477 Inst.addOperand(MCOperand::createReg(RegNum));
2478 }
2479
2480 void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2481 assert(N == 3 && "Invalid number of operands!")(static_cast<void> (0));
2482 Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2483 unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2484 Inst.addOperand(MCOperand::createReg(RegNum));
2485 Inst.addOperand(MCOperand::createReg(0));
2486 }
2487
2488 void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2489 assert(N == 4 && "Invalid number of operands!")(static_cast<void> (0));
2490 addVPTPredNOperands(Inst, N-1);
2491 unsigned RegNum;
2492 if (getVPTPred() == ARMVCC::None) {
2493 RegNum = 0;
2494 } else {
2495 unsigned NextOpIndex = Inst.getNumOperands();
2496 const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
2497 int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2498 assert(TiedOp >= 0 &&(static_cast<void> (0))
2499 "Inactive register in vpred_r is not tied to an output!")(static_cast<void> (0));
2500 RegNum = Inst.getOperand(TiedOp).getReg();
2501 }
2502 Inst.addOperand(MCOperand::createReg(RegNum));
2503 }
2504
2505 void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2506 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2507 Inst.addOperand(MCOperand::createImm(getCoproc()));
2508 }
2509
2510 void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2511 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2512 Inst.addOperand(MCOperand::createImm(getCoproc()));
2513 }
2514
2515 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2516 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2517 Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2518 }
2519
2520 void addITMaskOperands(MCInst &Inst, unsigned N) const {
2521 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2522 Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2523 }
2524
2525 void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2526 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2527 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2528 }
2529
2530 void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2531 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2532 Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2533 }
2534
2535 void addCCOutOperands(MCInst &Inst, unsigned N) const {
2536 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2537 Inst.addOperand(MCOperand::createReg(getReg()));
2538 }
2539
2540 void addRegOperands(MCInst &Inst, unsigned N) const {
2541 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2542 Inst.addOperand(MCOperand::createReg(getReg()));
2543 }
2544
2545 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2546 assert(N == 3 && "Invalid number of operands!")(static_cast<void> (0));
2547 assert(isRegShiftedReg() &&(static_cast<void> (0))
2548 "addRegShiftedRegOperands() on non-RegShiftedReg!")(static_cast<void> (0));
2549 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2550 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2551 Inst.addOperand(MCOperand::createImm(
2552 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2553 }
2554
2555 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2556 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
2557 assert(isRegShiftedImm() &&(static_cast<void> (0))
2558 "addRegShiftedImmOperands() on non-RegShiftedImm!")(static_cast<void> (0));
2559 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2560 // Shift of #32 is encoded as 0 where permitted
2561 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2562 Inst.addOperand(MCOperand::createImm(
2563 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2564 }
2565
2566 void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2567 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2568 Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2569 ShifterImm.Imm));
2570 }
2571
2572 void addRegListOperands(MCInst &Inst, unsigned N) const {
2573 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2574 const SmallVectorImpl<unsigned> &RegList = getRegList();
2575 for (SmallVectorImpl<unsigned>::const_iterator
2576 I = RegList.begin(), E = RegList.end(); I != E; ++I)
2577 Inst.addOperand(MCOperand::createReg(*I));
2578 }
2579
2580 void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2581 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2582 const SmallVectorImpl<unsigned> &RegList = getRegList();
2583 for (SmallVectorImpl<unsigned>::const_iterator
2584 I = RegList.begin(), E = RegList.end(); I != E; ++I)
2585 Inst.addOperand(MCOperand::createReg(*I));
2586 }
2587
  // The following register-list flavors all encode exactly like a plain
  // register list; they exist only as distinct operand classes for the
  // matcher.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2603
2604 void addRotImmOperands(MCInst &Inst, unsigned N) const {
2605 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2606 // Encoded as val>>3. The printer handles display as 8, 16, 24.
2607 Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2608 }
2609
2610 void addModImmOperands(MCInst &Inst, unsigned N) const {
2611 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2612
2613 // Support for fixups (MCFixup)
2614 if (isImm())
2615 return addImmOperands(Inst, N);
2616
2617 Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2618 }
2619
2620 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2621 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2622 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2623 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2624 Inst.addOperand(MCOperand::createImm(Enc));
2625 }
2626
2627 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2628 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2629 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2630 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2631 Inst.addOperand(MCOperand::createImm(Enc));
2632 }
2633
2634 void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2635 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2636 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2637 uint32_t Val = -CE->getValue();
2638 Inst.addOperand(MCOperand::createImm(Val));
2639 }
2640
2641 void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2642 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2643 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2644 uint32_t Val = -CE->getValue();
2645 Inst.addOperand(MCOperand::createImm(Val));
2646 }
2647
2648 void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2649 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2650 // Munge the lsb/width into a bitfield mask.
2651 unsigned lsb = Bitfield.LSB;
2652 unsigned width = Bitfield.Width;
2653 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2654 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2655 (32 - (lsb + width)));
2656 Inst.addOperand(MCOperand::createImm(Mask));
2657 }
2658
// Add a generic immediate operand; addExpr folds constants and emits
// expressions (fixups) otherwise.
void addImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getImm());
}
2663
2664 void addFBits16Operands(MCInst &Inst, unsigned N) const {
2665 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2666 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2667 Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2668 }
2669
2670 void addFBits32Operands(MCInst &Inst, unsigned N) const {
2671 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2672 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2673 Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2674 }
2675
2676 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2677 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2678 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2679 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2680 Inst.addOperand(MCOperand::createImm(Val));
2681 }
2682
2683 void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2684 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2685 // FIXME: We really want to scale the value here, but the LDRD/STRD
2686 // instruction don't encode operands that way yet.
2687 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2688 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2689 }
2690
2691 void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2692 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2693 // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2694 // instruction don't encode operands that way yet.
2695 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2696 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2697 }
2698
2699 void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2700 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2701 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2702 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2703 }
2704
2705 void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2706 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2707 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2708 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2709 }
2710
2711 void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2712 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2713 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2714 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2715 }
2716
2717 void addImm7Operands(MCInst &Inst, unsigned N) const {
2718 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2719 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2720 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2721 }
2722
2723 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2724 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2725 // The immediate is scaled by four in the encoding and is stored
2726 // in the MCInst as such. Lop off the low two bits here.
2727 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2728 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2729 }
2730
2731 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2732 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2733 // The immediate is scaled by four in the encoding and is stored
2734 // in the MCInst as such. Lop off the low two bits here.
2735 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2736 Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2737 }
2738
2739 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2740 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2741 // The immediate is scaled by four in the encoding and is stored
2742 // in the MCInst as such. Lop off the low two bits here.
2743 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2744 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2745 }
2746
2747 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2748 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2749 // The constant encodes as the immediate-1, and we store in the instruction
2750 // the bits as encoded, so subtract off one here.
2751 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2752 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2753 }
2754
2755 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2756 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2757 // The constant encodes as the immediate-1, and we store in the instruction
2758 // the bits as encoded, so subtract off one here.
2759 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2760 Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2761 }
2762
2763 void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2764 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2765 // The constant encodes as the immediate, except for 32, which encodes as
2766 // zero.
2767 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2768 unsigned Imm = CE->getValue();
2769 Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2770 }
2771
2772 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2773 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2774 // An ASR value of 32 encodes as 0, so that's how we want to add it to
2775 // the instruction as well.
2776 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2777 int Val = CE->getValue();
2778 Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2779 }
2780
2781 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2782 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2783 // The operand is actually a t2_so_imm, but we have its bitwise
2784 // negation in the assembly source, so twiddle it here.
2785 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2786 Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2787 }
2788
2789 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2790 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2791 // The operand is actually a t2_so_imm, but we have its
2792 // negation in the assembly source, so twiddle it here.
2793 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2794 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2795 }
2796
2797 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2798 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2799 // The operand is actually an imm0_4095, but we have its
2800 // negation in the assembly source, so twiddle it here.
2801 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2802 Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2803 }
2804
2805 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2806 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2807 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2808 return;
2809 }
2810 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2811 Inst.addOperand(MCOperand::createExpr(SR));
2812 }
2813
// Add the single operand for a Thumb PC-relative load: either a bare
// immediate/label, or the offset of a pc-based memory operand.
void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  if (isImm()) {
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (CE) {
      // Constant immediate: emit directly.
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
      return;
    }
    // Non-constant immediate: a label reference needing a fixup.
    const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
    Inst.addOperand(MCOperand::createExpr(SR));
    return;
  }

  // Memory form: the offset is expected to be constant (asserted), but the
  // expression path is kept as a release-build fallback.
  assert(isGPRMem() && "Unknown value type!");
  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
  if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  else
    Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
}
2834
2835 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2836 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2837 Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2838 }
2839
2840 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2841 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2842 Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2843 }
2844
2845 void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2846 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2847 Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2848 }
2849
// Memory reference with no offset: just the base register.
void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
2854
// T2 memory reference with no offset: just the base register.
void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
2859
// T2 (non-SP base) memory reference with no offset: just the base register.
void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
2864
// Thumb memory reference with no offset: just the base register.
void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
2869
2870 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2871 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2872 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2873 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2874 else
2875 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2876 }
2877
2878 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2879 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
2880 assert(isImm() && "Not an immediate!")(static_cast<void> (0));
2881
2882 // If we have an immediate that's not a constant, treat it as a label
2883 // reference needing a fixup.
2884 if (!isa<MCConstantExpr>(getImm())) {
2885 Inst.addOperand(MCOperand::createExpr(getImm()));
2886 return;
2887 }
2888
2889 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2890 int Val = CE->getValue();
2891 Inst.addOperand(MCOperand::createImm(Val));
2892 }
2893
2894 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2895 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
2896 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2897 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2898 }
2899
// Forwards to the generic aligned-memory encoding.
void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2903
// Forwards to the generic aligned-memory encoding.
void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2907
// Forwards to the generic aligned-memory encoding.
void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2911
// Forwards to the generic aligned-memory encoding.
void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2915
// Forwards to the generic aligned-memory encoding.
void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2919
// Forwards to the generic aligned-memory encoding.
void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2923
// Forwards to the generic aligned-memory encoding.
void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2927
// Forwards to the generic aligned-memory encoding.
void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2931
// Forwards to the generic aligned-memory encoding.
void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2935
// Forwards to the generic aligned-memory encoding.
void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2939
// Forwards to the generic aligned-memory encoding.
void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2943
// Add an A32 addrmode2 reference: base reg, offset reg (0 if none), and a
// combined AM2 opcode immediate (add/sub flag, offset/shift, shift type).
void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  if (!Memory.OffsetRegNum) {
    if (!Memory.OffsetImm)
      // No offset at all: encode as +0.
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int32_t Val = CE->getValue();
      // The sign is carried in the AM2 opcode, not the magnitude.
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: INT32_MIN is the parser's marker for it, and
      // it must also not be negated (that would overflow).
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
      Inst.addOperand(MCOperand::createImm(Val));
    } else
      // Non-constant offset: leave as an expression for fixup.
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  } else {
    // For register offset, we encode the shift type and negation flag
    // here.
    int32_t Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::createImm(Val));
  }
}
2972
// Add a post-indexed AM2 immediate offset: a zero register slot plus the
// AM2-encoded (sign, magnitude, no-shift) immediate.
void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  assert(CE && "non-constant AM2OffsetImm operand!");
  int32_t Val = CE->getValue();
  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
  // Special case for #-0 (INT32_MIN marker); also avoids negation overflow.
  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
  if (Val < 0) Val = -Val;
  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
  Inst.addOperand(MCOperand::createReg(0));
  Inst.addOperand(MCOperand::createImm(Val));
}
2986
// Add an A32 addrmode3 reference: base reg, offset reg (0 if none), and
// the AM3-encoded (sign, magnitude) immediate.
void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm()) {
    Inst.addOperand(MCOperand::createExpr(getImm()));
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(0));
    return;
  }

  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  if (!Memory.OffsetRegNum) {
    if (!Memory.OffsetImm)
      // No offset: encode as +0.
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int32_t Val = CE->getValue();
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0 (INT32_MIN marker); also avoids negation
      // overflow.
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
      Inst.addOperand(MCOperand::createImm(Val));
    } else
      // Non-constant offset: leave as an expression for fixup.
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  } else {
    // For register offset, we encode the shift type and negation flag
    // here.
    int32_t Val =
      ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    Inst.addOperand(MCOperand::createImm(Val));
  }
}
3024
3025 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3026 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3027 if (Kind == k_PostIndexRegister) {
3028 int32_t Val =
3029 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3030 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3031 Inst.addOperand(MCOperand::createImm(Val));
3032 return;
3033 }
3034
3035 // Constant offset.
3036 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3037 int32_t Val = CE->getValue();
3038 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3039 // Special case for #-0
3040 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3041 if (Val < 0) Val = -Val;
3042 Val = ARM_AM::getAM3Opc(AddSub, Val);
3043 Inst.addOperand(MCOperand::createReg(0));
3044 Inst.addOperand(MCOperand::createImm(Val));
3045 }
3046
// Add an addrmode5 (VFP load/store) reference: base reg plus the
// AM5-encoded (sign, magnitude/4) immediate.
void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm()) {
    Inst.addOperand(MCOperand::createExpr(getImm()));
    Inst.addOperand(MCOperand::createImm(0));
    return;
  }

  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  if (!Memory.OffsetImm)
    Inst.addOperand(MCOperand::createImm(0));
  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = CE->getValue() / 4;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0 (INT32_MIN marker); also avoids negation
    // overflow.
    if (Val == std::numeric_limits<int32_t>::min())
      Val = 0;
    if (Val < 0)
      Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createImm(Val));
  } else
    // Non-constant offset: leave as an expression for fixup.
    Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
}
3075
// Add an addrmode5 FP16 reference: base reg plus the AM5FP16-encoded
// (sign, magnitude/2) immediate.
void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm()) {
    Inst.addOperand(MCOperand::createExpr(getImm()));
    Inst.addOperand(MCOperand::createImm(0));
    return;
  }

  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  // The lower bit is always zero and as such is not encoded.
  if (!Memory.OffsetImm)
    Inst.addOperand(MCOperand::createImm(0));
  else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
    int32_t Val = CE->getValue() / 2;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0 (INT32_MIN marker); also avoids negation
    // overflow.
    if (Val == std::numeric_limits<int32_t>::min())
      Val = 0;
    if (Val < 0)
      Val = -Val;
    Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createImm(Val));
  } else
    // Non-constant offset: leave as an expression for fixup.
    Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
}
3104
3105 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3106 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3107 // If we have an immediate that's not a constant, treat it as a label
3108 // reference needing a fixup. If it is a constant, it's something else
3109 // and we reject it.
3110 if (isImm()) {
3111 Inst.addOperand(MCOperand::createExpr(getImm()));
3112 Inst.addOperand(MCOperand::createImm(0));
3113 return;
3114 }
3115
3116 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3117 addExpr(Inst, Memory.OffsetImm);
3118 }
3119
3120 void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3121 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3122 // If we have an immediate that's not a constant, treat it as a label
3123 // reference needing a fixup. If it is a constant, it's something else
3124 // and we reject it.
3125 if (isImm()) {
3126 Inst.addOperand(MCOperand::createExpr(getImm()));
3127 Inst.addOperand(MCOperand::createImm(0));
3128 return;
3129 }
3130
3131 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3132 addExpr(Inst, Memory.OffsetImm);
3133 }
3134
3135 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3136 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3137 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3138 if (!Memory.OffsetImm)
3139 Inst.addOperand(MCOperand::createImm(0));
3140 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3141 // The lower two bits are always zero and as such are not encoded.
3142 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3143 else
3144 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3145 }
3146
// Base register plus immediate offset (folded or fixed up by addExpr).
void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  addExpr(Inst, Memory.OffsetImm);
}
3152
// Base register plus Q-register offset (MVE scatter/gather form).
void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
3158
3159 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3160 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3161 // If this is an immediate, it's a label reference.
3162 if (isImm()) {
3163 addExpr(Inst, getImm());
3164 Inst.addOperand(MCOperand::createImm(0));
3165 return;
3166 }
3167
3168 // Otherwise, it's a normal memory reg+offset.
3169 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3170 addExpr(Inst, Memory.OffsetImm);
3171 }
3172
3173 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3174 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3175 // If this is an immediate, it's a label reference.
3176 if (isImm()) {
3177 addExpr(Inst, getImm());
3178 Inst.addOperand(MCOperand::createImm(0));
3179 return;
3180 }
3181
3182 // Otherwise, it's a normal memory reg+offset.
3183 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3184 addExpr(Inst, Memory.OffsetImm);
3185 }
3186
// Container for the immediate from which a constant pool entry will be
// created (ldr rX, =imm pseudo).
void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getConstantPoolImm());
}
3193
// TBB table branch: base register plus index register.
void addMemTBBOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
3199
// TBH table branch: base register plus index register (lsl #1 implied).
void addMemTBHOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
3205
3206 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3207 assert(N == 3 && "Invalid number of operands!")(static_cast<void> (0));
3208 unsigned Val =
3209 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3210 Memory.ShiftImm, Memory.ShiftType);
3211 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3212 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3213 Inst.addOperand(MCOperand::createImm(Val));
3214 }
3215
// T2 register-offset memory: base reg, offset reg, and left-shift amount.
void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
}
3222
// Thumb register+register memory: base plus offset register.
void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
}
3228
3229 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3230 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3231 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3232 if (!Memory.OffsetImm)
3233 Inst.addOperand(MCOperand::createImm(0));
3234 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3235 // The lower two bits are always zero and as such are not encoded.
3236 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3237 else
3238 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3239 }
3240
3241 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3242 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3243 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3244 if (!Memory.OffsetImm)
3245 Inst.addOperand(MCOperand::createImm(0));
3246 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3247 Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3248 else
3249 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3250 }
3251
// Thumb base + unscaled immediate offset.
void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  addExpr(Inst, Memory.OffsetImm);
}
3257
3258 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3259 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3260 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3261 if (!Memory.OffsetImm)
3262 Inst.addOperand(MCOperand::createImm(0));
3263 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3264 // The lower two bits are always zero and as such are not encoded.
3265 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3266 else
3267 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3268 }
3269
// Post-indexed 8-bit immediate: magnitude in the low bits, add/sub flag in
// bit 8.
void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  assert(CE && "non-constant post-idx-imm8 operand!");
  int Imm = CE->getValue();
  bool isAdd = Imm >= 0;
  // INT32_MIN is the parser's marker for #-0; clear it (and avoid negation
  // overflow below).
  if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
  Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
  Inst.addOperand(MCOperand::createImm(Imm));
}
3280
// Post-indexed 8-bit, 4-scaled immediate: magnitude/4 in the low bits,
// add/sub flag in bit 8.
void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  assert(CE && "non-constant post-idx-imm8s4 operand!");
  int Imm = CE->getValue();
  bool isAdd = Imm >= 0;
  // INT32_MIN is the parser's marker for #-0; clear it (and avoid negation
  // overflow below).
  if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
  // Immediate is scaled by 4.
  Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
  Inst.addOperand(MCOperand::createImm(Imm));
}
3292
3293 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3294 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3295 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3296 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3297 }
3298
3299 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3300 assert(N == 2 && "Invalid number of operands!")(static_cast<void> (0));
3301 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3302 // The sign, shift type, and shift amount are encoded in a single operand
3303 // using the AM2 encoding helpers.
3304 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3305 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3306 PostIdxReg.ShiftTy);
3307 Inst.addOperand(MCOperand::createImm(Imm));
3308 }
3309
3310 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3311 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
3312 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3313 Inst.addOperand(MCOperand::createImm(CE->getValue()));
3314 }
3315
3316 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3317 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
3318 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3319 }
3320
// Banked register (MRS/MSR banked forms): raw encoding value.
void addBankedRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
}
3325
// Processor interrupt flags (CPS aif): raw encoding value.
void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
}
3330
// Vector list: represented by the base register of the list.
void addVecListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
}
3335
void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");

  // When we come here, the VectorList field will identify a range
  // of q-registers by its base register and length, and it will
  // have already been error-checked to be the expected length of
  // range and contain only q-regs in the range q0-q7. So we can
  // count on the base register being in the range q0-q6 (for 2
  // regs) or q0-q4 (for 4)
  //
  // The MVE instructions taking a register range of this kind will
  // need an operand in the MQQPR or MQQQQPR class, representing the
  // entire range as a unit. So we must translate into that class,
  // by finding the index of the base register in the MQPR reg
  // class, and returning the super-register at the corresponding
  // index in the target class.

  const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
  const MCRegisterClass *RC_out =
    (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
                            : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];

  // Linear scan for the base register's index within MQPR.
  unsigned I, E = RC_out->getNumRegs();
  for (I = 0; I < E; I++)
    if (RC_in->getRegister(I) == VectorList.RegNum)
      break;
  assert(I < E && "Invalid vector list start register!");

  Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
}
3366
  // Emit a lane-indexed vector list: the base register plus the lane index
  // as a separate immediate operand.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
  }
3372
  // Emit an i8-element vector lane index as an immediate operand.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3377
  // Emit an i16-element vector lane index as an immediate operand.
  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3382
  // Emit an i32-element vector lane index as an immediate operand.
  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3387
  // Emit an i64-element vector lane index as an immediate operand.
  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3392
  // Emit an MVE vector lane index as an immediate operand.
  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3397
  // Emit a lane index into an MVE register pair as an immediate operand.
  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3402
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat (cmode = 0b1110, i.e. 0xe00).
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
  }
3410
  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value:
    // encodeNEONi16splat folds in the cmode bits for a 16-bit splat.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }
3419
  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Bitwise-NOT the constant (within 16 bits) before encoding, for the
    // complemented (VMVN-style) form.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
    Inst.addOperand(MCOperand::createImm(Value));
  }
3428
  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value:
    // encodeNEONi32splat folds in the cmode bits for a 32-bit splat.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }
3437
  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Bitwise-NOT the constant before encoding (complemented form).
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(~Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }
3446
3447 void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3448 // The immediate encodes the type of constant as well as the value.
3449 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3450 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||(static_cast<void> (0))
3451 Inst.getOpcode() == ARM::VMOVv16i8) &&(static_cast<void> (0))
3452 "All instructions that wants to replicate non-zero byte "(static_cast<void> (0))
3453 "always must be replaced with VMOVv8i8 or VMOVv16i8.")(static_cast<void> (0));
3454 unsigned Value = CE->getValue();
3455 if (Inv)
3456 Value = ~Value;
3457 unsigned B = Value & 0xff;
3458 B |= 0xe00; // cmode = 0b1110
3459 Inst.addOperand(MCOperand::createImm(B));
3460 }
3461
  // Inverted (VMVN-style) byte replicate: delegate with Inv = true.
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }
3466
3467 static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3468 if (Value >= 256 && Value <= 0xffff)
3469 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3470 else if (Value > 0xffff && Value <= 0xffffff)
3471 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3472 else if (Value > 0xffffff)
3473 Value = (Value >> 24) | 0x600;
3474 return Value;
3475 }
3476
  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value
    // (see encodeNeonVMOVImmediate for the cmode folding).
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }
3484
  // Non-inverted (VMOV-style) byte replicate: delegate with Inv = false.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }
3489
  // Encode a half-word replicate for V{MOV,MVN}v{4,8}i16: keep the low
  // 16 bits and, if the value does not fit in a byte, shift it down and
  // set cmode bit 0x200 to place it in the high byte of each half-word.
  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
            Inst.getOpcode() == ARM::VMOVv8i16 ||
            Inst.getOpcode() == ARM::VMVNv4i16 ||
            Inst.getOpcode() == ARM::VMVNv8i16) &&
           "All instructions that want to replicate non-zero half-word "
           "always must be replaced with V{MOV,MVN}v{4,8}i16.");
    uint64_t Value = CE->getValue();
    unsigned Elem = Value & 0xffff;
    if (Elem >= 256)
      Elem = (Elem >> 8) | 0x200;
    Inst.addOperand(MCOperand::createImm(Elem));
  }
3505
  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Bitwise-NOT the constant first (complemented VMVN form).
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }
3513
  // Encode a word replicate for V{MOV,MVN}v{2,4}i32: truncate to 32 bits
  // and fold in the cmode bits via encodeNeonVMOVImmediate.
  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
            Inst.getOpcode() == ARM::VMOVv4i32 ||
            Inst.getOpcode() == ARM::VMVNv2i32 ||
            Inst.getOpcode() == ARM::VMVNv4i32) &&
           "All instructions that want to replicate non-zero word "
           "always must be replaced with V{MOV,MVN}v{2,4}i32.");
    uint64_t Value = CE->getValue();
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    Inst.addOperand(MCOperand::createImm(Elem));
  }
3527
3528 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3529 assert(N == 1 && "Invalid number of operands!")(static_cast<void> (0));
3530 // The immediate encodes the type of constant as well as the value.
3531 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3532 uint64_t Value = CE->getValue();
3533 unsigned Imm = 0;
3534 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3535 Imm |= (Value & 1) << i;
3536 }
3537 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3538 }
3539
  // Map an even complex rotation (0/90/180/270 degrees) to its 2-bit
  // encoding (value / 90).
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
  }
3545
  // Map an odd complex rotation (90/270 degrees) to its 1-bit encoding:
  // 90 -> 0, 270 -> 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
  }
3551
  // Encode the MVE saturate-width operand: #48 -> 1, #64 -> 0.
  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
    Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
  }
3559
3560 void print(raw_ostream &OS) const override;
3561
3562 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3563 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3564 Op->ITMask.Mask = Mask;
3565 Op->StartLoc = S;
3566 Op->EndLoc = S;
3567 return Op;
3568 }
3569
3570 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3571 SMLoc S) {
3572 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3573 Op->CC.Val = CC;
3574 Op->StartLoc = S;
3575 Op->EndLoc = S;
3576 return Op;
3577 }
3578
3579 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3580 SMLoc S) {
3581 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3582 Op->VCC.Val = CC;
3583 Op->StartLoc = S;
3584 Op->EndLoc = S;
3585 return Op;
3586 }
3587
3588 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3589 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3590 Op->Cop.Val = CopVal;
3591 Op->StartLoc = S;
3592 Op->EndLoc = S;
3593 return Op;
3594 }
3595
3596 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3597 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3598 Op->Cop.Val = CopVal;
3599 Op->StartLoc = S;
3600 Op->EndLoc = S;
3601 return Op;
3602 }
3603
3604 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3605 SMLoc E) {
3606 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3607 Op->Cop.Val = Val;
3608 Op->StartLoc = S;
3609 Op->EndLoc = E;
3610 return Op;
3611 }
3612
3613 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3614 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3615 Op->Reg.RegNum = RegNum;
3616 Op->StartLoc = S;
3617 Op->EndLoc = S;
3618 return Op;
3619 }
3620
3621 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3622 auto Op = std::make_unique<ARMOperand>(k_Token);
3623 Op->Tok.Data = Str.data();
3624 Op->Tok.Length = Str.size();
3625 Op->StartLoc = S;
3626 Op->EndLoc = S;
3627 return Op;
3628 }
3629
3630 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3631 SMLoc E) {
3632 auto Op = std::make_unique<ARMOperand>(k_Register);
3633 Op->Reg.RegNum = RegNum;
3634 Op->StartLoc = S;
3635 Op->EndLoc = E;
3636 return Op;
3637 }
3638
3639 static std::unique_ptr<ARMOperand>
3640 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3641 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3642 SMLoc E) {
3643 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3644 Op->RegShiftedReg.ShiftTy = ShTy;
3645 Op->RegShiftedReg.SrcReg = SrcReg;
3646 Op->RegShiftedReg.ShiftReg = ShiftReg;
3647 Op->RegShiftedReg.ShiftImm = ShiftImm;
3648 Op->StartLoc = S;
3649 Op->EndLoc = E;
3650 return Op;
3651 }
3652
3653 static std::unique_ptr<ARMOperand>
3654 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3655 unsigned ShiftImm, SMLoc S, SMLoc E) {
3656 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3657 Op->RegShiftedImm.ShiftTy = ShTy;
3658 Op->RegShiftedImm.SrcReg = SrcReg;
3659 Op->RegShiftedImm.ShiftImm = ShiftImm;
3660 Op->StartLoc = S;
3661 Op->EndLoc = E;
3662 return Op;
3663 }
3664
3665 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3666 SMLoc S, SMLoc E) {
3667 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3668 Op->ShifterImm.isASR = isASR;
3669 Op->ShifterImm.Imm = Imm;
3670 Op->StartLoc = S;
3671 Op->EndLoc = E;
3672 return Op;
3673 }
3674
3675 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3676 SMLoc E) {
3677 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3678 Op->RotImm.Imm = Imm;
3679 Op->StartLoc = S;
3680 Op->EndLoc = E;
3681 return Op;
3682 }
3683
3684 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3685 SMLoc S, SMLoc E) {
3686 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3687 Op->ModImm.Bits = Bits;
3688 Op->ModImm.Rot = Rot;
3689 Op->StartLoc = S;
3690 Op->EndLoc = E;
3691 return Op;
3692 }
3693
3694 static std::unique_ptr<ARMOperand>
3695 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3696 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3697 Op->Imm.Val = Val;
3698 Op->StartLoc = S;
3699 Op->EndLoc = E;
3700 return Op;
3701 }
3702
3703 static std::unique_ptr<ARMOperand>
3704 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3705 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3706 Op->Bitfield.LSB = LSB;
3707 Op->Bitfield.Width = Width;
3708 Op->StartLoc = S;
3709 Op->EndLoc = E;
3710 return Op;
3711 }
3712
  // Build a register-list operand from (encoding, register) pairs, which
  // must already be sorted by encoding. The list kind is classified from
  // the first register's class (GPR vs DPR vs SPR) and refined by a
  // trailing VPR/APSR entry.
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    }

    // A core-register list may additionally end with APSR (CLRM).
    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");

    auto Op = std::make_unique<ARMOperand>(Kind);
    // Only the register numbers are retained; encodings were just for
    // ordering.
    for (const auto &P : Regs)
      Op->Registers.push_back(P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
3746
3747 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3748 unsigned Count,
3749 bool isDoubleSpaced,
3750 SMLoc S, SMLoc E) {
3751 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3752 Op->VectorList.RegNum = RegNum;
3753 Op->VectorList.Count = Count;
3754 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3755 Op->StartLoc = S;
3756 Op->EndLoc = E;
3757 return Op;
3758 }
3759
3760 static std::unique_ptr<ARMOperand>
3761 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3762 SMLoc S, SMLoc E) {
3763 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3764 Op->VectorList.RegNum = RegNum;
3765 Op->VectorList.Count = Count;
3766 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3767 Op->StartLoc = S;
3768 Op->EndLoc = E;
3769 return Op;
3770 }
3771
3772 static std::unique_ptr<ARMOperand>
3773 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3774 bool isDoubleSpaced, SMLoc S, SMLoc E) {
3775 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3776 Op->VectorList.RegNum = RegNum;
3777 Op->VectorList.Count = Count;
3778 Op->VectorList.LaneIndex = Index;
3779 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3780 Op->StartLoc = S;
3781 Op->EndLoc = E;
3782 return Op;
3783 }
3784
3785 static std::unique_ptr<ARMOperand>
3786 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3787 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3788 Op->VectorIndex.Val = Idx;
3789 Op->StartLoc = S;
3790 Op->EndLoc = E;
3791 return Op;
3792 }
3793
3794 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3795 SMLoc E) {
3796 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3797 Op->Imm.Val = Val;
3798 Op->StartLoc = S;
3799 Op->EndLoc = E;
3800 return Op;
3801 }
3802
3803 static std::unique_ptr<ARMOperand>
3804 CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3805 ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3806 bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3807 auto Op = std::make_unique<ARMOperand>(k_Memory);
3808 Op->Memory.BaseRegNum = BaseRegNum;
3809 Op->Memory.OffsetImm = OffsetImm;
3810 Op->Memory.OffsetRegNum = OffsetRegNum;
3811 Op->Memory.ShiftType = ShiftType;
3812 Op->Memory.ShiftImm = ShiftImm;
3813 Op->Memory.Alignment = Alignment;
3814 Op->Memory.isNegative = isNegative;
3815 Op->StartLoc = S;
3816 Op->EndLoc = E;
3817 Op->AlignmentLoc = AlignmentLoc;
3818 return Op;
3819 }
3820
3821 static std::unique_ptr<ARMOperand>
3822 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3823 unsigned ShiftImm, SMLoc S, SMLoc E) {
3824 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3825 Op->PostIdxReg.RegNum = RegNum;
3826 Op->PostIdxReg.isAdd = isAdd;
3827 Op->PostIdxReg.ShiftTy = ShiftTy;
3828 Op->PostIdxReg.ShiftImm = ShiftImm;
3829 Op->StartLoc = S;
3830 Op->EndLoc = E;
3831 return Op;
3832 }
3833
3834 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3835 SMLoc S) {
3836 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3837 Op->MBOpt.Val = Opt;
3838 Op->StartLoc = S;
3839 Op->EndLoc = S;
3840 return Op;
3841 }
3842
3843 static std::unique_ptr<ARMOperand>
3844 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3845 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3846 Op->ISBOpt.Val = Opt;
3847 Op->StartLoc = S;
3848 Op->EndLoc = S;
3849 return Op;
3850 }
3851
3852 static std::unique_ptr<ARMOperand>
3853 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3854 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3855 Op->TSBOpt.Val = Opt;
3856 Op->StartLoc = S;
3857 Op->EndLoc = S;
3858 return Op;
3859 }
3860
3861 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3862 SMLoc S) {
3863 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3864 Op->IFlags.Val = IFlags;
3865 Op->StartLoc = S;
3866 Op->EndLoc = S;
3867 return Op;
3868 }
3869
3870 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3871 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3872 Op->MMask.Val = MMask;
3873 Op->StartLoc = S;
3874 Op->EndLoc = S;
3875 return Op;
3876 }
3877
3878 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3879 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3880 Op->BankedReg.Val = Reg;
3881 Op->StartLoc = S;
3882 Op->EndLoc = S;
3883 return Op;
3884 }
3885};
3886
3887} // end anonymous namespace.
3888
// Pretty-print one parsed operand for debugging/tracing; the output format
// is chosen by the operand's Kind tag.
void ARMOperand::print(raw_ostream &OS) const {
  // Helper: printable name for a register number, or "noreg" for 0.
  auto RegName = [](unsigned Reg) {
    if (Reg)
      return ARMInstPrinter::getRegisterName(Reg);
    else
      return "noreg";
  };

  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_VPTPred:
    OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << RegName(getReg()) << ">";
    break;
  case k_ITCondMask: {
    // Table indexed by the 4-bit IT mask; each 't'/'e' marks whether the
    // following instruction takes the IT condition or its inverse.
    static const char *const MaskStr[] = {
      "(invalid)", "(tttt)", "(ttt)", "(ttte)",
      "(tt)", "(ttet)", "(tte)", "(ttee)",
      "(t)", "(tett)", "(tet)", "(tete)",
      "(te)", "(teet)", "(tee)", "(teee)",
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  case k_Immediate:
    OS << *getImm();
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
    break;
  case k_TraceSyncBarrierOpt:
    OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the sub-fields that are actually set get printed.
    OS << "<memory";
    if (Memory.BaseRegNum)
      OS << " base:" << RegName(Memory.BaseRegNum);
    if (Memory.OffsetImm)
      OS << " offset-imm:" << *Memory.OffsetImm;
    if (Memory.OffsetRegNum)
      OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
         << RegName(Memory.OffsetRegNum);
    if (Memory.ShiftType != ARM_AM::no_shift) {
      OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
      OS << " shift-imm:" << Memory.ShiftImm;
    }
    if (Memory.Alignment)
      OS << " alignment:" << Memory.Alignment;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << RegName(PostIdxReg.RegNum);
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print flags high-bit-first (a, i, f).
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << RegName(getReg()) << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
       << RegName(RegShiftedReg.ShiftReg) << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
       << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // RotImm stores the rotation divided by 8.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    OS << "<mod_imm #" << ModImm.Bits << ", #"
       << ModImm.Rot << ")>";
    break;
  case k_ConstantPoolImmediate:
    OS << "<constant_pool_imm #" << *getConstantPoolImm();
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_RegisterListWithAPSR:
  case k_DPRRegisterList:
  case k_SPRRegisterList:
  case k_FPSRegisterListWithVPR:
  case k_FPDRegisterListWithVPR: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << RegName(*I);
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
4048
4049/// @name Auto-generated Match Functions
4050/// {
4051
4052static unsigned MatchRegisterName(StringRef Name);
4053
4054/// }
4055
4056bool ARMAsmParser::ParseRegister(unsigned &RegNo,
4057 SMLoc &StartLoc, SMLoc &EndLoc) {
4058 const AsmToken &Tok = getParser().getTok();
4059 StartLoc = Tok.getLoc();
4060 EndLoc = Tok.getEndLoc();
4061 RegNo = tryParseRegister();
4062
4063 return (RegNo == (unsigned)-1);
4064}
4065
4066OperandMatchResultTy ARMAsmParser::tryParseRegister(unsigned &RegNo,
4067 SMLoc &StartLoc,
4068 SMLoc &EndLoc) {
4069 if (ParseRegister(RegNo, StartLoc, EndLoc))
4070 return MatchOperand_NoMatch;
4071 return MatchOperand_Success;
4072}
4073
/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
/// Resolution order: tablegen'erated names, then gas-compatibility aliases,
/// then user aliases registered via the .req directive.
int ARMAsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case-insensitive; match on the lowered form.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid
  if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
    return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
4128
4129// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4130// If a recoverable error occurs, return 1. If an irrecoverable error
4131// occurs, return -1. An irrecoverable error is one where tokens have been
4132// consumed in the process of trying to parse the shifter (i.e., when it is
4133// indeed a shifter operand, but malformed).
// Parse a shifter operand ("lsl #2", "asr r3", "rrx", ...) that follows a
// register already pushed onto Operands, replacing that register with a
// combined shifted-register (or shifted-immediate) operand.
// Returns 0 on success, 1 if this is not a shifter (no tokens consumed),
// -1 on a malformed shifter (tokens consumed, error emitted).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Shift mnemonics are case-insensitive; "asl" is an alias for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted form unless this is rrx (which encodes as an
  // immediate-shift with ShiftReg folded into the source).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}
4229
4230/// Try to parse a register name. The token must be an Identifier when called.
4231/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4232/// if there is a "writeback". 'true' if it's not a register.
4233///
4234/// TODO this is likely to change to allow different register types and or to
4235/// parse for a specific register type.
/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback" ('!') or a vector index ("[n]").
/// Returns 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc RegStartLoc = Parser.getTok().getLoc();
  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));

  // A '!' suffix marks base-register writeback; emit it as a token operand.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand. This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return true;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");

    SMLoc E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}
4281
4282/// MatchCoprocessorOperandName - Try to parse an coprocessor related
4283/// instruction with a symbolic operand name.
4284/// We accept "crN" syntax for GAS compatibility.
4285/// <operand-name> ::= <prefix><number>
4286/// If CoprocOp is 'c', then:
4287/// <prefix> ::= c | cr
4288/// If CoprocOp is 'p', then :
4289/// <prefix> ::= p
4290/// <number> ::= integer in range [0, 15]
4291static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4292 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4293 // but efficient.
4294 if (Name.size() < 2 || Name[0] != CoprocOp)
4295 return -1;
4296 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4297
4298 switch (Name.size()) {
4299 default: return -1;
4300 case 1:
4301 switch (Name[0]) {
4302 default: return -1;
4303 case '0': return 0;
4304 case '1': return 1;
4305 case '2': return 2;
4306 case '3': return 3;
4307 case '4': return 4;
4308 case '5': return 5;
4309 case '6': return 6;
4310 case '7': return 7;
4311 case '8': return 8;
4312 case '9': return 9;
4313 }
4314 case 2:
4315 if (Name[0] != '1')
4316 return -1;
4317 switch (Name[1]) {
4318 default: return -1;
4319 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4320 // However, old cores (v5/v6) did use them in that way.
4321 case '0': return 10;
4322 case '1': return 11;
4323 case '2': return 12;
4324 case '3': return 13;
4325 case '4': return 14;
4326 case '5': return 15;
4327 }
4328 }
4329}
4330
4331/// parseITCondCode - Try to parse a condition code for an IT instruction.
4332OperandMatchResultTy
4333ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4334 MCAsmParser &Parser = getParser();
4335 SMLoc S = Parser.getTok().getLoc();
4336 const AsmToken &Tok = Parser.getTok();
4337 if (!Tok.is(AsmToken::Identifier))
4338 return MatchOperand_NoMatch;
4339 unsigned CC = ARMCondCodeFromString(Tok.getString());
4340 if (CC == ~0U)
4341 return MatchOperand_NoMatch;
4342 Parser.Lex(); // Eat the token.
4343
4344 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4345
4346 return MatchOperand_Success;
4347}
4348
4349/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4350/// token must be an Identifier when called, and if it is a coprocessor
4351/// number, the token is eaten and the operand is added to the operand list.
4352OperandMatchResultTy
4353ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4354 MCAsmParser &Parser = getParser();
4355 SMLoc S = Parser.getTok().getLoc();
4356 const AsmToken &Tok = Parser.getTok();
4357 if (Tok.isNot(AsmToken::Identifier))
4358 return MatchOperand_NoMatch;
4359
4360 int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4361 if (Num == -1)
4362 return MatchOperand_NoMatch;
4363 if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4364 return MatchOperand_NoMatch;
4365
4366 Parser.Lex(); // Eat identifier token.
4367 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4368 return MatchOperand_Success;
4369}
4370
4371/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4372/// token must be an Identifier when called, and if it is a coprocessor
4373/// number, the token is eaten and the operand is added to the operand list.
4374OperandMatchResultTy
4375ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4376 MCAsmParser &Parser = getParser();
4377 SMLoc S = Parser.getTok().getLoc();
4378 const AsmToken &Tok = Parser.getTok();
4379 if (Tok.isNot(AsmToken::Identifier))
4380 return MatchOperand_NoMatch;
4381
4382 int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4383 if (Reg == -1)
4384 return MatchOperand_NoMatch;
4385
4386 Parser.Lex(); // Eat identifier token.
4387 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4388 return MatchOperand_Success;
4389}
4390
4391/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4392/// coproc_option : '{' imm0_255 '}'
4393OperandMatchResultTy
4394ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4395 MCAsmParser &Parser = getParser();
4396 SMLoc S = Parser.getTok().getLoc();
4397
4398 // If this isn't a '{', this isn't a coprocessor immediate operand.
4399 if (Parser.getTok().isNot(AsmToken::LCurly))
4400 return MatchOperand_NoMatch;
4401 Parser.Lex(); // Eat the '{'
4402
4403 const MCExpr *Expr;
4404 SMLoc Loc = Parser.getTok().getLoc();
4405 if (getParser().parseExpression(Expr)) {
4406 Error(Loc, "illegal expression");
4407 return MatchOperand_ParseFail;
4408 }
4409 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4410 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
4411 Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
4412 return MatchOperand_ParseFail;
4413 }
4414 int Val = CE->getValue();
4415
4416 // Check for and consume the closing '}'
4417 if (Parser.getTok().isNot(AsmToken::RCurly))
4418 return MatchOperand_ParseFail;
4419 SMLoc E = Parser.getTok().getEndLoc();
4420 Parser.Lex(); // Eat the '}'
4421
4422 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4423 return MatchOperand_Success;
4424}
4425
4426// For register list parsing, we need to map from raw GPR register numbering
4427// to the enumeration values. The enumeration values aren't sorted by
4428// register number due to our using "sp", "lr" and "pc" as canonical names.
4429static unsigned getNextRegister(unsigned Reg) {
4430 // If this is a GPR, we need to do it manually, otherwise we can rely
4431 // on the sort ordering of the enumeration since the other reg-classes
4432 // are sane.
4433 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4434 return Reg + 1;
4435 switch(Reg) {
4436 default: llvm_unreachable("Invalid GPR number!")__builtin_unreachable();
4437 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4438 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4439 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4440 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4441 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4442 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4443 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4444 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4445 }
4446}
4447
4448// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4449// success, or false, if duplicate encoding found.
4450static bool
4451insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4452 unsigned Enc, unsigned Reg) {
4453 Regs.emplace_back(Enc, Reg);
4454 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4455 if (J->first == Enc) {
4456 Regs.erase(J.base());
4457 return false;
4458 }
4459 if (J->first < Enc)
4460 break;
4461 std::swap(*I, *J);
4462 }
4463 return true;
4464}
4465
/// Parse a register list of the form "{reg[, reg | - reg]*}" (e.g.
/// "{r0, r4-r7, lr}") and push a register-list operand onto \p Operands.
/// The class of the first register (GPR, DPR, SPR, ...) determines the
/// class every subsequent register must belong to. Q registers are accepted
/// and expanded into their two D sub-registers. When \p EnforceOrder is
/// true, out-of-order entries are diagnosed (warning for GPRs, error for
/// others); CLRM passes false since it is order-independent.
/// Returns true (after emitting a diagnostic) on failure.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
                                     bool EnforceOrder) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return TokError("Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  int EReg = 0;
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.emplace_back(EReg, Reg);
    ++Reg; // The second D sub-register is stored below with the common path.
  }
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.emplace_back(EReg, Reg);

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: "<first>-<last>".
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      // Duplicates are diagnosed as warnings, not errors.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        if (!insertNoDuplicates(Registers, EReg, Reg)) {
          Warning(AfterMinusLoc, StringRef("duplicated register (") +
                                     ARMInstPrinter::getRegisterName(Reg) +
                                     ") in register list");
        }
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    if (!RC->contains(Reg) &&
        RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
        ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
      // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
      // subset of GPRRegClassId except it contains APSR as well.
      RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
    }
    if (Reg == ARM::VPR &&
        (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
         RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
      // VPR may be appended to a VFP list; widen the class to accommodate it
      // and store the register immediately (no order/contiguity checks).
      RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
      EReg = MRI->getEncodingValue(Reg);
      if (!insertNoDuplicates(Registers, EReg, Reg)) {
        Warning(RegLoc, "duplicated register (" + RegTok.getString() +
                            ") in register list");
      }
      continue;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // In most cases, the list must be monotonically increasing. An
    // exception is CLRM, which is order-independent anyway, so
    // there's no potential for confusion if you write clrm {r2,r1}
    // instead of clrm {r1,r2}.
    if (EnforceOrder &&
        MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
        return Error(RegLoc, "register list not in ascending order");
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    if (!insertNoDuplicates(Registers, EReg, Reg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
                          ") in register list");
    }
    if (isQReg) {
      // Store the second D sub-register of the Q register.
      EReg = MRI->getEncodingValue(++Reg);
      Registers.emplace_back(EReg, Reg);
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
4625
4626// Helper function to parse the lane index for vector lists.
4627OperandMatchResultTy ARMAsmParser::
4628parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4629 MCAsmParser &Parser = getParser();
4630 Index = 0; // Always return a defined index value.
4631 if (Parser.getTok().is(AsmToken::LBrac)) {
4632 Parser.Lex(); // Eat the '['.
4633 if (Parser.getTok().is(AsmToken::RBrac)) {
4634 // "Dn[]" is the 'all lanes' syntax.
4635 LaneKind = AllLanes;
4636 EndLoc = Parser.getTok().getEndLoc();
4637 Parser.Lex(); // Eat the ']'.
4638 return MatchOperand_Success;
4639 }
4640
4641 // There's an optional '#' token here. Normally there wouldn't be, but
4642 // inline assemble puts one in, and it's friendly to accept that.
4643 if (Parser.getTok().is(AsmToken::Hash))
4644 Parser.Lex(); // Eat '#' or '$'.
4645
4646 const MCExpr *LaneIndex;
4647 SMLoc Loc = Parser.getTok().getLoc();
4648 if (getParser().parseExpression(LaneIndex)) {
4649 Error(Loc, "illegal expression");
4650 return MatchOperand_ParseFail;
4651 }
4652 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4653 if (!CE) {
4654 Error(Loc, "lane index must be empty or an integer");
4655 return MatchOperand_ParseFail;
4656 }
4657 if (Parser.getTok().isNot(AsmToken::RBrac)) {
4658 Error(Parser.getTok().getLoc(), "']' expected");
4659 return MatchOperand_ParseFail;
4660 }
4661 EndLoc = Parser.getTok().getEndLoc();
4662 Parser.Lex(); // Eat the ']'.
4663 int64_t Val = CE->getValue();
4664
4665 // FIXME: Make this range check context sensitive for .8, .16, .32.
4666 if (Val < 0 || Val > 7) {
4667 Error(Parser.getTok().getLoc(), "lane index out of range");
4668 return MatchOperand_ParseFail;
4669 }
4670 Index = Val;
4671 LaneKind = IndexedLane;
4672 return MatchOperand_Success;
4673 }
4674 LaneKind = NoLanes;
4675 return MatchOperand_Success;
4676}
4677
// Parse a vector register list, either a bare D/Q register (a gas extension
// treated as a one- or two-entry list) or a braced list such as
// "{d0, d1}", "{d0-d3}", "{q0, q2}" or, for MVE, "{q0, q1}". An optional
// lane specifier ("[]" or "[imm]") may follow each register and must match
// across all entries. Emits a VectorList / VectorListAllLanes /
// VectorListIndexed operand depending on the lane kind.
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A bare Q register is a two-entry D-register list.
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Convert the D-register pair to its DPair super-register.
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0; // 0 = unknown yet, 1 = single-spaced, 2 = double-spaced.
  unsigned FirstReg = Reg;

  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
      Error(Parser.getTok().getLoc(), "vector register in range Q0-Q7 expected");
      return MatchOperand_ParseFail;
  }
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: "<first>-<last>".
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if ((hasMVE() &&
           !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
          (!hasMVE() &&
           !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg))) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }

    if (hasMVE()) {
      if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg)) {
        Error(RegLoc, "vector register in range Q0-Q7 expected");
        return MatchOperand_ParseFail;
      }
      Spacing = 1;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
  case AllLanes: {
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2 && !hasMVE()) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
                   ARMOperand::CreateVectorListAllLanes);
    Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
    break;
  }
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
4934
4935/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4936OperandMatchResultTy
4937ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4938 MCAsmParser &Parser = getParser();
4939 SMLoc S = Parser.getTok().getLoc();
4940 const AsmToken &Tok = Parser.getTok();
4941 unsigned Opt;
4942
4943 if (Tok.is(AsmToken::Identifier)) {
4944 StringRef OptStr = Tok.getString();
4945
4946 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4947 .Case("sy", ARM_MB::SY)
4948 .Case("st", ARM_MB::ST)
4949 .Case("ld", ARM_MB::LD)
4950 .Case("sh", ARM_MB::ISH)
4951 .Case("ish", ARM_MB::ISH)
4952 .Case("shst", ARM_MB::ISHST)
4953 .Case("ishst", ARM_MB::ISHST)
4954 .Case("ishld", ARM_MB::ISHLD)
4955 .Case("nsh", ARM_MB::NSH)
4956 .Case("un", ARM_MB::NSH)
4957 .Case("nshst", ARM_MB::NSHST)
4958 .Case("nshld", ARM_MB::NSHLD)
4959 .Case("unst", ARM_MB::NSHST)
4960 .Case("osh", ARM_MB::OSH)
4961 .Case("oshst", ARM_MB::OSHST)
4962 .Case("oshld", ARM_MB::OSHLD)
4963 .Default(~0U);
4964
4965 // ishld, oshld, nshld and ld are only available from ARMv8.
4966 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4967 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4968 Opt = ~0U;
4969
4970 if (Opt == ~0U)
4971 return MatchOperand_NoMatch;
4972
4973 Parser.Lex(); // Eat identifier token.
4974 } else if (Tok.is(AsmToken::Hash) ||
4975 Tok.is(AsmToken::Dollar) ||
4976 Tok.is(AsmToken::Integer)) {
4977 if (Parser.getTok().isNot(AsmToken::Integer))
4978 Parser.Lex(); // Eat '#' or '$'.
4979 SMLoc Loc = Parser.getTok().getLoc();
4980
4981 const MCExpr *MemBarrierID;
4982 if (getParser().parseExpression(MemBarrierID)) {
4983 Error(Loc, "illegal expression");
4984 return MatchOperand_ParseFail;
4985 }
4986
4987 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4988 if (!CE) {
4989 Error(Loc, "constant expression expected");
4990 return MatchOperand_ParseFail;
4991 }
4992
4993 int Val = CE->getValue();
4994 if (Val & ~0xf) {
4995 Error(Loc, "immediate value out of range");
4996 return MatchOperand_ParseFail;
4997 }
4998
4999 Opt = ARM_MB::RESERVED_0 + Val;
5000 } else
5001 return MatchOperand_ParseFail;
5002
5003 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
5004 return MatchOperand_Success;
5005}
5006
5007OperandMatchResultTy
5008ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5009 MCAsmParser &Parser = getParser();
5010 SMLoc S = Parser.getTok().getLoc();
5011 const AsmToken &Tok = Parser.getTok();
5012
5013 if (Tok.isNot(AsmToken::Identifier))
5014 return MatchOperand_NoMatch;
5015
5016 if (!Tok.getString().equals_insensitive("csync"))
5017 return MatchOperand_NoMatch;
5018
5019 Parser.Lex(); // Eat identifier token.
5020
5021 Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
5022 return MatchOperand_Success;
5023}
5024
5025/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
5026OperandMatchResultTy
5027ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
5028 MCAsmParser &Parser = getParser();
5029 SMLoc S = Parser.getTok().getLoc();
5030 const AsmToken &Tok = Parser.getTok();
5031 unsigned Opt;
5032
5033 if (Tok.is(AsmToken::Identifier)) {
5034 StringRef OptStr = Tok.getString();
5035
5036 if (OptStr.equals_insensitive("sy"))
5037 Opt = ARM_ISB::SY;
5038 else
5039 return MatchOperand_NoMatch;
5040
5041 Parser.Lex(); // Eat identifier token.
5042 } else if (Tok.is(AsmToken::Hash) ||
5043 Tok.is(AsmToken::Dollar) ||
5044 Tok.is(AsmToken::Integer)) {
5045 if (Parser.getTok().isNot(AsmToken::Integer))
5046 Parser.Lex(); // Eat '#' or '$'.
5047 SMLoc Loc = Parser.getTok().getLoc();
5048
5049 const MCExpr *ISBarrierID;
5050 if (getParser().parseExpression(ISBarrierID)) {
5051 Error(Loc, "illegal expression");
5052 return MatchOperand_ParseFail;
5053 }
5054
5055 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5056 if (!CE) {
5057 Error(Loc, "constant expression expected");
5058 return MatchOperand_ParseFail;
5059 }
5060
5061 int Val = CE->getValue();
5062 if (Val & ~0xf) {
5063 Error(Loc, "immediate value out of range");
5064 return MatchOperand_ParseFail;
5065 }
5066
5067 Opt = ARM_ISB::RESERVED_0 + Val;
5068 } else
5069 return MatchOperand_ParseFail;
5070
5071 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5072 (ARM_ISB::InstSyncBOpt)Opt, S));
5073 return MatchOperand_Success;
5074}
5075
5076
5077/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5078OperandMatchResultTy
5079ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5080 MCAsmParser &Parser = getParser();
5081 SMLoc S = Parser.getTok().getLoc();
5082 const AsmToken &Tok = Parser.getTok();
5083 if (!Tok.is(AsmToken::Identifier))
5084 return MatchOperand_NoMatch;
5085 StringRef IFlagsStr = Tok.getString();
5086
5087 // An iflags string of "none" is interpreted to mean that none of the AIF
5088 // bits are set. Not a terribly useful instruction, but a valid encoding.
5089 unsigned IFlags = 0;
5090 if (IFlagsStr != "none") {
5091 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5092 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5093 .Case("a", ARM_PROC::A)
5094 .Case("i", ARM_PROC::I)
5095 .Case("f", ARM_PROC::F)
5096 .Default(~0U);
5097
5098 // If some specific iflag is already set, it means that some letter is
5099 // present more than once, this is not acceptable.
5100 if (Flag == ~0U || (IFlags & Flag))
5101 return MatchOperand_NoMatch;
5102
5103 IFlags |= Flag;
5104 }
5105 }
5106
5107 Parser.Lex(); // Eat identifier token.
5108 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5109 return MatchOperand_Success;
5110}
5111
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
/// Accepts either a raw integer SYSm value, an M-class named system register,
/// or an A/R-class spec_reg[_flags] spelling such as CPSR_sxf.
OperandMatchResultTy
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A bare integer in [0, 255] is accepted directly as the mask value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return MatchOperand_NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  // M-class cores use named system registers looked up in the tablegen'd
  // table; the entry must also be supported by the current target features.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return MatchOperand_NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR accepts only these fixed flag-group spellings.
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g",      0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      // Any unrecognized non-empty flag string is an error; a plain "apsr"
      // (no flags) defaults to the same encoding as "nzcvq".
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each remaining letter sets one mask bit: c=1, x=2, s=4, f=8.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //   FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
5208
5209/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5210/// use in the MRS/MSR instructions added to support virtualization.
5211OperandMatchResultTy
5212ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5213 MCAsmParser &Parser = getParser();
5214 SMLoc S = Parser.getTok().getLoc();
5215 const AsmToken &Tok = Parser.getTok();
5216 if (!Tok.is(AsmToken::Identifier))
5217 return MatchOperand_NoMatch;
5218 StringRef RegName = Tok.getString();
5219
5220 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5221 if (!TheReg)
5222 return MatchOperand_NoMatch;
5223 unsigned Encoding = TheReg->Encoding;
5224
5225 Parser.Lex(); // Eat identifier token.
5226 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5227 return MatchOperand_Success;
5228}
5229
5230OperandMatchResultTy
5231ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
5232 int High) {
5233 MCAsmParser &Parser = getParser();
5234 const AsmToken &Tok = Parser.getTok();
5235 if (Tok.isNot(AsmToken::Identifier)) {
5236 Error(Parser.getTok().getLoc(), Op + " operand expected.");
5237 return MatchOperand_ParseFail;
5238 }
5239 StringRef ShiftName = Tok.getString();
5240 std::string LowerOp = Op.lower();
5241 std::string UpperOp = Op.upper();
5242 if (ShiftName != LowerOp && ShiftName != UpperOp) {
5243 Error(Parser.getTok().getLoc(), Op + " operand expected.");
5244 return MatchOperand_ParseFail;
5245 }
5246 Parser.Lex(); // Eat shift type token.
5247
5248 // There must be a '#' and a shift amount.
5249 if (Parser.getTok().isNot(AsmToken::Hash) &&
5250 Parser.getTok().isNot(AsmToken::Dollar)) {
5251 Error(Parser.getTok().getLoc(), "'#' expected");
5252 return MatchOperand_ParseFail;
5253 }
5254 Parser.Lex(); // Eat hash token.
5255
5256 const MCExpr *ShiftAmount;
5257 SMLoc Loc = Parser.getTok().getLoc();
5258 SMLoc EndLoc;
5259 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5260 Error(Loc, "illegal expression");
5261 return MatchOperand_ParseFail;
5262 }
5263 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5264 if (!CE) {
5265 Error(Loc, "constant expression expected");
5266 return MatchOperand_ParseFail;
5267 }
5268 int Val = CE->getValue();
5269 if (Val < Low || Val > High) {
5270 Error(Loc, "immediate value out of range");
5271 return MatchOperand_ParseFail;
5272 }
5273
5274 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5275
5276 return MatchOperand_Success;
5277}
5278
5279OperandMatchResultTy
5280ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5281 MCAsmParser &Parser = getParser();
5282 const AsmToken &Tok = Parser.getTok();
5283 SMLoc S = Tok.getLoc();
5284 if (Tok.isNot(AsmToken::Identifier)) {
5285 Error(S, "'be' or 'le' operand expected");
5286 return MatchOperand_ParseFail;
5287 }
5288 int Val = StringSwitch<int>(Tok.getString().lower())
5289 .Case("be", 1)
5290 .Case("le", 0)
5291 .Default(-1);
5292 Parser.Lex(); // Eat the token.
5293
5294 if (Val == -1) {
5295 Error(S, "'be' or 'le' operand expected");
5296 return MatchOperand_ParseFail;
5297 }
5298 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5299 getContext()),
5300 S, Tok.getEndLoc()));
5301 return MatchOperand_Success;
5302}
5303
5304/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5305/// instructions. Legal values are:
5306/// lsl #n 'n' in [0,31]
5307/// asr #n 'n' in [1,32]
5308/// n == 32 encoded as n == 0.
5309OperandMatchResultTy
5310ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5311 MCAsmParser &Parser = getParser();
5312 const AsmToken &Tok = Parser.getTok();
5313 SMLoc S = Tok.getLoc();
5314 if (Tok.isNot(AsmToken::Identifier)) {
5315 Error(S, "shift operator 'asr' or 'lsl' expected");
5316 return MatchOperand_ParseFail;
5317 }
5318 StringRef ShiftName = Tok.getString();
5319 bool isASR;
5320 if (ShiftName == "lsl" || ShiftName == "LSL")
5321 isASR = false;
5322 else if (ShiftName == "asr" || ShiftName == "ASR")
5323 isASR = true;
5324 else {
5325 Error(S, "shift operator 'asr' or 'lsl' expected");
5326 return MatchOperand_ParseFail;
5327 }
5328 Parser.Lex(); // Eat the operator.
5329
5330 // A '#' and a shift amount.
5331 if (Parser.getTok().isNot(AsmToken::Hash) &&
5332 Parser.getTok().isNot(AsmToken::Dollar)) {
5333 Error(Parser.getTok().getLoc(), "'#' expected");
5334 return MatchOperand_ParseFail;
5335 }
5336 Parser.Lex(); // Eat hash token.
5337 SMLoc ExLoc = Parser.getTok().getLoc();
5338
5339 const MCExpr *ShiftAmount;
5340 SMLoc EndLoc;
5341 if (getParser().parseExpression(ShiftAmount, EndLoc)) {
5342 Error(ExLoc, "malformed shift expression");
5343 return MatchOperand_ParseFail;
5344 }
5345 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5346 if (!CE) {
5347 Error(ExLoc, "shift amount must be an immediate");
5348 return MatchOperand_ParseFail;
5349 }
5350
5351 int64_t Val = CE->getValue();
5352 if (isASR) {
5353 // Shift amount must be in [1,32]
5354 if (Val < 1 || Val > 32) {
5355 Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5356 return MatchOperand_ParseFail;
5357 }
5358 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5359 if (isThumb() && Val == 32) {
5360 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5361 return MatchOperand_ParseFail;
5362 }
5363 if (Val == 32) Val = 0;
5364 } else {
5365 // Shift amount must be in [1,32]
5366 if (Val < 0 || Val > 31) {
5367 Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
5368 return MatchOperand_ParseFail;
5369 }
5370 }
5371
5372 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5373
5374 return MatchOperand_Success;
5375}
5376
/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
/// of instructions. Legal values are:
///          ror #n  'n' in {0, 8, 16, 24}
OperandMatchResultTy
ARMAsmParser::parseRotImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  // No tokens may be consumed on the no-match paths; other operand parsers
  // can still be tried on this input.
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef ShiftName = Tok.getString();
  if (ShiftName != "ror" && ShiftName != "ROR")
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the operator.

  // A '#' and a rotate amount.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.
  SMLoc ExLoc = Parser.getTok().getLoc();

  const MCExpr *ShiftAmount;
  SMLoc EndLoc;
  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
    Error(ExLoc, "malformed rotate expression");
    return MatchOperand_ParseFail;
  }
  // The rotate amount must fold to a compile-time constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(ExLoc, "rotate amount must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Val = CE->getValue();
  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
  // normally, zero is represented in asm by omitting the rotate operand
  // entirely.
  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));

  return MatchOperand_Success;
}
5426
5427OperandMatchResultTy
5428ARMAsmParser::parseModImm(OperandVector &Operands) {
5429 MCAsmParser &Parser = getParser();
5430 MCAsmLexer &Lexer = getLexer();
5431 int64_t Imm1, Imm2;
5432
5433 SMLoc S = Parser.getTok().getLoc();
5434
5435 // 1) A mod_imm operand can appear in the place of a register name:
5436 // add r0, #mod_imm
5437 // add r0, r0, #mod_imm
5438 // to correctly handle the latter, we bail out as soon as we see an
5439 // identifier.
5440 //
5441 // 2) Similarly, we do not want to parse into complex operands:
5442 // mov r0, #mod_imm
5443 // mov r0, :lower16:(_foo)
5444 if (Parser.getTok().is(AsmToken::Identifier) ||
5445 Parser.getTok().is(AsmToken::Colon))
5446 return MatchOperand_NoMatch;
5447
5448 // Hash (dollar) is optional as per the ARMARM
5449 if (Parser.getTok().is(AsmToken::Hash) ||
5450 Parser.getTok().is(AsmToken::Dollar)) {
5451 // Avoid parsing into complex operands (#:)
5452 if (Lexer.peekTok().is(AsmToken::Colon))
5453 return MatchOperand_NoMatch;
5454
5455 // Eat the hash (dollar)
5456 Parser.Lex();
5457 }
5458
5459 SMLoc Sx1, Ex1;
5460 Sx1 = Parser.getTok().getLoc();
5461 const MCExpr *Imm1Exp;
5462 if (getParser().parseExpression(Imm1Exp, Ex1)) {
5463 Error(Sx1, "malformed expression");
5464 return MatchOperand_ParseFail;
5465 }
5466
5467 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5468
5469 if (CE) {
5470 // Immediate must fit within 32-bits
5471 Imm1 = CE->getValue();
5472 int Enc = ARM_AM::getSOImmVal(Imm1);
5473 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5474 // We have a match!
5475 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5476 (Enc & 0xF00) >> 7,
5477 Sx1, Ex1));
5478 return MatchOperand_Success;
5479 }
5480
5481 // We have parsed an immediate which is not for us, fallback to a plain
5482 // immediate. This can happen for instruction aliases. For an example,
5483 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5484 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5485 // instruction with a mod_imm operand. The alias is defined such that the
5486 // parser method is shared, that's why we have to do this here.
5487 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5488 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5489 return MatchOperand_Success;
5490 }
5491 } else {
5492 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5493 // MCFixup). Fallback to a plain immediate.
5494 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5495 return MatchOperand_Success;
5496 }
5497
5498 // From this point onward, we expect the input to be a (#bits, #rot) pair
5499 if (Parser.getTok().isNot(AsmToken::Comma)) {
5500 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
5501 return MatchOperand_ParseFail;
5502 }
5503
5504 if (Imm1 & ~0xFF) {
5505 Error(Sx1, "immediate operand must a number in the range [0, 255]");
5506 return MatchOperand_ParseFail;
5507 }
5508
5509 // Eat the comma
5510 Parser.Lex();
5511
5512 // Repeat for #rot
5513 SMLoc Sx2, Ex2;
5514 Sx2 = Parser.getTok().getLoc();
5515
5516 // Eat the optional hash (dollar)
5517 if (Parser.getTok().is(AsmToken::Hash) ||
5518 Parser.getTok().is(AsmToken::Dollar))
5519 Parser.Lex();
5520
5521 const MCExpr *Imm2Exp;
5522 if (getParser().parseExpression(Imm2Exp, Ex2)) {
5523 Error(Sx2, "malformed expression");
5524 return MatchOperand_ParseFail;
5525 }
5526
5527 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5528
5529 if (CE) {
5530 Imm2 = CE->getValue();
5531 if (!(Imm2 & ~0x1E)) {
5532 // We have a match!
5533 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5534 return MatchOperand_Success;
5535 }
5536 Error(Sx2, "immediate operand must an even number in the range [0, 30]");
5537 return MatchOperand_ParseFail;
5538 } else {
5539 Error(Sx2, "constant expression expected");
5540 return MatchOperand_ParseFail;
5541 }
5542}
5543
/// parseBitfield - Parse the "#lsb, #width" operand pair of a BFC/BFI/SBFX/
/// UBFX-style bitfield instruction.
OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  // The LSB must fold to a compile-time constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  // The width must also fold to a compile-time constant.
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}
5612
/// parsePostIdxReg - Parse a post-index addressing register operand, with an
/// optional sign and an optional shift suffix.
OperandMatchResultTy
ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // postidx_reg := '+' register {, shift}
  //              | '-' register {, shift}
  //              | register {, shift}

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  bool haveEaten = false;
  bool isAdd = true;
  // An optional leading '+' or '-' selects the offset direction.
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  SMLoc E = Parser.getTok().getEndLoc();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // Once a sign has been consumed a register is mandatory; with nothing
    // consumed, this simply isn't a post-index register operand.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  // An optional ", <shift>" suffix may follow the register.
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
      return MatchOperand_ParseFail;

    // FIXME: Only approximates end...may include intervening whitespace.
    E = Parser.getTok().getLoc();
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
                                                  ShiftImm, S, E));

  return MatchOperand_Success;
}
5662
/// parseAM3Offset - Parse an addressing-mode-3 offset operand: a register
/// with optional sign, or a signed immediate (with '#-0' encoded specially).
OperandMatchResultTy
ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return MatchOperand_ParseFail;
    // The offset must fold to a compile-time constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }

  // Otherwise expect an optionally-signed register.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // A consumed sign makes the register mandatory; otherwise just no match.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Tok.getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return MatchOperand_Success;
}
5733
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Operands[3] is Rd; Operands[1] carries the cc_out; Operands[2] the
  // condition code; Operands[4] (and [5] in the three-operand form) are the
  // source registers.
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand &)*Operands[4]).getReg() ==
          ((ARMOperand &)*Operands[3]).getReg())
    RegOp = 5;
  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
  // Repeat the first MCInst operand (Rd) as an additional source operand.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
5752
/// cvtThumbBranches - Convert parsed Thumb branch operands to an MCInst,
/// fixing up the opcode for IT-block context and for branch-target range.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Locate the condition-code and immediate operands for this opcode.
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc: CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at it's location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      // Widen to the 32-bit encoding when the target is out of narrow range
      // and the target supports it.
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
5810
/// cvtMVEVMOVQtoDReg - Convert the parsed operands of an MVE VMOV (vector
/// lane to two GPRs) into MCInst operands, in encoding order.
void ARMAsmParser::cvtMVEVMOVQtoDReg(
  MCInst &Inst, const OperandVector &Operands) {

  // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
  assert(Operands.size() == 8);

  ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
  ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
  ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
  // skip second copy of Qd in Operands[6]
  ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
  ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
}
5825
5826/// Parse an ARM memory expression, return false if successful else return true
5827/// or an error. The first token must be a '[' when called.
5828bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5829 MCAsmParser &Parser = getParser();
5830 SMLoc S, E;
5831 if (Parser.getTok().isNot(AsmToken::LBrac))
5832 return TokError("Token is not a Left Bracket");
5833 S = Parser.getTok().getLoc();
5834 Parser.Lex(); // Eat left bracket token.
5835
5836 const AsmToken &BaseRegTok = Parser.getTok();
5837 int BaseRegNum = tryParseRegister();
5838 if (BaseRegNum == -1)
5839 return Error(BaseRegTok.getLoc(), "register expected");
5840
5841 // The next token must either be a comma, a colon or a closing bracket.
5842 const AsmToken &Tok = Parser.getTok();
5843 if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5844 !Tok.is(AsmToken::RBrac))
5845 return Error(Tok.getLoc(), "malformed memory operand");
5846
5847 if (Tok.is(AsmToken::RBrac)) {
5848 E = Tok.getEndLoc();
5849 Parser.Lex(); // Eat right bracket token.
5850
5851 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5852 ARM_AM::no_shift, 0, 0, false,
5853 S, E));
5854
5855 // If there's a pre-indexing writeback marker, '!', just add it as a token
5856 // operand. It's rather odd, but syntactically valid.
5857 if (Parser.getTok().is(AsmToken::Exclaim)) {
5858 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5859 Parser.Lex(); // Eat the '!'.
5860 }
5861
5862 return false;
5863 }
5864
5865 assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&(static_cast<void> (0))
5866 "Lost colon or comma in memory operand?!")(static_cast<void> (0));
5867 if (Tok.is(AsmToken::Comma)) {
5868 Parser.Lex(); // Eat the comma.
5869 }
5870
5871 // If we have a ':', it's an alignment specifier.
5872 if (Parser.getTok().is(AsmToken::Colon)) {
5873 Parser.Lex(); // Eat the ':'.
5874 E = Parser.getTok().getLoc();
5875 SMLoc AlignmentLoc = Tok.getLoc();
5876
5877 const MCExpr *Expr;
5878 if (getParser().parseExpression(Expr))
5879 return true;
5880
5881 // The expression has to be a constant. Memory references with relocations
5882 // don't come through here, as they use the <label> forms of the relevant
5883 // instructions.
5884 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5885 if (!CE)
5886 return Error (E, "constant expression expected");
5887
5888 unsigned Align = 0;
5889 switch (CE->getValue()) {
5890 default:
5891 return Error(E,
5892 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5893 case 16: Align = 2; break;
5894 case 32: Align = 4; break;
5895 case 64: Align = 8; break;
5896 case 128: Align = 16; break;
5897 case 256: Align = 32; break;
5898 }
5899
5900 // Now we should have the closing ']'
5901 if (Parser.getTok().isNot(AsmToken::RBrac))
5902 return Error(Parser.getTok().getLoc(), "']' expected");
5903 E = Parser.getTok().getEndLoc();
5904 Parser.Lex(); // Eat right bracket token.
5905
5906 // Don't worry about range checking the value here. That's handled by
5907 // the is*() predicates.
5908 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5909 ARM_AM::no_shift, 0, Align,
5910 false, S, E, AlignmentLoc));
5911
5912 // If there's a pre-indexing writeback marker, '!', just add it as a token
5913 // operand.
5914 if (Parser.getTok().is(AsmToken::Exclaim)) {
5915 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5916 Parser.Lex(); // Eat the '!'.
5917 }
5918
5919 return false;
5920 }
5921
5922 // If we have a '#' or '$', it's an immediate offset, else assume it's a
5923 // register offset. Be friendly and also accept a plain integer or expression
5924 // (without a leading hash) for gas compatibility.
5925 if (Parser.getTok().is(AsmToken::Hash) ||
5926 Parser.getTok().is(AsmToken::Dollar) ||
5927 Parser.getTok().is(AsmToken::LParen) ||
5928 Parser.getTok().is(AsmToken::Integer)) {
5929 if (Parser.getTok().is(AsmToken::Hash) ||
5930 Parser.getTok().is(AsmToken::Dollar))
5931 Parser.Lex(); // Eat '#' or '$'
5932 E = Parser.getTok().getLoc();
5933
5934 bool isNegative = getParser().getTok().is(AsmToken::Minus);
5935 const MCExpr *Offset, *AdjustedOffset;
5936 if (getParser().parseExpression(Offset))
5937 return true;
5938
5939 if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
5940 // If the constant was #-0, represent it as
5941 // std::numeric_limits<int32_t>::min().
5942 int32_t Val = CE->getValue();
5943 if (isNegative && Val == 0)
5944 CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5945 getContext());
5946 // Don't worry about range checking the value here. That's handled by
5947 // the is*() predicates.
5948 AdjustedOffset = CE;
5949 } else